]> git.proxmox.com Git - mirror_qemu.git/blame - target/hppa/translate.c
hw/hppa: Allow up to 16 emulated CPUs
[mirror_qemu.git] / target / hppa / translate.c
CommitLineData
61766fe9
RH
1/*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
d6ea4236 9 * version 2.1 of the License, or (at your option) any later version.
61766fe9
RH
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "disas/disas.h"
23#include "qemu/host-utils.h"
24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
61766fe9 26#include "exec/cpu_ldst.h"
61766fe9
RH
27#include "exec/helper-proto.h"
28#include "exec/helper-gen.h"
869051ea 29#include "exec/translator.h"
61766fe9
RH
30#include "exec/log.h"
31
eaa3783b
RH
32/* Since we have a distinction between register size and address size,
33 we need to redefine all of these. */
34
35#undef TCGv
36#undef tcg_temp_new
eaa3783b
RH
37#undef tcg_global_mem_new
38#undef tcg_temp_local_new
39#undef tcg_temp_free
40
41#if TARGET_LONG_BITS == 64
42#define TCGv_tl TCGv_i64
43#define tcg_temp_new_tl tcg_temp_new_i64
44#define tcg_temp_free_tl tcg_temp_free_i64
45#if TARGET_REGISTER_BITS == 64
46#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
47#else
48#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
49#endif
50#else
51#define TCGv_tl TCGv_i32
52#define tcg_temp_new_tl tcg_temp_new_i32
53#define tcg_temp_free_tl tcg_temp_free_i32
54#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
55#endif
56
57#if TARGET_REGISTER_BITS == 64
58#define TCGv_reg TCGv_i64
59
60#define tcg_temp_new tcg_temp_new_i64
eaa3783b
RH
61#define tcg_global_mem_new tcg_global_mem_new_i64
62#define tcg_temp_local_new tcg_temp_local_new_i64
63#define tcg_temp_free tcg_temp_free_i64
64
65#define tcg_gen_movi_reg tcg_gen_movi_i64
66#define tcg_gen_mov_reg tcg_gen_mov_i64
67#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
68#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
69#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
70#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
71#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
72#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
73#define tcg_gen_ld_reg tcg_gen_ld_i64
74#define tcg_gen_st8_reg tcg_gen_st8_i64
75#define tcg_gen_st16_reg tcg_gen_st16_i64
76#define tcg_gen_st32_reg tcg_gen_st32_i64
77#define tcg_gen_st_reg tcg_gen_st_i64
78#define tcg_gen_add_reg tcg_gen_add_i64
79#define tcg_gen_addi_reg tcg_gen_addi_i64
80#define tcg_gen_sub_reg tcg_gen_sub_i64
81#define tcg_gen_neg_reg tcg_gen_neg_i64
82#define tcg_gen_subfi_reg tcg_gen_subfi_i64
83#define tcg_gen_subi_reg tcg_gen_subi_i64
84#define tcg_gen_and_reg tcg_gen_and_i64
85#define tcg_gen_andi_reg tcg_gen_andi_i64
86#define tcg_gen_or_reg tcg_gen_or_i64
87#define tcg_gen_ori_reg tcg_gen_ori_i64
88#define tcg_gen_xor_reg tcg_gen_xor_i64
89#define tcg_gen_xori_reg tcg_gen_xori_i64
90#define tcg_gen_not_reg tcg_gen_not_i64
91#define tcg_gen_shl_reg tcg_gen_shl_i64
92#define tcg_gen_shli_reg tcg_gen_shli_i64
93#define tcg_gen_shr_reg tcg_gen_shr_i64
94#define tcg_gen_shri_reg tcg_gen_shri_i64
95#define tcg_gen_sar_reg tcg_gen_sar_i64
96#define tcg_gen_sari_reg tcg_gen_sari_i64
97#define tcg_gen_brcond_reg tcg_gen_brcond_i64
98#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
99#define tcg_gen_setcond_reg tcg_gen_setcond_i64
100#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
101#define tcg_gen_mul_reg tcg_gen_mul_i64
102#define tcg_gen_muli_reg tcg_gen_muli_i64
103#define tcg_gen_div_reg tcg_gen_div_i64
104#define tcg_gen_rem_reg tcg_gen_rem_i64
105#define tcg_gen_divu_reg tcg_gen_divu_i64
106#define tcg_gen_remu_reg tcg_gen_remu_i64
107#define tcg_gen_discard_reg tcg_gen_discard_i64
108#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
109#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
110#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
111#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
112#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
113#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
114#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
115#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
116#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
117#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
118#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
119#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
120#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
121#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
122#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
123#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
124#define tcg_gen_andc_reg tcg_gen_andc_i64
125#define tcg_gen_eqv_reg tcg_gen_eqv_i64
126#define tcg_gen_nand_reg tcg_gen_nand_i64
127#define tcg_gen_nor_reg tcg_gen_nor_i64
128#define tcg_gen_orc_reg tcg_gen_orc_i64
129#define tcg_gen_clz_reg tcg_gen_clz_i64
130#define tcg_gen_ctz_reg tcg_gen_ctz_i64
131#define tcg_gen_clzi_reg tcg_gen_clzi_i64
132#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
133#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
134#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
135#define tcg_gen_rotl_reg tcg_gen_rotl_i64
136#define tcg_gen_rotli_reg tcg_gen_rotli_i64
137#define tcg_gen_rotr_reg tcg_gen_rotr_i64
138#define tcg_gen_rotri_reg tcg_gen_rotri_i64
139#define tcg_gen_deposit_reg tcg_gen_deposit_i64
140#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
141#define tcg_gen_extract_reg tcg_gen_extract_i64
142#define tcg_gen_sextract_reg tcg_gen_sextract_i64
05bfd4db 143#define tcg_gen_extract2_reg tcg_gen_extract2_i64
eaa3783b
RH
144#define tcg_const_reg tcg_const_i64
145#define tcg_const_local_reg tcg_const_local_i64
29dd6f64 146#define tcg_constant_reg tcg_constant_i64
eaa3783b
RH
147#define tcg_gen_movcond_reg tcg_gen_movcond_i64
148#define tcg_gen_add2_reg tcg_gen_add2_i64
149#define tcg_gen_sub2_reg tcg_gen_sub2_i64
150#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
151#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
152#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
5bfa8034 153#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
eaa3783b
RH
154#else
155#define TCGv_reg TCGv_i32
156#define tcg_temp_new tcg_temp_new_i32
eaa3783b
RH
157#define tcg_global_mem_new tcg_global_mem_new_i32
158#define tcg_temp_local_new tcg_temp_local_new_i32
159#define tcg_temp_free tcg_temp_free_i32
160
161#define tcg_gen_movi_reg tcg_gen_movi_i32
162#define tcg_gen_mov_reg tcg_gen_mov_i32
163#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
164#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
165#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
166#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
167#define tcg_gen_ld32u_reg tcg_gen_ld_i32
168#define tcg_gen_ld32s_reg tcg_gen_ld_i32
169#define tcg_gen_ld_reg tcg_gen_ld_i32
170#define tcg_gen_st8_reg tcg_gen_st8_i32
171#define tcg_gen_st16_reg tcg_gen_st16_i32
172#define tcg_gen_st32_reg tcg_gen_st32_i32
173#define tcg_gen_st_reg tcg_gen_st_i32
174#define tcg_gen_add_reg tcg_gen_add_i32
175#define tcg_gen_addi_reg tcg_gen_addi_i32
176#define tcg_gen_sub_reg tcg_gen_sub_i32
177#define tcg_gen_neg_reg tcg_gen_neg_i32
178#define tcg_gen_subfi_reg tcg_gen_subfi_i32
179#define tcg_gen_subi_reg tcg_gen_subi_i32
180#define tcg_gen_and_reg tcg_gen_and_i32
181#define tcg_gen_andi_reg tcg_gen_andi_i32
182#define tcg_gen_or_reg tcg_gen_or_i32
183#define tcg_gen_ori_reg tcg_gen_ori_i32
184#define tcg_gen_xor_reg tcg_gen_xor_i32
185#define tcg_gen_xori_reg tcg_gen_xori_i32
186#define tcg_gen_not_reg tcg_gen_not_i32
187#define tcg_gen_shl_reg tcg_gen_shl_i32
188#define tcg_gen_shli_reg tcg_gen_shli_i32
189#define tcg_gen_shr_reg tcg_gen_shr_i32
190#define tcg_gen_shri_reg tcg_gen_shri_i32
191#define tcg_gen_sar_reg tcg_gen_sar_i32
192#define tcg_gen_sari_reg tcg_gen_sari_i32
193#define tcg_gen_brcond_reg tcg_gen_brcond_i32
194#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
195#define tcg_gen_setcond_reg tcg_gen_setcond_i32
196#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
197#define tcg_gen_mul_reg tcg_gen_mul_i32
198#define tcg_gen_muli_reg tcg_gen_muli_i32
199#define tcg_gen_div_reg tcg_gen_div_i32
200#define tcg_gen_rem_reg tcg_gen_rem_i32
201#define tcg_gen_divu_reg tcg_gen_divu_i32
202#define tcg_gen_remu_reg tcg_gen_remu_i32
203#define tcg_gen_discard_reg tcg_gen_discard_i32
204#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
205#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
206#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
207#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
208#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
209#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
210#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
211#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
212#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
213#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
214#define tcg_gen_ext32u_reg tcg_gen_mov_i32
215#define tcg_gen_ext32s_reg tcg_gen_mov_i32
216#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
217#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
218#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
219#define tcg_gen_andc_reg tcg_gen_andc_i32
220#define tcg_gen_eqv_reg tcg_gen_eqv_i32
221#define tcg_gen_nand_reg tcg_gen_nand_i32
222#define tcg_gen_nor_reg tcg_gen_nor_i32
223#define tcg_gen_orc_reg tcg_gen_orc_i32
224#define tcg_gen_clz_reg tcg_gen_clz_i32
225#define tcg_gen_ctz_reg tcg_gen_ctz_i32
226#define tcg_gen_clzi_reg tcg_gen_clzi_i32
227#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
228#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
229#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
230#define tcg_gen_rotl_reg tcg_gen_rotl_i32
231#define tcg_gen_rotli_reg tcg_gen_rotli_i32
232#define tcg_gen_rotr_reg tcg_gen_rotr_i32
233#define tcg_gen_rotri_reg tcg_gen_rotri_i32
234#define tcg_gen_deposit_reg tcg_gen_deposit_i32
235#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
236#define tcg_gen_extract_reg tcg_gen_extract_i32
237#define tcg_gen_sextract_reg tcg_gen_sextract_i32
05bfd4db 238#define tcg_gen_extract2_reg tcg_gen_extract2_i32
eaa3783b
RH
239#define tcg_const_reg tcg_const_i32
240#define tcg_const_local_reg tcg_const_local_i32
29dd6f64 241#define tcg_constant_reg tcg_constant_i32
eaa3783b
RH
242#define tcg_gen_movcond_reg tcg_gen_movcond_i32
243#define tcg_gen_add2_reg tcg_gen_add2_i32
244#define tcg_gen_sub2_reg tcg_gen_sub2_i32
245#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
246#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
247#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
5bfa8034 248#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
eaa3783b
RH
249#endif /* TARGET_REGISTER_BITS */
250
61766fe9
RH
251typedef struct DisasCond {
252 TCGCond c;
eaa3783b 253 TCGv_reg a0, a1;
61766fe9
RH
254} DisasCond;
255
256typedef struct DisasContext {
d01a3625 257 DisasContextBase base;
61766fe9
RH
258 CPUState *cs;
259
eaa3783b
RH
260 target_ureg iaoq_f;
261 target_ureg iaoq_b;
262 target_ureg iaoq_n;
263 TCGv_reg iaoq_n_var;
61766fe9 264
86f8d05f 265 int ntempr, ntempl;
5eecd37a 266 TCGv_reg tempr[8];
86f8d05f 267 TCGv_tl templ[4];
61766fe9
RH
268
269 DisasCond null_cond;
270 TCGLabel *null_lab;
271
1a19da0d 272 uint32_t insn;
494737b7 273 uint32_t tb_flags;
3d68ee7b
RH
274 int mmu_idx;
275 int privilege;
61766fe9 276 bool psw_n_nonzero;
217d1a5e
RH
277
278#ifdef CONFIG_USER_ONLY
279 MemOp unalign;
280#endif
61766fe9
RH
281} DisasContext;
282
217d1a5e
RH
283#ifdef CONFIG_USER_ONLY
284#define UNALIGN(C) (C)->unalign
285#else
286#define UNALIGN(C) 0
287#endif
288
e36f27ef 289/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
451e4ffd 290static int expand_sm_imm(DisasContext *ctx, int val)
e36f27ef
RH
291{
292 if (val & PSW_SM_E) {
293 val = (val & ~PSW_SM_E) | PSW_E;
294 }
295 if (val & PSW_SM_W) {
296 val = (val & ~PSW_SM_W) | PSW_W;
297 }
298 return val;
299}
300
deee69a1 301/* Inverted space register indicates 0 means sr0 not inferred from base. */
451e4ffd 302static int expand_sr3x(DisasContext *ctx, int val)
deee69a1
RH
303{
304 return ~val;
305}
306
1cd012a5
RH
307/* Convert the M:A bits within a memory insn to the tri-state value
308 we use for the final M. */
451e4ffd 309static int ma_to_m(DisasContext *ctx, int val)
1cd012a5
RH
310{
311 return val & 2 ? (val & 1 ? -1 : 1) : 0;
312}
313
740038d7 314/* Convert the sign of the displacement to a pre or post-modify. */
451e4ffd 315static int pos_to_m(DisasContext *ctx, int val)
740038d7
RH
316{
317 return val ? 1 : -1;
318}
319
451e4ffd 320static int neg_to_m(DisasContext *ctx, int val)
740038d7
RH
321{
322 return val ? -1 : 1;
323}
324
325/* Used for branch targets and fp memory ops. */
451e4ffd 326static int expand_shl2(DisasContext *ctx, int val)
01afb7be
RH
327{
328 return val << 2;
329}
330
740038d7 331/* Used for fp memory ops. */
451e4ffd 332static int expand_shl3(DisasContext *ctx, int val)
740038d7
RH
333{
334 return val << 3;
335}
336
0588e061 337/* Used for assemble_21. */
451e4ffd 338static int expand_shl11(DisasContext *ctx, int val)
0588e061
RH
339{
340 return val << 11;
341}
342
01afb7be 343
40f9f908 344/* Include the auto-generated decoder. */
abff1abf 345#include "decode-insns.c.inc"
40f9f908 346
869051ea
RH
347/* We are not using a goto_tb (for whatever reason), but have updated
348 the iaq (for whatever reason), so don't do it again on exit. */
349#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
61766fe9 350
869051ea
RH
351/* We are exiting the TB, but have neither emitted a goto_tb, nor
352 updated the iaq for the next instruction to be executed. */
353#define DISAS_IAQ_N_STALE DISAS_TARGET_1
61766fe9 354
e1b5a5ed
RH
355/* Similarly, but we want to return to the main loop immediately
356 to recognize unmasked interrupts. */
357#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
c5d0aec2 358#define DISAS_EXIT DISAS_TARGET_3
e1b5a5ed 359
61766fe9 360/* global register indexes */
eaa3783b 361static TCGv_reg cpu_gr[32];
33423472 362static TCGv_i64 cpu_sr[4];
494737b7 363static TCGv_i64 cpu_srH;
eaa3783b
RH
364static TCGv_reg cpu_iaoq_f;
365static TCGv_reg cpu_iaoq_b;
c301f34e
RH
366static TCGv_i64 cpu_iasq_f;
367static TCGv_i64 cpu_iasq_b;
eaa3783b
RH
368static TCGv_reg cpu_sar;
369static TCGv_reg cpu_psw_n;
370static TCGv_reg cpu_psw_v;
371static TCGv_reg cpu_psw_cb;
372static TCGv_reg cpu_psw_cb_msb;
61766fe9
RH
373
374#include "exec/gen-icount.h"
375
376void hppa_translate_init(void)
377{
378#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
379
eaa3783b 380 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
61766fe9 381 static const GlobalVar vars[] = {
35136a77 382 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
61766fe9
RH
383 DEF_VAR(psw_n),
384 DEF_VAR(psw_v),
385 DEF_VAR(psw_cb),
386 DEF_VAR(psw_cb_msb),
387 DEF_VAR(iaoq_f),
388 DEF_VAR(iaoq_b),
389 };
390
391#undef DEF_VAR
392
393 /* Use the symbolic register names that match the disassembler. */
394 static const char gr_names[32][4] = {
395 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
396 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
397 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
398 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
399 };
33423472 400 /* SR[4-7] are not global registers so that we can index them. */
494737b7
RH
401 static const char sr_names[5][4] = {
402 "sr0", "sr1", "sr2", "sr3", "srH"
33423472 403 };
61766fe9 404
61766fe9
RH
405 int i;
406
f764718d 407 cpu_gr[0] = NULL;
61766fe9
RH
408 for (i = 1; i < 32; i++) {
409 cpu_gr[i] = tcg_global_mem_new(cpu_env,
410 offsetof(CPUHPPAState, gr[i]),
411 gr_names[i]);
412 }
33423472
RH
413 for (i = 0; i < 4; i++) {
414 cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
415 offsetof(CPUHPPAState, sr[i]),
416 sr_names[i]);
417 }
494737b7
RH
418 cpu_srH = tcg_global_mem_new_i64(cpu_env,
419 offsetof(CPUHPPAState, sr[4]),
420 sr_names[4]);
61766fe9
RH
421
422 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
423 const GlobalVar *v = &vars[i];
424 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
425 }
c301f34e
RH
426
427 cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
428 offsetof(CPUHPPAState, iasq_f),
429 "iasq_f");
430 cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
431 offsetof(CPUHPPAState, iasq_b),
432 "iasq_b");
61766fe9
RH
433}
434
129e9cc3
RH
435static DisasCond cond_make_f(void)
436{
f764718d
RH
437 return (DisasCond){
438 .c = TCG_COND_NEVER,
439 .a0 = NULL,
440 .a1 = NULL,
441 };
129e9cc3
RH
442}
443
df0232fe
RH
444static DisasCond cond_make_t(void)
445{
446 return (DisasCond){
447 .c = TCG_COND_ALWAYS,
448 .a0 = NULL,
449 .a1 = NULL,
450 };
451}
452
129e9cc3
RH
453static DisasCond cond_make_n(void)
454{
f764718d
RH
455 return (DisasCond){
456 .c = TCG_COND_NE,
457 .a0 = cpu_psw_n,
6e94937a 458 .a1 = tcg_constant_reg(0)
f764718d 459 };
129e9cc3
RH
460}
461
b47a4a02 462static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
129e9cc3 463{
129e9cc3 464 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
b47a4a02 465 return (DisasCond){
6e94937a 466 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
b47a4a02
SS
467 };
468}
129e9cc3 469
b47a4a02
SS
470static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
471{
472 TCGv_reg tmp = tcg_temp_new();
473 tcg_gen_mov_reg(tmp, a0);
474 return cond_make_0_tmp(c, tmp);
129e9cc3
RH
475}
476
eaa3783b 477static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
129e9cc3
RH
478{
479 DisasCond r = { .c = c };
480
481 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
482 r.a0 = tcg_temp_new();
eaa3783b 483 tcg_gen_mov_reg(r.a0, a0);
129e9cc3 484 r.a1 = tcg_temp_new();
eaa3783b 485 tcg_gen_mov_reg(r.a1, a1);
129e9cc3
RH
486
487 return r;
488}
489
129e9cc3
RH
490static void cond_free(DisasCond *cond)
491{
492 switch (cond->c) {
493 default:
6e94937a 494 if (cond->a0 != cpu_psw_n) {
129e9cc3
RH
495 tcg_temp_free(cond->a0);
496 }
6e94937a 497 tcg_temp_free(cond->a1);
f764718d
RH
498 cond->a0 = NULL;
499 cond->a1 = NULL;
129e9cc3
RH
500 /* fallthru */
501 case TCG_COND_ALWAYS:
502 cond->c = TCG_COND_NEVER;
503 break;
504 case TCG_COND_NEVER:
505 break;
506 }
507}
508
eaa3783b 509static TCGv_reg get_temp(DisasContext *ctx)
61766fe9 510{
86f8d05f
RH
511 unsigned i = ctx->ntempr++;
512 g_assert(i < ARRAY_SIZE(ctx->tempr));
513 return ctx->tempr[i] = tcg_temp_new();
61766fe9
RH
514}
515
86f8d05f
RH
516#ifndef CONFIG_USER_ONLY
517static TCGv_tl get_temp_tl(DisasContext *ctx)
518{
519 unsigned i = ctx->ntempl++;
520 g_assert(i < ARRAY_SIZE(ctx->templ));
521 return ctx->templ[i] = tcg_temp_new_tl();
522}
523#endif
524
eaa3783b 525static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
61766fe9 526{
eaa3783b
RH
527 TCGv_reg t = get_temp(ctx);
528 tcg_gen_movi_reg(t, v);
61766fe9
RH
529 return t;
530}
531
eaa3783b 532static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
61766fe9
RH
533{
534 if (reg == 0) {
eaa3783b
RH
535 TCGv_reg t = get_temp(ctx);
536 tcg_gen_movi_reg(t, 0);
61766fe9
RH
537 return t;
538 } else {
539 return cpu_gr[reg];
540 }
541}
542
eaa3783b 543static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
61766fe9 544{
129e9cc3 545 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
61766fe9
RH
546 return get_temp(ctx);
547 } else {
548 return cpu_gr[reg];
549 }
550}
551
eaa3783b 552static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
129e9cc3
RH
553{
554 if (ctx->null_cond.c != TCG_COND_NEVER) {
eaa3783b 555 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
6e94937a 556 ctx->null_cond.a1, dest, t);
129e9cc3 557 } else {
eaa3783b 558 tcg_gen_mov_reg(dest, t);
129e9cc3
RH
559 }
560}
561
eaa3783b 562static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
129e9cc3
RH
563{
564 if (reg != 0) {
565 save_or_nullify(ctx, cpu_gr[reg], t);
566 }
567}
568
96d6407f
RH
569#ifdef HOST_WORDS_BIGENDIAN
570# define HI_OFS 0
571# define LO_OFS 4
572#else
573# define HI_OFS 4
574# define LO_OFS 0
575#endif
576
577static TCGv_i32 load_frw_i32(unsigned rt)
578{
579 TCGv_i32 ret = tcg_temp_new_i32();
580 tcg_gen_ld_i32(ret, cpu_env,
581 offsetof(CPUHPPAState, fr[rt & 31])
582 + (rt & 32 ? LO_OFS : HI_OFS));
583 return ret;
584}
585
ebe9383c
RH
586static TCGv_i32 load_frw0_i32(unsigned rt)
587{
588 if (rt == 0) {
589 return tcg_const_i32(0);
590 } else {
591 return load_frw_i32(rt);
592 }
593}
594
595static TCGv_i64 load_frw0_i64(unsigned rt)
596{
597 if (rt == 0) {
598 return tcg_const_i64(0);
599 } else {
600 TCGv_i64 ret = tcg_temp_new_i64();
601 tcg_gen_ld32u_i64(ret, cpu_env,
602 offsetof(CPUHPPAState, fr[rt & 31])
603 + (rt & 32 ? LO_OFS : HI_OFS));
604 return ret;
605 }
606}
607
96d6407f
RH
608static void save_frw_i32(unsigned rt, TCGv_i32 val)
609{
610 tcg_gen_st_i32(val, cpu_env,
611 offsetof(CPUHPPAState, fr[rt & 31])
612 + (rt & 32 ? LO_OFS : HI_OFS));
613}
614
615#undef HI_OFS
616#undef LO_OFS
617
618static TCGv_i64 load_frd(unsigned rt)
619{
620 TCGv_i64 ret = tcg_temp_new_i64();
621 tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
622 return ret;
623}
624
ebe9383c
RH
625static TCGv_i64 load_frd0(unsigned rt)
626{
627 if (rt == 0) {
628 return tcg_const_i64(0);
629 } else {
630 return load_frd(rt);
631 }
632}
633
96d6407f
RH
634static void save_frd(unsigned rt, TCGv_i64 val)
635{
636 tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
637}
638
33423472
RH
639static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
640{
641#ifdef CONFIG_USER_ONLY
642 tcg_gen_movi_i64(dest, 0);
643#else
644 if (reg < 4) {
645 tcg_gen_mov_i64(dest, cpu_sr[reg]);
494737b7
RH
646 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
647 tcg_gen_mov_i64(dest, cpu_srH);
33423472
RH
648 } else {
649 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
650 }
651#endif
652}
653
129e9cc3
RH
654/* Skip over the implementation of an insn that has been nullified.
655 Use this when the insn is too complex for a conditional move. */
656static void nullify_over(DisasContext *ctx)
657{
658 if (ctx->null_cond.c != TCG_COND_NEVER) {
659 /* The always condition should have been handled in the main loop. */
660 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
661
662 ctx->null_lab = gen_new_label();
129e9cc3
RH
663
664 /* If we're using PSW[N], copy it to a temp because... */
6e94937a 665 if (ctx->null_cond.a0 == cpu_psw_n) {
129e9cc3 666 ctx->null_cond.a0 = tcg_temp_new();
eaa3783b 667 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
129e9cc3
RH
668 }
669 /* ... we clear it before branching over the implementation,
670 so that (1) it's clear after nullifying this insn and
671 (2) if this insn nullifies the next, PSW[N] is valid. */
672 if (ctx->psw_n_nonzero) {
673 ctx->psw_n_nonzero = false;
eaa3783b 674 tcg_gen_movi_reg(cpu_psw_n, 0);
129e9cc3
RH
675 }
676
eaa3783b 677 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
6e94937a 678 ctx->null_cond.a1, ctx->null_lab);
129e9cc3
RH
679 cond_free(&ctx->null_cond);
680 }
681}
682
683/* Save the current nullification state to PSW[N]. */
684static void nullify_save(DisasContext *ctx)
685{
686 if (ctx->null_cond.c == TCG_COND_NEVER) {
687 if (ctx->psw_n_nonzero) {
eaa3783b 688 tcg_gen_movi_reg(cpu_psw_n, 0);
129e9cc3
RH
689 }
690 return;
691 }
6e94937a 692 if (ctx->null_cond.a0 != cpu_psw_n) {
eaa3783b 693 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
6e94937a 694 ctx->null_cond.a0, ctx->null_cond.a1);
129e9cc3
RH
695 ctx->psw_n_nonzero = true;
696 }
697 cond_free(&ctx->null_cond);
698}
699
700/* Set a PSW[N] to X. The intention is that this is used immediately
701 before a goto_tb/exit_tb, so that there is no fallthru path to other
702 code within the TB. Therefore we do not update psw_n_nonzero. */
703static void nullify_set(DisasContext *ctx, bool x)
704{
705 if (ctx->psw_n_nonzero || x) {
eaa3783b 706 tcg_gen_movi_reg(cpu_psw_n, x);
129e9cc3
RH
707 }
708}
709
710/* Mark the end of an instruction that may have been nullified.
40f9f908
RH
711 This is the pair to nullify_over. Always returns true so that
712 it may be tail-called from a translate function. */
31234768 713static bool nullify_end(DisasContext *ctx)
129e9cc3
RH
714{
715 TCGLabel *null_lab = ctx->null_lab;
31234768 716 DisasJumpType status = ctx->base.is_jmp;
129e9cc3 717
f49b3537
RH
718 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
719 For UPDATED, we cannot update on the nullified path. */
720 assert(status != DISAS_IAQ_N_UPDATED);
721
129e9cc3
RH
722 if (likely(null_lab == NULL)) {
723 /* The current insn wasn't conditional or handled the condition
724 applied to it without a branch, so the (new) setting of
725 NULL_COND can be applied directly to the next insn. */
31234768 726 return true;
129e9cc3
RH
727 }
728 ctx->null_lab = NULL;
729
730 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
731 /* The next instruction will be unconditional,
732 and NULL_COND already reflects that. */
733 gen_set_label(null_lab);
734 } else {
735 /* The insn that we just executed is itself nullifying the next
736 instruction. Store the condition in the PSW[N] global.
737 We asserted PSW[N] = 0 in nullify_over, so that after the
738 label we have the proper value in place. */
739 nullify_save(ctx);
740 gen_set_label(null_lab);
741 ctx->null_cond = cond_make_n();
742 }
869051ea 743 if (status == DISAS_NORETURN) {
31234768 744 ctx->base.is_jmp = DISAS_NEXT;
129e9cc3 745 }
31234768 746 return true;
129e9cc3
RH
747}
748
eaa3783b 749static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
61766fe9
RH
750{
751 if (unlikely(ival == -1)) {
eaa3783b 752 tcg_gen_mov_reg(dest, vval);
61766fe9 753 } else {
eaa3783b 754 tcg_gen_movi_reg(dest, ival);
61766fe9
RH
755 }
756}
757
eaa3783b 758static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
61766fe9
RH
759{
760 return ctx->iaoq_f + disp + 8;
761}
762
763static void gen_excp_1(int exception)
764{
29dd6f64 765 gen_helper_excp(cpu_env, tcg_constant_i32(exception));
61766fe9
RH
766}
767
31234768 768static void gen_excp(DisasContext *ctx, int exception)
61766fe9
RH
769{
770 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
771 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
129e9cc3 772 nullify_save(ctx);
61766fe9 773 gen_excp_1(exception);
31234768 774 ctx->base.is_jmp = DISAS_NORETURN;
61766fe9
RH
775}
776
31234768 777static bool gen_excp_iir(DisasContext *ctx, int exc)
1a19da0d 778{
31234768 779 nullify_over(ctx);
29dd6f64
RH
780 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
781 cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
31234768
RH
782 gen_excp(ctx, exc);
783 return nullify_end(ctx);
1a19da0d
RH
784}
785
31234768 786static bool gen_illegal(DisasContext *ctx)
61766fe9 787{
31234768 788 return gen_excp_iir(ctx, EXCP_ILL);
61766fe9
RH
789}
790
40f9f908
RH
791#ifdef CONFIG_USER_ONLY
792#define CHECK_MOST_PRIVILEGED(EXCP) \
793 return gen_excp_iir(ctx, EXCP)
794#else
795#define CHECK_MOST_PRIVILEGED(EXCP) \
31234768
RH
796 do { \
797 if (ctx->privilege != 0) { \
798 return gen_excp_iir(ctx, EXCP); \
799 } \
e1b5a5ed 800 } while (0)
40f9f908 801#endif
e1b5a5ed 802
eaa3783b 803static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
61766fe9 804{
57f91498 805 return translator_use_goto_tb(&ctx->base, dest);
61766fe9
RH
806}
807
129e9cc3
RH
808/* If the next insn is to be nullified, and it's on the same page,
809 and we're not attempting to set a breakpoint on it, then we can
810 totally skip the nullified insn. This avoids creating and
811 executing a TB that merely branches to the next TB. */
812static bool use_nullify_skip(DisasContext *ctx)
813{
814 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
815 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
816}
817
61766fe9 818static void gen_goto_tb(DisasContext *ctx, int which,
eaa3783b 819 target_ureg f, target_ureg b)
61766fe9
RH
820{
821 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
822 tcg_gen_goto_tb(which);
eaa3783b
RH
823 tcg_gen_movi_reg(cpu_iaoq_f, f);
824 tcg_gen_movi_reg(cpu_iaoq_b, b);
07ea28b4 825 tcg_gen_exit_tb(ctx->base.tb, which);
61766fe9
RH
826 } else {
827 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
828 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
8532a14e 829 tcg_gen_lookup_and_goto_ptr();
61766fe9
RH
830 }
831}
832
b47a4a02
SS
833static bool cond_need_sv(int c)
834{
835 return c == 2 || c == 3 || c == 6;
836}
837
838static bool cond_need_cb(int c)
839{
840 return c == 4 || c == 5;
841}
842
843/*
844 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
845 * the Parisc 1.1 Architecture Reference Manual for details.
846 */
b2167459 847
eaa3783b
RH
848static DisasCond do_cond(unsigned cf, TCGv_reg res,
849 TCGv_reg cb_msb, TCGv_reg sv)
b2167459
RH
850{
851 DisasCond cond;
eaa3783b 852 TCGv_reg tmp;
b2167459
RH
853
854 switch (cf >> 1) {
b47a4a02 855 case 0: /* Never / TR (0 / 1) */
b2167459
RH
856 cond = cond_make_f();
857 break;
858 case 1: /* = / <> (Z / !Z) */
859 cond = cond_make_0(TCG_COND_EQ, res);
860 break;
b47a4a02
SS
861 case 2: /* < / >= (N ^ V / !(N ^ V) */
862 tmp = tcg_temp_new();
863 tcg_gen_xor_reg(tmp, res, sv);
864 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
b2167459 865 break;
b47a4a02
SS
866 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
867 /*
868 * Simplify:
869 * (N ^ V) | Z
870 * ((res < 0) ^ (sv < 0)) | !res
871 * ((res ^ sv) < 0) | !res
872 * (~(res ^ sv) >= 0) | !res
873 * !(~(res ^ sv) >> 31) | !res
874 * !(~(res ^ sv) >> 31 & res)
875 */
876 tmp = tcg_temp_new();
877 tcg_gen_eqv_reg(tmp, res, sv);
878 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
879 tcg_gen_and_reg(tmp, tmp, res);
880 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
b2167459
RH
881 break;
882 case 4: /* NUV / UV (!C / C) */
883 cond = cond_make_0(TCG_COND_EQ, cb_msb);
884 break;
885 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
886 tmp = tcg_temp_new();
eaa3783b
RH
887 tcg_gen_neg_reg(tmp, cb_msb);
888 tcg_gen_and_reg(tmp, tmp, res);
b47a4a02 889 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
b2167459
RH
890 break;
891 case 6: /* SV / NSV (V / !V) */
892 cond = cond_make_0(TCG_COND_LT, sv);
893 break;
894 case 7: /* OD / EV */
895 tmp = tcg_temp_new();
eaa3783b 896 tcg_gen_andi_reg(tmp, res, 1);
b47a4a02 897 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
b2167459
RH
898 break;
899 default:
900 g_assert_not_reached();
901 }
902 if (cf & 1) {
903 cond.c = tcg_invert_cond(cond.c);
904 }
905
906 return cond;
907}
908
909/* Similar, but for the special case of subtraction without borrow, we
910 can use the inputs directly. This can allow other computation to be
911 deleted as unused. */
912
eaa3783b
RH
913static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
914 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
b2167459
RH
915{
916 DisasCond cond;
917
918 switch (cf >> 1) {
919 case 1: /* = / <> */
920 cond = cond_make(TCG_COND_EQ, in1, in2);
921 break;
922 case 2: /* < / >= */
923 cond = cond_make(TCG_COND_LT, in1, in2);
924 break;
925 case 3: /* <= / > */
926 cond = cond_make(TCG_COND_LE, in1, in2);
927 break;
928 case 4: /* << / >>= */
929 cond = cond_make(TCG_COND_LTU, in1, in2);
930 break;
931 case 5: /* <<= / >> */
932 cond = cond_make(TCG_COND_LEU, in1, in2);
933 break;
934 default:
b47a4a02 935 return do_cond(cf, res, NULL, sv);
b2167459
RH
936 }
937 if (cf & 1) {
938 cond.c = tcg_invert_cond(cond.c);
939 }
940
941 return cond;
942}
943
df0232fe
RH
944/*
945 * Similar, but for logicals, where the carry and overflow bits are not
946 * computed, and use of them is undefined.
947 *
948 * Undefined or not, hardware does not trap. It seems reasonable to
949 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
950 * how cases c={2,3} are treated.
951 */
b2167459 952
eaa3783b 953static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
b2167459 954{
df0232fe
RH
955 switch (cf) {
956 case 0: /* never */
957 case 9: /* undef, C */
958 case 11: /* undef, C & !Z */
959 case 12: /* undef, V */
960 return cond_make_f();
961
962 case 1: /* true */
963 case 8: /* undef, !C */
964 case 10: /* undef, !C | Z */
965 case 13: /* undef, !V */
966 return cond_make_t();
967
968 case 2: /* == */
969 return cond_make_0(TCG_COND_EQ, res);
970 case 3: /* <> */
971 return cond_make_0(TCG_COND_NE, res);
972 case 4: /* < */
973 return cond_make_0(TCG_COND_LT, res);
974 case 5: /* >= */
975 return cond_make_0(TCG_COND_GE, res);
976 case 6: /* <= */
977 return cond_make_0(TCG_COND_LE, res);
978 case 7: /* > */
979 return cond_make_0(TCG_COND_GT, res);
980
981 case 14: /* OD */
982 case 15: /* EV */
983 return do_cond(cf, res, NULL, NULL);
984
985 default:
986 g_assert_not_reached();
b2167459 987 }
b2167459
RH
988}
989
98cd9ca7
RH
990/* Similar, but for shift/extract/deposit conditions. */
991
eaa3783b 992static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
98cd9ca7
RH
993{
994 unsigned c, f;
995
996 /* Convert the compressed condition codes to standard.
997 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
998 4-7 are the reverse of 0-3. */
999 c = orig & 3;
1000 if (c == 3) {
1001 c = 7;
1002 }
1003 f = (orig & 4) / 4;
1004
1005 return do_log_cond(c * 2 + f, res);
1006}
1007
b2167459
RH
1008/* Similar, but for unit conditions. */
1009
eaa3783b
RH
1010static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
1011 TCGv_reg in1, TCGv_reg in2)
b2167459
RH
1012{
1013 DisasCond cond;
eaa3783b 1014 TCGv_reg tmp, cb = NULL;
b2167459 1015
b2167459
RH
1016 if (cf & 8) {
1017 /* Since we want to test lots of carry-out bits all at once, do not
1018 * do our normal thing and compute carry-in of bit B+1 since that
1019 * leaves us with carry bits spread across two words.
1020 */
1021 cb = tcg_temp_new();
1022 tmp = tcg_temp_new();
eaa3783b
RH
1023 tcg_gen_or_reg(cb, in1, in2);
1024 tcg_gen_and_reg(tmp, in1, in2);
1025 tcg_gen_andc_reg(cb, cb, res);
1026 tcg_gen_or_reg(cb, cb, tmp);
b2167459
RH
1027 tcg_temp_free(tmp);
1028 }
1029
1030 switch (cf >> 1) {
1031 case 0: /* never / TR */
1032 case 1: /* undefined */
1033 case 5: /* undefined */
1034 cond = cond_make_f();
1035 break;
1036
1037 case 2: /* SBZ / NBZ */
1038 /* See hasless(v,1) from
1039 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1040 */
1041 tmp = tcg_temp_new();
eaa3783b
RH
1042 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1043 tcg_gen_andc_reg(tmp, tmp, res);
1044 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
b2167459
RH
1045 cond = cond_make_0(TCG_COND_NE, tmp);
1046 tcg_temp_free(tmp);
1047 break;
1048
1049 case 3: /* SHZ / NHZ */
1050 tmp = tcg_temp_new();
eaa3783b
RH
1051 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1052 tcg_gen_andc_reg(tmp, tmp, res);
1053 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
b2167459
RH
1054 cond = cond_make_0(TCG_COND_NE, tmp);
1055 tcg_temp_free(tmp);
1056 break;
1057
1058 case 4: /* SDC / NDC */
eaa3783b 1059 tcg_gen_andi_reg(cb, cb, 0x88888888u);
b2167459
RH
1060 cond = cond_make_0(TCG_COND_NE, cb);
1061 break;
1062
1063 case 6: /* SBC / NBC */
eaa3783b 1064 tcg_gen_andi_reg(cb, cb, 0x80808080u);
b2167459
RH
1065 cond = cond_make_0(TCG_COND_NE, cb);
1066 break;
1067
1068 case 7: /* SHC / NHC */
eaa3783b 1069 tcg_gen_andi_reg(cb, cb, 0x80008000u);
b2167459
RH
1070 cond = cond_make_0(TCG_COND_NE, cb);
1071 break;
1072
1073 default:
1074 g_assert_not_reached();
1075 }
1076 if (cf & 8) {
1077 tcg_temp_free(cb);
1078 }
1079 if (cf & 1) {
1080 cond.c = tcg_invert_cond(cond.c);
1081 }
1082
1083 return cond;
1084}
1085
1086/* Compute signed overflow for addition. */
eaa3783b
RH
1087static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1088 TCGv_reg in1, TCGv_reg in2)
b2167459 1089{
eaa3783b
RH
1090 TCGv_reg sv = get_temp(ctx);
1091 TCGv_reg tmp = tcg_temp_new();
b2167459 1092
eaa3783b
RH
1093 tcg_gen_xor_reg(sv, res, in1);
1094 tcg_gen_xor_reg(tmp, in1, in2);
1095 tcg_gen_andc_reg(sv, sv, tmp);
b2167459
RH
1096 tcg_temp_free(tmp);
1097
1098 return sv;
1099}
1100
1101/* Compute signed overflow for subtraction. */
eaa3783b
RH
1102static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1103 TCGv_reg in1, TCGv_reg in2)
b2167459 1104{
eaa3783b
RH
1105 TCGv_reg sv = get_temp(ctx);
1106 TCGv_reg tmp = tcg_temp_new();
b2167459 1107
eaa3783b
RH
1108 tcg_gen_xor_reg(sv, res, in1);
1109 tcg_gen_xor_reg(tmp, in1, in2);
1110 tcg_gen_and_reg(sv, sv, tmp);
b2167459
RH
1111 tcg_temp_free(tmp);
1112
1113 return sv;
1114}
1115
31234768
RH
1116static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1117 TCGv_reg in2, unsigned shift, bool is_l,
1118 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
b2167459 1119{
eaa3783b 1120 TCGv_reg dest, cb, cb_msb, sv, tmp;
b2167459
RH
1121 unsigned c = cf >> 1;
1122 DisasCond cond;
1123
1124 dest = tcg_temp_new();
f764718d
RH
1125 cb = NULL;
1126 cb_msb = NULL;
b2167459
RH
1127
1128 if (shift) {
1129 tmp = get_temp(ctx);
eaa3783b 1130 tcg_gen_shli_reg(tmp, in1, shift);
b2167459
RH
1131 in1 = tmp;
1132 }
1133
b47a4a02 1134 if (!is_l || cond_need_cb(c)) {
29dd6f64 1135 TCGv_reg zero = tcg_constant_reg(0);
b2167459 1136 cb_msb = get_temp(ctx);
eaa3783b 1137 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
b2167459 1138 if (is_c) {
eaa3783b 1139 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
b2167459 1140 }
b2167459
RH
1141 if (!is_l) {
1142 cb = get_temp(ctx);
eaa3783b
RH
1143 tcg_gen_xor_reg(cb, in1, in2);
1144 tcg_gen_xor_reg(cb, cb, dest);
b2167459
RH
1145 }
1146 } else {
eaa3783b 1147 tcg_gen_add_reg(dest, in1, in2);
b2167459 1148 if (is_c) {
eaa3783b 1149 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
b2167459
RH
1150 }
1151 }
1152
1153 /* Compute signed overflow if required. */
f764718d 1154 sv = NULL;
b47a4a02 1155 if (is_tsv || cond_need_sv(c)) {
b2167459
RH
1156 sv = do_add_sv(ctx, dest, in1, in2);
1157 if (is_tsv) {
1158 /* ??? Need to include overflow from shift. */
1159 gen_helper_tsv(cpu_env, sv);
1160 }
1161 }
1162
1163 /* Emit any conditional trap before any writeback. */
1164 cond = do_cond(cf, dest, cb_msb, sv);
1165 if (is_tc) {
b2167459 1166 tmp = tcg_temp_new();
eaa3783b 1167 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
b2167459
RH
1168 gen_helper_tcond(cpu_env, tmp);
1169 tcg_temp_free(tmp);
1170 }
1171
1172 /* Write back the result. */
1173 if (!is_l) {
1174 save_or_nullify(ctx, cpu_psw_cb, cb);
1175 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1176 }
1177 save_gpr(ctx, rt, dest);
1178 tcg_temp_free(dest);
1179
1180 /* Install the new nullification. */
1181 cond_free(&ctx->null_cond);
1182 ctx->null_cond = cond;
b2167459
RH
1183}
1184
0c982a28
RH
1185static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1186 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1187{
1188 TCGv_reg tcg_r1, tcg_r2;
1189
1190 if (a->cf) {
1191 nullify_over(ctx);
1192 }
1193 tcg_r1 = load_gpr(ctx, a->r1);
1194 tcg_r2 = load_gpr(ctx, a->r2);
1195 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1196 return nullify_end(ctx);
1197}
1198
0588e061
RH
1199static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1200 bool is_tsv, bool is_tc)
1201{
1202 TCGv_reg tcg_im, tcg_r2;
1203
1204 if (a->cf) {
1205 nullify_over(ctx);
1206 }
1207 tcg_im = load_const(ctx, a->i);
1208 tcg_r2 = load_gpr(ctx, a->r);
1209 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1210 return nullify_end(ctx);
1211}
1212
31234768
RH
1213static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1214 TCGv_reg in2, bool is_tsv, bool is_b,
1215 bool is_tc, unsigned cf)
b2167459 1216{
eaa3783b 1217 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
b2167459
RH
1218 unsigned c = cf >> 1;
1219 DisasCond cond;
1220
1221 dest = tcg_temp_new();
1222 cb = tcg_temp_new();
1223 cb_msb = tcg_temp_new();
1224
29dd6f64 1225 zero = tcg_constant_reg(0);
b2167459
RH
1226 if (is_b) {
1227 /* DEST,C = IN1 + ~IN2 + C. */
eaa3783b
RH
1228 tcg_gen_not_reg(cb, in2);
1229 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
1230 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1231 tcg_gen_xor_reg(cb, cb, in1);
1232 tcg_gen_xor_reg(cb, cb, dest);
b2167459
RH
1233 } else {
1234 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1235 operations by seeding the high word with 1 and subtracting. */
eaa3783b
RH
1236 tcg_gen_movi_reg(cb_msb, 1);
1237 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
1238 tcg_gen_eqv_reg(cb, in1, in2);
1239 tcg_gen_xor_reg(cb, cb, dest);
b2167459 1240 }
b2167459
RH
1241
1242 /* Compute signed overflow if required. */
f764718d 1243 sv = NULL;
b47a4a02 1244 if (is_tsv || cond_need_sv(c)) {
b2167459
RH
1245 sv = do_sub_sv(ctx, dest, in1, in2);
1246 if (is_tsv) {
1247 gen_helper_tsv(cpu_env, sv);
1248 }
1249 }
1250
1251 /* Compute the condition. We cannot use the special case for borrow. */
1252 if (!is_b) {
1253 cond = do_sub_cond(cf, dest, in1, in2, sv);
1254 } else {
1255 cond = do_cond(cf, dest, cb_msb, sv);
1256 }
1257
1258 /* Emit any conditional trap before any writeback. */
1259 if (is_tc) {
b2167459 1260 tmp = tcg_temp_new();
eaa3783b 1261 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
b2167459
RH
1262 gen_helper_tcond(cpu_env, tmp);
1263 tcg_temp_free(tmp);
1264 }
1265
1266 /* Write back the result. */
1267 save_or_nullify(ctx, cpu_psw_cb, cb);
1268 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1269 save_gpr(ctx, rt, dest);
1270 tcg_temp_free(dest);
79826f99
RH
1271 tcg_temp_free(cb);
1272 tcg_temp_free(cb_msb);
b2167459
RH
1273
1274 /* Install the new nullification. */
1275 cond_free(&ctx->null_cond);
1276 ctx->null_cond = cond;
b2167459
RH
1277}
1278
0c982a28
RH
1279static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1280 bool is_tsv, bool is_b, bool is_tc)
1281{
1282 TCGv_reg tcg_r1, tcg_r2;
1283
1284 if (a->cf) {
1285 nullify_over(ctx);
1286 }
1287 tcg_r1 = load_gpr(ctx, a->r1);
1288 tcg_r2 = load_gpr(ctx, a->r2);
1289 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1290 return nullify_end(ctx);
1291}
1292
0588e061
RH
1293static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1294{
1295 TCGv_reg tcg_im, tcg_r2;
1296
1297 if (a->cf) {
1298 nullify_over(ctx);
1299 }
1300 tcg_im = load_const(ctx, a->i);
1301 tcg_r2 = load_gpr(ctx, a->r);
1302 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1303 return nullify_end(ctx);
1304}
1305
31234768
RH
1306static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1307 TCGv_reg in2, unsigned cf)
b2167459 1308{
eaa3783b 1309 TCGv_reg dest, sv;
b2167459
RH
1310 DisasCond cond;
1311
1312 dest = tcg_temp_new();
eaa3783b 1313 tcg_gen_sub_reg(dest, in1, in2);
b2167459
RH
1314
1315 /* Compute signed overflow if required. */
f764718d 1316 sv = NULL;
b47a4a02 1317 if (cond_need_sv(cf >> 1)) {
b2167459
RH
1318 sv = do_sub_sv(ctx, dest, in1, in2);
1319 }
1320
1321 /* Form the condition for the compare. */
1322 cond = do_sub_cond(cf, dest, in1, in2, sv);
1323
1324 /* Clear. */
eaa3783b 1325 tcg_gen_movi_reg(dest, 0);
b2167459
RH
1326 save_gpr(ctx, rt, dest);
1327 tcg_temp_free(dest);
1328
1329 /* Install the new nullification. */
1330 cond_free(&ctx->null_cond);
1331 ctx->null_cond = cond;
b2167459
RH
1332}
1333
31234768
RH
1334static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1335 TCGv_reg in2, unsigned cf,
1336 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
b2167459 1337{
eaa3783b 1338 TCGv_reg dest = dest_gpr(ctx, rt);
b2167459
RH
1339
1340 /* Perform the operation, and writeback. */
1341 fn(dest, in1, in2);
1342 save_gpr(ctx, rt, dest);
1343
1344 /* Install the new nullification. */
1345 cond_free(&ctx->null_cond);
1346 if (cf) {
1347 ctx->null_cond = do_log_cond(cf, dest);
1348 }
b2167459
RH
1349}
1350
0c982a28
RH
1351static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1352 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1353{
1354 TCGv_reg tcg_r1, tcg_r2;
1355
1356 if (a->cf) {
1357 nullify_over(ctx);
1358 }
1359 tcg_r1 = load_gpr(ctx, a->r1);
1360 tcg_r2 = load_gpr(ctx, a->r2);
1361 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1362 return nullify_end(ctx);
1363}
1364
31234768
RH
1365static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1366 TCGv_reg in2, unsigned cf, bool is_tc,
1367 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
b2167459 1368{
eaa3783b 1369 TCGv_reg dest;
b2167459
RH
1370 DisasCond cond;
1371
1372 if (cf == 0) {
1373 dest = dest_gpr(ctx, rt);
1374 fn(dest, in1, in2);
1375 save_gpr(ctx, rt, dest);
1376 cond_free(&ctx->null_cond);
1377 } else {
1378 dest = tcg_temp_new();
1379 fn(dest, in1, in2);
1380
1381 cond = do_unit_cond(cf, dest, in1, in2);
1382
1383 if (is_tc) {
eaa3783b 1384 TCGv_reg tmp = tcg_temp_new();
eaa3783b 1385 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
b2167459
RH
1386 gen_helper_tcond(cpu_env, tmp);
1387 tcg_temp_free(tmp);
1388 }
1389 save_gpr(ctx, rt, dest);
1390
1391 cond_free(&ctx->null_cond);
1392 ctx->null_cond = cond;
1393 }
b2167459
RH
1394}
1395
86f8d05f 1396#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
1397/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1398 from the top 2 bits of the base register. There are a few system
1399 instructions that have a 3-bit space specifier, for which SR0 is
1400 not special. To handle this, pass ~SP. */
86f8d05f
RH
1401static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1402{
1403 TCGv_ptr ptr;
1404 TCGv_reg tmp;
1405 TCGv_i64 spc;
1406
1407 if (sp != 0) {
8d6ae7fb
RH
1408 if (sp < 0) {
1409 sp = ~sp;
1410 }
1411 spc = get_temp_tl(ctx);
1412 load_spr(ctx, spc, sp);
1413 return spc;
86f8d05f 1414 }
494737b7
RH
1415 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1416 return cpu_srH;
1417 }
86f8d05f
RH
1418
1419 ptr = tcg_temp_new_ptr();
1420 tmp = tcg_temp_new();
1421 spc = get_temp_tl(ctx);
1422
1423 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1424 tcg_gen_andi_reg(tmp, tmp, 030);
1425 tcg_gen_trunc_reg_ptr(ptr, tmp);
1426 tcg_temp_free(tmp);
1427
1428 tcg_gen_add_ptr(ptr, ptr, cpu_env);
1429 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1430 tcg_temp_free_ptr(ptr);
1431
1432 return spc;
1433}
1434#endif
1435
1436static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1437 unsigned rb, unsigned rx, int scale, target_sreg disp,
1438 unsigned sp, int modify, bool is_phys)
1439{
1440 TCGv_reg base = load_gpr(ctx, rb);
1441 TCGv_reg ofs;
1442
1443 /* Note that RX is mutually exclusive with DISP. */
1444 if (rx) {
1445 ofs = get_temp(ctx);
1446 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1447 tcg_gen_add_reg(ofs, ofs, base);
1448 } else if (disp || modify) {
1449 ofs = get_temp(ctx);
1450 tcg_gen_addi_reg(ofs, base, disp);
1451 } else {
1452 ofs = base;
1453 }
1454
1455 *pofs = ofs;
1456#ifdef CONFIG_USER_ONLY
1457 *pgva = (modify <= 0 ? ofs : base);
1458#else
1459 TCGv_tl addr = get_temp_tl(ctx);
1460 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
494737b7 1461 if (ctx->tb_flags & PSW_W) {
86f8d05f
RH
1462 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
1463 }
1464 if (!is_phys) {
1465 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1466 }
1467 *pgva = addr;
1468#endif
1469}
1470
96d6407f
RH
1471/* Emit a memory load. The modify parameter should be
1472 * < 0 for pre-modify,
1473 * > 0 for post-modify,
1474 * = 0 for no base register update.
1475 */
1476static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
eaa3783b 1477 unsigned rx, int scale, target_sreg disp,
14776ab5 1478 unsigned sp, int modify, MemOp mop)
96d6407f 1479{
86f8d05f
RH
1480 TCGv_reg ofs;
1481 TCGv_tl addr;
96d6407f
RH
1482
1483 /* Caller uses nullify_over/nullify_end. */
1484 assert(ctx->null_cond.c == TCG_COND_NEVER);
1485
86f8d05f
RH
1486 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1487 ctx->mmu_idx == MMU_PHYS_IDX);
217d1a5e 1488 tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1489 if (modify) {
1490 save_gpr(ctx, rb, ofs);
96d6407f 1491 }
96d6407f
RH
1492}
1493
1494static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
eaa3783b 1495 unsigned rx, int scale, target_sreg disp,
14776ab5 1496 unsigned sp, int modify, MemOp mop)
96d6407f 1497{
86f8d05f
RH
1498 TCGv_reg ofs;
1499 TCGv_tl addr;
96d6407f
RH
1500
1501 /* Caller uses nullify_over/nullify_end. */
1502 assert(ctx->null_cond.c == TCG_COND_NEVER);
1503
86f8d05f
RH
1504 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1505 ctx->mmu_idx == MMU_PHYS_IDX);
217d1a5e 1506 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1507 if (modify) {
1508 save_gpr(ctx, rb, ofs);
96d6407f 1509 }
96d6407f
RH
1510}
1511
1512static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
eaa3783b 1513 unsigned rx, int scale, target_sreg disp,
14776ab5 1514 unsigned sp, int modify, MemOp mop)
96d6407f 1515{
86f8d05f
RH
1516 TCGv_reg ofs;
1517 TCGv_tl addr;
96d6407f
RH
1518
1519 /* Caller uses nullify_over/nullify_end. */
1520 assert(ctx->null_cond.c == TCG_COND_NEVER);
1521
86f8d05f
RH
1522 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1523 ctx->mmu_idx == MMU_PHYS_IDX);
217d1a5e 1524 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1525 if (modify) {
1526 save_gpr(ctx, rb, ofs);
96d6407f 1527 }
96d6407f
RH
1528}
1529
1530static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
eaa3783b 1531 unsigned rx, int scale, target_sreg disp,
14776ab5 1532 unsigned sp, int modify, MemOp mop)
96d6407f 1533{
86f8d05f
RH
1534 TCGv_reg ofs;
1535 TCGv_tl addr;
96d6407f
RH
1536
1537 /* Caller uses nullify_over/nullify_end. */
1538 assert(ctx->null_cond.c == TCG_COND_NEVER);
1539
86f8d05f
RH
1540 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1541 ctx->mmu_idx == MMU_PHYS_IDX);
217d1a5e 1542 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1543 if (modify) {
1544 save_gpr(ctx, rb, ofs);
96d6407f 1545 }
96d6407f
RH
1546}
1547
eaa3783b
RH
1548#if TARGET_REGISTER_BITS == 64
1549#define do_load_reg do_load_64
1550#define do_store_reg do_store_64
96d6407f 1551#else
eaa3783b
RH
1552#define do_load_reg do_load_32
1553#define do_store_reg do_store_32
96d6407f
RH
1554#endif
1555
1cd012a5 1556static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
31234768 1557 unsigned rx, int scale, target_sreg disp,
14776ab5 1558 unsigned sp, int modify, MemOp mop)
96d6407f 1559{
eaa3783b 1560 TCGv_reg dest;
96d6407f
RH
1561
1562 nullify_over(ctx);
1563
1564 if (modify == 0) {
1565 /* No base register update. */
1566 dest = dest_gpr(ctx, rt);
1567 } else {
1568 /* Make sure if RT == RB, we see the result of the load. */
1569 dest = get_temp(ctx);
1570 }
86f8d05f 1571 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
96d6407f
RH
1572 save_gpr(ctx, rt, dest);
1573
1cd012a5 1574 return nullify_end(ctx);
96d6407f
RH
1575}
1576
740038d7 1577static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1578 unsigned rx, int scale, target_sreg disp,
1579 unsigned sp, int modify)
96d6407f
RH
1580{
1581 TCGv_i32 tmp;
1582
1583 nullify_over(ctx);
1584
1585 tmp = tcg_temp_new_i32();
86f8d05f 1586 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
96d6407f
RH
1587 save_frw_i32(rt, tmp);
1588 tcg_temp_free_i32(tmp);
1589
1590 if (rt == 0) {
1591 gen_helper_loaded_fr0(cpu_env);
1592 }
1593
740038d7
RH
1594 return nullify_end(ctx);
1595}
1596
1597static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1598{
1599 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1600 a->disp, a->sp, a->m);
96d6407f
RH
1601}
1602
740038d7 1603static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1604 unsigned rx, int scale, target_sreg disp,
1605 unsigned sp, int modify)
96d6407f
RH
1606{
1607 TCGv_i64 tmp;
1608
1609 nullify_over(ctx);
1610
1611 tmp = tcg_temp_new_i64();
fc313c64 1612 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
96d6407f
RH
1613 save_frd(rt, tmp);
1614 tcg_temp_free_i64(tmp);
1615
1616 if (rt == 0) {
1617 gen_helper_loaded_fr0(cpu_env);
1618 }
1619
740038d7
RH
1620 return nullify_end(ctx);
1621}
1622
1623static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1624{
1625 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1626 a->disp, a->sp, a->m);
96d6407f
RH
1627}
1628
1cd012a5 1629static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
31234768 1630 target_sreg disp, unsigned sp,
14776ab5 1631 int modify, MemOp mop)
96d6407f
RH
1632{
1633 nullify_over(ctx);
86f8d05f 1634 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1cd012a5 1635 return nullify_end(ctx);
96d6407f
RH
1636}
1637
740038d7 1638static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1639 unsigned rx, int scale, target_sreg disp,
1640 unsigned sp, int modify)
96d6407f
RH
1641{
1642 TCGv_i32 tmp;
1643
1644 nullify_over(ctx);
1645
1646 tmp = load_frw_i32(rt);
86f8d05f 1647 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
96d6407f
RH
1648 tcg_temp_free_i32(tmp);
1649
740038d7
RH
1650 return nullify_end(ctx);
1651}
1652
1653static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1654{
1655 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1656 a->disp, a->sp, a->m);
96d6407f
RH
1657}
1658
740038d7 1659static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1660 unsigned rx, int scale, target_sreg disp,
1661 unsigned sp, int modify)
96d6407f
RH
1662{
1663 TCGv_i64 tmp;
1664
1665 nullify_over(ctx);
1666
1667 tmp = load_frd(rt);
fc313c64 1668 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
96d6407f
RH
1669 tcg_temp_free_i64(tmp);
1670
740038d7
RH
1671 return nullify_end(ctx);
1672}
1673
1674static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1675{
1676 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1677 a->disp, a->sp, a->m);
96d6407f
RH
1678}
1679
1ca74648 1680static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1681 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
ebe9383c
RH
1682{
1683 TCGv_i32 tmp;
1684
1685 nullify_over(ctx);
1686 tmp = load_frw0_i32(ra);
1687
1688 func(tmp, cpu_env, tmp);
1689
1690 save_frw_i32(rt, tmp);
1691 tcg_temp_free_i32(tmp);
1ca74648 1692 return nullify_end(ctx);
ebe9383c
RH
1693}
1694
1ca74648 1695static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1696 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
ebe9383c
RH
1697{
1698 TCGv_i32 dst;
1699 TCGv_i64 src;
1700
1701 nullify_over(ctx);
1702 src = load_frd(ra);
1703 dst = tcg_temp_new_i32();
1704
1705 func(dst, cpu_env, src);
1706
1707 tcg_temp_free_i64(src);
1708 save_frw_i32(rt, dst);
1709 tcg_temp_free_i32(dst);
1ca74648 1710 return nullify_end(ctx);
ebe9383c
RH
1711}
1712
1ca74648 1713static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1714 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
ebe9383c
RH
1715{
1716 TCGv_i64 tmp;
1717
1718 nullify_over(ctx);
1719 tmp = load_frd0(ra);
1720
1721 func(tmp, cpu_env, tmp);
1722
1723 save_frd(rt, tmp);
1724 tcg_temp_free_i64(tmp);
1ca74648 1725 return nullify_end(ctx);
ebe9383c
RH
1726}
1727
1ca74648 1728static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1729 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
ebe9383c
RH
1730{
1731 TCGv_i32 src;
1732 TCGv_i64 dst;
1733
1734 nullify_over(ctx);
1735 src = load_frw0_i32(ra);
1736 dst = tcg_temp_new_i64();
1737
1738 func(dst, cpu_env, src);
1739
1740 tcg_temp_free_i32(src);
1741 save_frd(rt, dst);
1742 tcg_temp_free_i64(dst);
1ca74648 1743 return nullify_end(ctx);
ebe9383c
RH
1744}
1745
1ca74648 1746static bool do_fop_weww(DisasContext *ctx, unsigned rt,
31234768
RH
1747 unsigned ra, unsigned rb,
1748 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
ebe9383c
RH
1749{
1750 TCGv_i32 a, b;
1751
1752 nullify_over(ctx);
1753 a = load_frw0_i32(ra);
1754 b = load_frw0_i32(rb);
1755
1756 func(a, cpu_env, a, b);
1757
1758 tcg_temp_free_i32(b);
1759 save_frw_i32(rt, a);
1760 tcg_temp_free_i32(a);
1ca74648 1761 return nullify_end(ctx);
ebe9383c
RH
1762}
1763
1ca74648 1764static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
31234768
RH
1765 unsigned ra, unsigned rb,
1766 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
ebe9383c
RH
1767{
1768 TCGv_i64 a, b;
1769
1770 nullify_over(ctx);
1771 a = load_frd0(ra);
1772 b = load_frd0(rb);
1773
1774 func(a, cpu_env, a, b);
1775
1776 tcg_temp_free_i64(b);
1777 save_frd(rt, a);
1778 tcg_temp_free_i64(a);
1ca74648 1779 return nullify_end(ctx);
ebe9383c
RH
1780}
1781
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
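
/*
 * A note on nullification in the branch helpers: a branch with the ,n
 * completer sets PSW[N], which nullifies (skips) the insn in the
 * branch's delay slot.  For conditional branches the architecture only
 * nullifies taken forward branches and not-taken backward branches,
 * which is why do_cbranch below computes "n" separately for each arm.
 * When use_nullify_skip() is true the skip can be resolved at translate
 * time, so we jump straight to dest + 4 instead of materializing
 * PSW[N] at run time.
 */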

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
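
/*
 * Note on the final arm of do_ibranch above: when the branch is still
 * guarded by a live nullification condition, no TCG branch is emitted.
 * Instead a movcond selects between the fall-through address and the
 * branch target, and iaoq_n is marked non-constant (iaoq_n = -1, with
 * iaoq_n_var holding the run-time value) so that the end-of-TB code
 * treats the next queue entry as dynamic.
 */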

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
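
/*
 * Worked example for the default case above, with assumed values: at
 * ctx->privilege == 1, a target offset whose low bits are 0 (requesting
 * privilege 0) yields dest = (offset & -4) | 1, which is unsigned-greater
 * than offset, so the movcond keeps dest and the branch runs at
 * privilege 1 rather than 0.  Low bits of 2 or 3 make offset the larger
 * value and it is kept unchanged.  Since smaller numbers mean more
 * privilege, taking the unsigned maximum of the two low-bit values can
 * only preserve or lower the effective privilege.
 */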

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

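/*
 * The offsets handled above follow the Linux/hppa gateway page layout:
 * 0x00 is a null pointer call, 0xb0 the light-weight syscall (LWS)
 * entry that userspace uses for atomic compare-and-swap, 0xe0 the
 * set-thread-pointer entry that loads cr27 from r26, and 0x100 the
 * normal syscall entry.  In user-only emulation these are raised
 * directly as exceptions rather than executing gateway code.
 */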
deee69a1 2061static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2062{
2063 cond_free(&ctx->null_cond);
31234768 2064 return true;
b2167459
RH
2065}
2066
40f9f908 2067static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2068{
31234768 2069 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2070}
2071
e36f27ef 2072static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2073{
2074 /* No point in nullifying the memory barrier. */
2075 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2076
2077 cond_free(&ctx->null_cond);
31234768 2078 return true;
98a9cb79
RH
2079}
2080
c603e14a 2081static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2082{
c603e14a 2083 unsigned rt = a->t;
eaa3783b
RH
2084 TCGv_reg tmp = dest_gpr(ctx, rt);
2085 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2086 save_gpr(ctx, rt, tmp);
2087
2088 cond_free(&ctx->null_cond);
31234768 2089 return true;
98a9cb79
RH
2090}
2091
c603e14a 2092static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2093{
c603e14a
RH
2094 unsigned rt = a->t;
2095 unsigned rs = a->sp;
33423472
RH
2096 TCGv_i64 t0 = tcg_temp_new_i64();
2097 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2098
33423472
RH
2099 load_spr(ctx, t0, rs);
2100 tcg_gen_shri_i64(t0, t0, 32);
2101 tcg_gen_trunc_i64_reg(t1, t0);
2102
2103 save_gpr(ctx, rt, t1);
2104 tcg_temp_free(t1);
2105 tcg_temp_free_i64(t0);
98a9cb79
RH
2106
2107 cond_free(&ctx->null_cond);
31234768 2108 return true;
98a9cb79
RH
2109}
2110
c603e14a 2111static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2112{
c603e14a
RH
2113 unsigned rt = a->t;
2114 unsigned ctl = a->r;
eaa3783b 2115 TCGv_reg tmp;
98a9cb79
RH
2116
2117 switch (ctl) {
35136a77 2118 case CR_SAR:
98a9cb79 2119#ifdef TARGET_HPPA64
c603e14a 2120 if (a->e == 0) {
98a9cb79
RH
2121 /* MFSAR without ,W masks low 5 bits. */
2122 tmp = dest_gpr(ctx, rt);
eaa3783b 2123 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2124 save_gpr(ctx, rt, tmp);
35136a77 2125 goto done;
98a9cb79
RH
2126 }
2127#endif
2128 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2129 goto done;
2130 case CR_IT: /* Interval Timer */
2131 /* FIXME: Respect PSW_S bit. */
2132 nullify_over(ctx);
98a9cb79 2133 tmp = dest_gpr(ctx, rt);
84b41e65 2134 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
49c29d6c
RH
2135 gen_io_start();
2136 gen_helper_read_interval_timer(tmp);
31234768 2137 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2138 } else {
2139 gen_helper_read_interval_timer(tmp);
49c29d6c 2140 }
98a9cb79 2141 save_gpr(ctx, rt, tmp);
31234768 2142 return nullify_end(ctx);
98a9cb79 2143 case 26:
98a9cb79 2144 case 27:
98a9cb79
RH
2145 break;
2146 default:
2147 /* All other control registers are privileged. */
35136a77
RH
2148 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2149 break;
98a9cb79
RH
2150 }
2151
35136a77
RH
2152 tmp = get_temp(ctx);
2153 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2154 save_gpr(ctx, rt, tmp);
2155
2156 done:
98a9cb79 2157 cond_free(&ctx->null_cond);
31234768 2158 return true;
98a9cb79
RH
2159}
2160
c603e14a 2161static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2162{
c603e14a
RH
2163 unsigned rr = a->r;
2164 unsigned rs = a->sp;
33423472
RH
2165 TCGv_i64 t64;
2166
2167 if (rs >= 5) {
2168 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2169 }
2170 nullify_over(ctx);
2171
2172 t64 = tcg_temp_new_i64();
2173 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2174 tcg_gen_shli_i64(t64, t64, 32);
2175
2176 if (rs >= 4) {
2177 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2178 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2179 } else {
2180 tcg_gen_mov_i64(cpu_sr[rs], t64);
2181 }
2182 tcg_temp_free_i64(t64);
2183
31234768 2184 return nullify_end(ctx);
33423472
RH
2185}
2186
c603e14a 2187static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2188{
c603e14a 2189 unsigned ctl = a->t;
4845f015 2190 TCGv_reg reg;
eaa3783b 2191 TCGv_reg tmp;
98a9cb79 2192
35136a77 2193 if (ctl == CR_SAR) {
4845f015 2194 reg = load_gpr(ctx, a->r);
98a9cb79 2195 tmp = tcg_temp_new();
35136a77 2196 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
98a9cb79
RH
2197 save_or_nullify(ctx, cpu_sar, tmp);
2198 tcg_temp_free(tmp);
35136a77
RH
2199
2200 cond_free(&ctx->null_cond);
31234768 2201 return true;
98a9cb79
RH
2202 }
2203
35136a77
RH
2204 /* All other control registers are privileged or read-only. */
2205 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2206
c603e14a 2207#ifndef CONFIG_USER_ONLY
35136a77 2208 nullify_over(ctx);
4845f015
SS
2209 reg = load_gpr(ctx, a->r);
2210
35136a77
RH
2211 switch (ctl) {
2212 case CR_IT:
49c29d6c 2213 gen_helper_write_interval_timer(cpu_env, reg);
35136a77 2214 break;
4f5f2548
RH
2215 case CR_EIRR:
2216 gen_helper_write_eirr(cpu_env, reg);
2217 break;
2218 case CR_EIEM:
2219 gen_helper_write_eiem(cpu_env, reg);
31234768 2220 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2221 break;
2222
35136a77
RH
2223 case CR_IIASQ:
2224 case CR_IIAOQ:
2225 /* FIXME: Respect PSW_Q bit */
2226 /* The write advances the queue and stores to the back element. */
2227 tmp = get_temp(ctx);
2228 tcg_gen_ld_reg(tmp, cpu_env,
2229 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2230 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2231 tcg_gen_st_reg(reg, cpu_env,
2232 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2233 break;
2234
d5de20bd
SS
2235 case CR_PID1:
2236 case CR_PID2:
2237 case CR_PID3:
2238 case CR_PID4:
2239 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2240#ifndef CONFIG_USER_ONLY
2241 gen_helper_change_prot_id(cpu_env);
2242#endif
2243 break;
2244
35136a77
RH
2245 default:
2246 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2247 break;
2248 }
31234768 2249 return nullify_end(ctx);
4f5f2548 2250#endif
98a9cb79
RH
2251}
2252
c603e14a 2253static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2254{
eaa3783b 2255 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2256
c603e14a 2257 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
eaa3783b 2258 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
98a9cb79
RH
2259 save_or_nullify(ctx, cpu_sar, tmp);
2260 tcg_temp_free(tmp);
2261
2262 cond_free(&ctx->null_cond);
31234768 2263 return true;
98a9cb79
RH
2264}
2265
e36f27ef 2266static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2267{
e36f27ef 2268 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2269
2330504c
HD
2270#ifdef CONFIG_USER_ONLY
2271 /* We don't implement space registers in user mode. */
eaa3783b 2272 tcg_gen_movi_reg(dest, 0);
2330504c 2273#else
2330504c
HD
2274 TCGv_i64 t0 = tcg_temp_new_i64();
2275
e36f27ef 2276 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2277 tcg_gen_shri_i64(t0, t0, 32);
2278 tcg_gen_trunc_i64_reg(dest, t0);
2279
2280 tcg_temp_free_i64(t0);
2281#endif
e36f27ef 2282 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2283
2284 cond_free(&ctx->null_cond);
31234768 2285 return true;
98a9cb79
RH
2286}
2287
e36f27ef 2288static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2289{
e36f27ef
RH
2290 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2291#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2292 TCGv_reg tmp;
2293
e1b5a5ed
RH
2294 nullify_over(ctx);
2295
2296 tmp = get_temp(ctx);
2297 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2298 tcg_gen_andi_reg(tmp, tmp, ~a->i);
e1b5a5ed 2299 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2300 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2301
2302 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2303 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2304 return nullify_end(ctx);
e36f27ef 2305#endif
e1b5a5ed
RH
2306}
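
/*
 * The shape of trans_rsm above repeats for the other privileged
 * system-control insns: CHECK_MOST_PRIVILEGED raises the given
 * exception (and returns) unless we are translating at the most
 * privileged level, and in CONFIG_USER_ONLY builds it always raises,
 * so the remainder of each function is simply compiled out with
 * #ifndef CONFIG_USER_ONLY instead of being left unreachable.
 */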
2307
e36f27ef 2308static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2309{
e36f27ef
RH
2310 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2311#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2312 TCGv_reg tmp;
2313
e1b5a5ed
RH
2314 nullify_over(ctx);
2315
2316 tmp = get_temp(ctx);
2317 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2318 tcg_gen_ori_reg(tmp, tmp, a->i);
e1b5a5ed 2319 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2320 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2321
2322 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2323 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2324 return nullify_end(ctx);
e36f27ef 2325#endif
e1b5a5ed
RH
2326}
2327
c603e14a 2328static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2329{
e1b5a5ed 2330 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2331#ifndef CONFIG_USER_ONLY
2332 TCGv_reg tmp, reg;
e1b5a5ed
RH
2333 nullify_over(ctx);
2334
c603e14a 2335 reg = load_gpr(ctx, a->r);
e1b5a5ed
RH
2336 tmp = get_temp(ctx);
2337 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2338
2339 /* Exit the TB to recognize new interrupts. */
31234768
RH
2340 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2341 return nullify_end(ctx);
c603e14a 2342#endif
e1b5a5ed 2343}
f49b3537 2344
e36f27ef 2345static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2346{
f49b3537 2347 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2348#ifndef CONFIG_USER_ONLY
f49b3537
RH
2349 nullify_over(ctx);
2350
e36f27ef 2351 if (rfi_r) {
f49b3537
RH
2352 gen_helper_rfi_r(cpu_env);
2353 } else {
2354 gen_helper_rfi(cpu_env);
2355 }
31234768 2356 /* Exit the TB to recognize new interrupts. */
8532a14e 2357 tcg_gen_exit_tb(NULL, 0);
31234768 2358 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2359
31234768 2360 return nullify_end(ctx);
e36f27ef
RH
2361#endif
2362}
2363
2364static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2365{
2366 return do_rfi(ctx, false);
2367}
2368
2369static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2370{
2371 return do_rfi(ctx, true);
f49b3537 2372}
6210db05 2373
96927adb
RH
2374static bool trans_halt(DisasContext *ctx, arg_halt *a)
2375{
2376 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2377#ifndef CONFIG_USER_ONLY
96927adb
RH
2378 nullify_over(ctx);
2379 gen_helper_halt(cpu_env);
2380 ctx->base.is_jmp = DISAS_NORETURN;
2381 return nullify_end(ctx);
2382#endif
2383}
2384
2385static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2386{
2387 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2388#ifndef CONFIG_USER_ONLY
6210db05 2389 nullify_over(ctx);
96927adb 2390 gen_helper_reset(cpu_env);
31234768
RH
2391 ctx->base.is_jmp = DISAS_NORETURN;
2392 return nullify_end(ctx);
96927adb 2393#endif
6210db05 2394}
e1b5a5ed 2395
deee69a1 2396static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2397{
deee69a1
RH
2398 if (a->m) {
2399 TCGv_reg dest = dest_gpr(ctx, a->b);
2400 TCGv_reg src1 = load_gpr(ctx, a->b);
2401 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2402
deee69a1
RH
2403 /* The only thing we need to do is the base register modification. */
2404 tcg_gen_add_reg(dest, src1, src2);
2405 save_gpr(ctx, a->b, dest);
2406 }
98a9cb79 2407 cond_free(&ctx->null_cond);
31234768 2408 return true;
98a9cb79
RH
2409}
2410
deee69a1 2411static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2412{
86f8d05f 2413 TCGv_reg dest, ofs;
eed14219 2414 TCGv_i32 level, want;
86f8d05f 2415 TCGv_tl addr;
98a9cb79
RH
2416
2417 nullify_over(ctx);
2418
deee69a1
RH
2419 dest = dest_gpr(ctx, a->t);
2420 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2421
deee69a1 2422 if (a->imm) {
29dd6f64 2423 level = tcg_constant_i32(a->ri);
98a9cb79 2424 } else {
eed14219 2425 level = tcg_temp_new_i32();
deee69a1 2426 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2427 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2428 }
29dd6f64 2429 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
eed14219
RH
2430
2431 gen_helper_probe(dest, cpu_env, addr, level, want);
2432
eed14219
RH
2433 tcg_temp_free_i32(level);
2434
deee69a1 2435 save_gpr(ctx, a->t, dest);
31234768 2436 return nullify_end(ctx);
98a9cb79
RH
2437}
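
/*
 * PROBE checks whether a read or write at the given privilege level
 * would succeed, without actually faulting: gen_helper_probe performs
 * the access-rights check (PAGE_READ or PAGE_WRITE as selected above)
 * and the result, 1 for permitted and 0 otherwise, lands in rt.
 */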
2438
deee69a1 2439static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2440{
deee69a1
RH
2441 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2442#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2443 TCGv_tl addr;
2444 TCGv_reg ofs, reg;
2445
8d6ae7fb
RH
2446 nullify_over(ctx);
2447
deee69a1
RH
2448 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2449 reg = load_gpr(ctx, a->r);
2450 if (a->addr) {
8d6ae7fb
RH
2451 gen_helper_itlba(cpu_env, addr, reg);
2452 } else {
2453 gen_helper_itlbp(cpu_env, addr, reg);
2454 }
2455
32dc7569
SS
2456 /* Exit TB for TLB change if mmu is enabled. */
2457 if (ctx->tb_flags & PSW_C) {
31234768
RH
2458 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2459 }
2460 return nullify_end(ctx);
deee69a1 2461#endif
8d6ae7fb 2462}
63300a00 2463
deee69a1 2464static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2465{
deee69a1
RH
2466 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2467#ifndef CONFIG_USER_ONLY
63300a00
RH
2468 TCGv_tl addr;
2469 TCGv_reg ofs;
2470
63300a00
RH
2471 nullify_over(ctx);
2472
deee69a1
RH
2473 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2474 if (a->m) {
2475 save_gpr(ctx, a->b, ofs);
63300a00 2476 }
deee69a1 2477 if (a->local) {
63300a00
RH
2478 gen_helper_ptlbe(cpu_env);
2479 } else {
2480 gen_helper_ptlb(cpu_env, addr);
2481 }
2482
2483 /* Exit TB for TLB change if mmu is enabled. */
6797c315
NH
2484 if (ctx->tb_flags & PSW_C) {
2485 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2486 }
2487 return nullify_end(ctx);
2488#endif
2489}
2490
2491/*
2492 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2493 * See
2494 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2495 * page 13-9 (195/206)
2496 */
2497static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2498{
2499 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2500#ifndef CONFIG_USER_ONLY
2501 TCGv_tl addr, atl, stl;
2502 TCGv_reg reg;
2503
2504 nullify_over(ctx);
2505
2506 /*
2507 * FIXME:
2508 * if (not (pcxl or pcxl2))
2509 * return gen_illegal(ctx);
2510 *
2511 * Note for future: these are 32-bit systems; no hppa64.
2512 */
2513
2514 atl = tcg_temp_new_tl();
2515 stl = tcg_temp_new_tl();
2516 addr = tcg_temp_new_tl();
2517
2518 tcg_gen_ld32u_i64(stl, cpu_env,
2519 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2520 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2521 tcg_gen_ld32u_i64(atl, cpu_env,
2522 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2523 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2524 tcg_gen_shli_i64(stl, stl, 32);
2525 tcg_gen_or_tl(addr, atl, stl);
2526 tcg_temp_free_tl(atl);
2527 tcg_temp_free_tl(stl);
2528
2529 reg = load_gpr(ctx, a->r);
2530 if (a->addr) {
2531 gen_helper_itlba(cpu_env, addr, reg);
2532 } else {
2533 gen_helper_itlbp(cpu_env, addr, reg);
2534 }
2535 tcg_temp_free_tl(addr);
2536
2537 /* Exit TB for TLB change if mmu is enabled. */
32dc7569 2538 if (ctx->tb_flags & PSW_C) {
31234768
RH
2539 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2540 }
2541 return nullify_end(ctx);
deee69a1 2542#endif
63300a00 2543}
2dfcca9f 2544
deee69a1 2545static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2546{
deee69a1
RH
2547 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2548#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2549 TCGv_tl vaddr;
2550 TCGv_reg ofs, paddr;
2551
2dfcca9f
RH
2552 nullify_over(ctx);
2553
deee69a1 2554 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2555
2556 paddr = tcg_temp_new();
2557 gen_helper_lpa(paddr, cpu_env, vaddr);
2558
2559 /* Note that physical address result overrides base modification. */
deee69a1
RH
2560 if (a->m) {
2561 save_gpr(ctx, a->b, ofs);
2dfcca9f 2562 }
deee69a1 2563 save_gpr(ctx, a->t, paddr);
2dfcca9f
RH
2564 tcg_temp_free(paddr);
2565
31234768 2566 return nullify_end(ctx);
deee69a1 2567#endif
2dfcca9f 2568}
43a97b81 2569
deee69a1 2570static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2571{
43a97b81
RH
2572 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2573
2574 /* The Coherence Index is an implementation-defined function of the
2575 physical address. Two addresses with the same CI have a coherent
2576 view of the cache. Our implementation is to return 0 for all,
2577 since the entire address space is coherent. */
29dd6f64 2578 save_gpr(ctx, a->t, tcg_constant_reg(0));
43a97b81 2579
31234768
RH
2580 cond_free(&ctx->null_cond);
2581 return true;
43a97b81 2582}
98a9cb79 2583
0c982a28 2584static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2585{
0c982a28
RH
2586 return do_add_reg(ctx, a, false, false, false, false);
2587}
b2167459 2588
0c982a28
RH
2589static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2590{
2591 return do_add_reg(ctx, a, true, false, false, false);
2592}
b2167459 2593
0c982a28
RH
2594static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2595{
2596 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2597}
2598
0c982a28 2599static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2600{
0c982a28
RH
2601 return do_add_reg(ctx, a, false, false, false, true);
2602}
b2167459 2603
0c982a28
RH
2604static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2605{
2606 return do_add_reg(ctx, a, false, true, false, true);
2607}
b2167459 2608
0c982a28
RH
2609static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2610{
2611 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2612}
2613
0c982a28 2614static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2615{
0c982a28
RH
2616 return do_sub_reg(ctx, a, true, false, false);
2617}
b2167459 2618
0c982a28
RH
2619static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2620{
2621 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2622}
2623
0c982a28 2624static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2625{
0c982a28
RH
2626 return do_sub_reg(ctx, a, true, false, true);
2627}
2628
2629static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2630{
2631 return do_sub_reg(ctx, a, false, true, false);
2632}
2633
2634static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2635{
2636 return do_sub_reg(ctx, a, true, true, false);
2637}
2638
2639static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2640{
2641 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2642}
2643
2644static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2645{
2646 return do_log_reg(ctx, a, tcg_gen_and_reg);
2647}
2648
2649static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2650{
2651 if (a->cf == 0) {
2652 unsigned r2 = a->r2;
2653 unsigned r1 = a->r1;
2654 unsigned rt = a->t;
b2167459 2655
7aee8189
RH
2656 if (rt == 0) { /* NOP */
2657 cond_free(&ctx->null_cond);
2658 return true;
2659 }
2660 if (r2 == 0) { /* COPY */
2661 if (r1 == 0) {
2662 TCGv_reg dest = dest_gpr(ctx, rt);
2663 tcg_gen_movi_reg(dest, 0);
2664 save_gpr(ctx, rt, dest);
2665 } else {
2666 save_gpr(ctx, rt, cpu_gr[r1]);
2667 }
2668 cond_free(&ctx->null_cond);
2669 return true;
2670 }
2671#ifndef CONFIG_USER_ONLY
2672 /* These are QEMU extensions and are nops in the real architecture:
2673 *
2674 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2675 * or %r31,%r31,%r31 -- death loop; offline cpu
2676 * currently implemented as idle.
2677 */
2678 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
7aee8189
RH
2679 /* No need to check for supervisor, as userland can only pause
2680 until the next timer interrupt. */
2681 nullify_over(ctx);
2682
2683 /* Advance the instruction queue. */
2684 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2685 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2686 nullify_set(ctx, 0);
2687
2688 /* Tell the qemu main loop to halt until this cpu has work. */
29dd6f64
RH
2689 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2690 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
7aee8189
RH
2691 gen_excp_1(EXCP_HALTED);
2692 ctx->base.is_jmp = DISAS_NORETURN;
2693
2694 return nullify_end(ctx);
2695 }
2696#endif
b2167459 2697 }
0c982a28
RH
2698 return do_log_reg(ctx, a, tcg_gen_or_reg);
2699}
7aee8189 2700
0c982a28
RH
2701static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2702{
2703 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2704}
2705
0c982a28 2706static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2707{
eaa3783b 2708 TCGv_reg tcg_r1, tcg_r2;
b2167459 2709
0c982a28 2710 if (a->cf) {
b2167459
RH
2711 nullify_over(ctx);
2712 }
0c982a28
RH
2713 tcg_r1 = load_gpr(ctx, a->r1);
2714 tcg_r2 = load_gpr(ctx, a->r2);
2715 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2716 return nullify_end(ctx);
b2167459
RH
2717}
2718
0c982a28 2719static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2720{
eaa3783b 2721 TCGv_reg tcg_r1, tcg_r2;
b2167459 2722
0c982a28 2723 if (a->cf) {
b2167459
RH
2724 nullify_over(ctx);
2725 }
0c982a28
RH
2726 tcg_r1 = load_gpr(ctx, a->r1);
2727 tcg_r2 = load_gpr(ctx, a->r2);
2728 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2729 return nullify_end(ctx);
b2167459
RH
2730}
2731
0c982a28 2732static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2733{
eaa3783b 2734 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2735
0c982a28 2736 if (a->cf) {
b2167459
RH
2737 nullify_over(ctx);
2738 }
0c982a28
RH
2739 tcg_r1 = load_gpr(ctx, a->r1);
2740 tcg_r2 = load_gpr(ctx, a->r2);
b2167459 2741 tmp = get_temp(ctx);
eaa3783b 2742 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2743 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2744 return nullify_end(ctx);
b2167459
RH
2745}
2746
0c982a28
RH
2747static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2748{
2749 return do_uaddcm(ctx, a, false);
2750}
2751
2752static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2753{
2754 return do_uaddcm(ctx, a, true);
2755}
2756
2757static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2758{
eaa3783b 2759 TCGv_reg tmp;
b2167459
RH
2760
2761 nullify_over(ctx);
2762
2763 tmp = get_temp(ctx);
eaa3783b 2764 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2765 if (!is_i) {
eaa3783b 2766 tcg_gen_not_reg(tmp, tmp);
b2167459 2767 }
eaa3783b
RH
2768 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2769 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2770 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2771 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2772 return nullify_end(ctx);
b2167459
RH
2773}
2774
0c982a28
RH
2775static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2776{
2777 return do_dcor(ctx, a, false);
2778}
2779
2780static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2781{
2782 return do_dcor(ctx, a, true);
2783}
2784
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}. */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx);
}
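
/*
 * DS is the building block of PA-RISC software division (e.g. the
 * $$divI/$$divU millicode routines): one divide step per quotient bit
 * shifts the partial remainder left through the carry and conditionally
 * adds or subtracts the divisor according to PSW[V], which is the
 * add1/add2/addc computation above.
 */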
2844
0588e061 2845static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2846{
0588e061
RH
2847 return do_add_imm(ctx, a, false, false);
2848}
b2167459 2849
0588e061
RH
2850static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2851{
2852 return do_add_imm(ctx, a, true, false);
b2167459
RH
2853}
2854
0588e061 2855static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2856{
0588e061
RH
2857 return do_add_imm(ctx, a, false, true);
2858}
b2167459 2859
0588e061
RH
2860static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2861{
2862 return do_add_imm(ctx, a, true, true);
2863}
b2167459 2864
0588e061
RH
2865static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2866{
2867 return do_sub_imm(ctx, a, false);
2868}
b2167459 2869
0588e061
RH
2870static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2871{
2872 return do_sub_imm(ctx, a, true);
b2167459
RH
2873}
2874
0588e061 2875static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2876{
eaa3783b 2877 TCGv_reg tcg_im, tcg_r2;
b2167459 2878
0588e061 2879 if (a->cf) {
b2167459
RH
2880 nullify_over(ctx);
2881 }
2882
0588e061
RH
2883 tcg_im = load_const(ctx, a->i);
2884 tcg_r2 = load_gpr(ctx, a->r);
2885 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2886
31234768 2887 return nullify_end(ctx);
b2167459
RH
2888}
2889
1cd012a5 2890static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2891{
1cd012a5
RH
2892 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2893 a->disp, a->sp, a->m, a->size | MO_TE);
96d6407f
RH
2894}
2895
1cd012a5 2896static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2897{
1cd012a5
RH
2898 assert(a->x == 0 && a->scale == 0);
2899 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
96d6407f
RH
2900}
2901
1cd012a5 2902static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
96d6407f 2903{
b1af755c 2904 MemOp mop = MO_TE | MO_ALIGN | a->size;
86f8d05f
RH
2905 TCGv_reg zero, dest, ofs;
2906 TCGv_tl addr;
96d6407f
RH
2907
2908 nullify_over(ctx);
2909
1cd012a5 2910 if (a->m) {
86f8d05f
RH
2911 /* Base register modification. Make sure if RT == RB,
2912 we see the result of the load. */
96d6407f
RH
2913 dest = get_temp(ctx);
2914 } else {
1cd012a5 2915 dest = dest_gpr(ctx, a->t);
96d6407f
RH
2916 }
2917
1cd012a5
RH
2918 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2919 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
b1af755c
RH
2920
2921 /*
2922 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2923 * However actual hardware succeeds with aligned mod 4.
2924 * Detect this case and log a GUEST_ERROR.
2925 *
2926 * TODO: HPPA64 relaxes the over-alignment requirement
2927 * with the ,co completer.
2928 */
2929 gen_helper_ldc_check(addr);
2930
29dd6f64 2931 zero = tcg_constant_reg(0);
86f8d05f 2932 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
b1af755c 2933
1cd012a5
RH
2934 if (a->m) {
2935 save_gpr(ctx, a->b, ofs);
96d6407f 2936 }
1cd012a5 2937 save_gpr(ctx, a->t, dest);
96d6407f 2938
31234768 2939 return nullify_end(ctx);
96d6407f
RH
2940}
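
/*
 * LDCW (load and clear word) atomically reads a word and writes zero
 * back to it, which is why it is modelled here as an atomic exchange
 * with zero.  It is the architecture's basic locking primitive:
 * PA-RISC spinlocks keep a nonzero value in a free lock word and zero
 * in a held one.
 */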
2941
1cd012a5 2942static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2943{
86f8d05f
RH
2944 TCGv_reg ofs, val;
2945 TCGv_tl addr;
96d6407f
RH
2946
2947 nullify_over(ctx);
2948
1cd012a5 2949 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2950 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2951 val = load_gpr(ctx, a->r);
2952 if (a->a) {
f9f46db4
EC
2953 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2954 gen_helper_stby_e_parallel(cpu_env, addr, val);
2955 } else {
2956 gen_helper_stby_e(cpu_env, addr, val);
2957 }
96d6407f 2958 } else {
f9f46db4
EC
2959 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2960 gen_helper_stby_b_parallel(cpu_env, addr, val);
2961 } else {
2962 gen_helper_stby_b(cpu_env, addr, val);
2963 }
96d6407f 2964 }
1cd012a5 2965 if (a->m) {
86f8d05f 2966 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2967 save_gpr(ctx, a->b, ofs);
96d6407f 2968 }
96d6407f 2969
31234768 2970 return nullify_end(ctx);
96d6407f
RH
2971}
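
/*
 * STBY stores only part of a word, for handling the ragged ends of an
 * unaligned block copy: roughly, the ,b (begin) form stores from the
 * addressed byte through the end of the word, while the ,e (end) form
 * stores the leading bytes of the word up to the address.  The
 * _parallel helper variants are used under CF_PARALLEL, where other
 * vCPUs may be running concurrently.
 */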
2972
1cd012a5 2973static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2974{
2975 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2976
2977 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2978 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2979 trans_ld(ctx, a);
d0a851cc 2980 ctx->mmu_idx = hold_mmu_idx;
31234768 2981 return true;
d0a851cc
RH
2982}
2983
1cd012a5 2984static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2985{
2986 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2987
2988 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2989 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2990 trans_st(ctx, a);
d0a851cc 2991 ctx->mmu_idx = hold_mmu_idx;
31234768 2992 return true;
d0a851cc 2993}
95412a61 2994
0588e061 2995static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2996{
0588e061 2997 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2998
0588e061
RH
2999 tcg_gen_movi_reg(tcg_rt, a->i);
3000 save_gpr(ctx, a->t, tcg_rt);
b2167459 3001 cond_free(&ctx->null_cond);
31234768 3002 return true;
b2167459
RH
3003}
3004
0588e061 3005static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 3006{
0588e061 3007 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 3008 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 3009
0588e061 3010 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
3011 save_gpr(ctx, 1, tcg_r1);
3012 cond_free(&ctx->null_cond);
31234768 3013 return true;
b2167459
RH
3014}
3015
0588e061 3016static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 3017{
0588e061 3018 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
3019
3020 /* Special case rb == 0, for the LDI pseudo-op.
3021 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
3022 if (a->b == 0) {
3023 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 3024 } else {
0588e061 3025 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 3026 }
0588e061 3027 save_gpr(ctx, a->t, tcg_rt);
b2167459 3028 cond_free(&ctx->null_cond);
31234768 3029 return true;
b2167459
RH
3030}
3031
01afb7be
RH
3032static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3033 unsigned c, unsigned f, unsigned n, int disp)
98cd9ca7 3034{
01afb7be 3035 TCGv_reg dest, in2, sv;
98cd9ca7
RH
3036 DisasCond cond;
3037
98cd9ca7
RH
3038 in2 = load_gpr(ctx, r);
3039 dest = get_temp(ctx);
3040
eaa3783b 3041 tcg_gen_sub_reg(dest, in1, in2);
98cd9ca7 3042
f764718d 3043 sv = NULL;
b47a4a02 3044 if (cond_need_sv(c)) {
98cd9ca7
RH
3045 sv = do_sub_sv(ctx, dest, in1, in2);
3046 }
3047
01afb7be
RH
3048 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3049 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3050}
3051
01afb7be 3052static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
98cd9ca7 3053{
01afb7be
RH
3054 nullify_over(ctx);
3055 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3056}
98cd9ca7 3057
01afb7be
RH
3058static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3059{
98cd9ca7 3060 nullify_over(ctx);
01afb7be
RH
3061 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3062}
3063
3064static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3065 unsigned c, unsigned f, unsigned n, int disp)
3066{
3067 TCGv_reg dest, in2, sv, cb_msb;
3068 DisasCond cond;
98cd9ca7 3069
98cd9ca7 3070 in2 = load_gpr(ctx, r);
43675d20 3071 dest = tcg_temp_new();
f764718d
RH
3072 sv = NULL;
3073 cb_msb = NULL;
98cd9ca7 3074
b47a4a02 3075 if (cond_need_cb(c)) {
98cd9ca7 3076 cb_msb = get_temp(ctx);
eaa3783b
RH
3077 tcg_gen_movi_reg(cb_msb, 0);
3078 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
b47a4a02 3079 } else {
eaa3783b 3080 tcg_gen_add_reg(dest, in1, in2);
b47a4a02
SS
3081 }
3082 if (cond_need_sv(c)) {
98cd9ca7 3083 sv = do_add_sv(ctx, dest, in1, in2);
98cd9ca7
RH
3084 }
3085
01afb7be 3086 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
43675d20
SS
3087 save_gpr(ctx, r, dest);
3088 tcg_temp_free(dest);
01afb7be 3089 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3090}
3091
01afb7be
RH
3092static bool trans_addb(DisasContext *ctx, arg_addb *a)
3093{
3094 nullify_over(ctx);
3095 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3096}
3097
3098static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3099{
3100 nullify_over(ctx);
3101 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3102}
3103
3104static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
98cd9ca7 3105{
eaa3783b 3106 TCGv_reg tmp, tcg_r;
98cd9ca7
RH
3107 DisasCond cond;
3108
3109 nullify_over(ctx);
3110
3111 tmp = tcg_temp_new();
01afb7be
RH
3112 tcg_r = load_gpr(ctx, a->r);
3113 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
98cd9ca7 3114
01afb7be 3115 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
98cd9ca7 3116 tcg_temp_free(tmp);
01afb7be 3117 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3118}
3119
01afb7be
RH
3120static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3121{
3122 TCGv_reg tmp, tcg_r;
3123 DisasCond cond;
3124
3125 nullify_over(ctx);
3126
3127 tmp = tcg_temp_new();
3128 tcg_r = load_gpr(ctx, a->r);
3129 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3130
3131 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3132 tcg_temp_free(tmp);
3133 return do_cbranch(ctx, a->disp, a->n, &cond);
3134}
3135
3136static bool trans_movb(DisasContext *ctx, arg_movb *a)
98cd9ca7 3137{
eaa3783b 3138 TCGv_reg dest;
98cd9ca7
RH
3139 DisasCond cond;
3140
3141 nullify_over(ctx);
3142
01afb7be
RH
3143 dest = dest_gpr(ctx, a->r2);
3144 if (a->r1 == 0) {
eaa3783b 3145 tcg_gen_movi_reg(dest, 0);
98cd9ca7 3146 } else {
01afb7be 3147 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
98cd9ca7
RH
3148 }
3149
01afb7be
RH
3150 cond = do_sed_cond(a->c, dest);
3151 return do_cbranch(ctx, a->disp, a->n, &cond);
3152}
3153
3154static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3155{
3156 TCGv_reg dest;
3157 DisasCond cond;
3158
3159 nullify_over(ctx);
3160
3161 dest = dest_gpr(ctx, a->r);
3162 tcg_gen_movi_reg(dest, a->i);
3163
3164 cond = do_sed_cond(a->c, dest);
3165 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3166}
3167
30878590 3168static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
0b1347d2 3169{
eaa3783b 3170 TCGv_reg dest;
0b1347d2 3171
30878590 3172 if (a->c) {
0b1347d2
RH
3173 nullify_over(ctx);
3174 }
3175
30878590
RH
3176 dest = dest_gpr(ctx, a->t);
3177 if (a->r1 == 0) {
3178 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
eaa3783b 3179 tcg_gen_shr_reg(dest, dest, cpu_sar);
30878590 3180 } else if (a->r1 == a->r2) {
0b1347d2 3181 TCGv_i32 t32 = tcg_temp_new_i32();
30878590 3182 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
0b1347d2 3183 tcg_gen_rotr_i32(t32, t32, cpu_sar);
eaa3783b 3184 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2
RH
3185 tcg_temp_free_i32(t32);
3186 } else {
3187 TCGv_i64 t = tcg_temp_new_i64();
3188 TCGv_i64 s = tcg_temp_new_i64();
3189
30878590 3190 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
eaa3783b 3191 tcg_gen_extu_reg_i64(s, cpu_sar);
0b1347d2 3192 tcg_gen_shr_i64(t, t, s);
eaa3783b 3193 tcg_gen_trunc_i64_reg(dest, t);
0b1347d2
RH
3194
3195 tcg_temp_free_i64(t);
3196 tcg_temp_free_i64(s);
3197 }
30878590 3198 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3199
3200 /* Install the new nullification. */
3201 cond_free(&ctx->null_cond);
30878590
RH
3202 if (a->c) {
3203 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3204 }
31234768 3205 return nullify_end(ctx);
0b1347d2
RH
3206}
3207
30878590 3208static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
0b1347d2 3209{
30878590 3210 unsigned sa = 31 - a->cpos;
eaa3783b 3211 TCGv_reg dest, t2;
0b1347d2 3212
30878590 3213 if (a->c) {
0b1347d2
RH
3214 nullify_over(ctx);
3215 }
3216
30878590
RH
3217 dest = dest_gpr(ctx, a->t);
3218 t2 = load_gpr(ctx, a->r2);
05bfd4db
RH
3219 if (a->r1 == 0) {
3220 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3221 } else if (TARGET_REGISTER_BITS == 32) {
3222 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3223 } else if (a->r1 == a->r2) {
0b1347d2 3224 TCGv_i32 t32 = tcg_temp_new_i32();
eaa3783b 3225 tcg_gen_trunc_reg_i32(t32, t2);
0b1347d2 3226 tcg_gen_rotri_i32(t32, t32, sa);
eaa3783b 3227 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2 3228 tcg_temp_free_i32(t32);
0b1347d2 3229 } else {
05bfd4db
RH
3230 TCGv_i64 t64 = tcg_temp_new_i64();
3231 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3232 tcg_gen_shri_i64(t64, t64, sa);
3233 tcg_gen_trunc_i64_reg(dest, t64);
3234 tcg_temp_free_i64(t64);
0b1347d2 3235 }
30878590 3236 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3237
3238 /* Install the new nullification. */
3239 cond_free(&ctx->null_cond);
30878590
RH
3240 if (a->c) {
3241 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3242 }
31234768 3243 return nullify_end(ctx);
0b1347d2
RH
3244}
3245
30878590 3246static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
0b1347d2 3247{
30878590 3248 unsigned len = 32 - a->clen;
eaa3783b 3249 TCGv_reg dest, src, tmp;
0b1347d2 3250
30878590 3251 if (a->c) {
0b1347d2
RH
3252 nullify_over(ctx);
3253 }
3254
30878590
RH
3255 dest = dest_gpr(ctx, a->t);
3256 src = load_gpr(ctx, a->r);
0b1347d2
RH
3257 tmp = tcg_temp_new();
3258
3259 /* Recall that SAR is using big-endian bit numbering. */
eaa3783b 3260 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
30878590 3261 if (a->se) {
eaa3783b
RH
3262 tcg_gen_sar_reg(dest, src, tmp);
3263 tcg_gen_sextract_reg(dest, dest, 0, len);
0b1347d2 3264 } else {
eaa3783b
RH
3265 tcg_gen_shr_reg(dest, src, tmp);
3266 tcg_gen_extract_reg(dest, dest, 0, len);
0b1347d2
RH
3267 }
3268 tcg_temp_free(tmp);
30878590 3269 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3270
3271 /* Install the new nullification. */
3272 cond_free(&ctx->null_cond);
30878590
RH
3273 if (a->c) {
3274 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3275 }
31234768 3276 return nullify_end(ctx);
0b1347d2
RH
3277}
3278
30878590 3279static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
0b1347d2 3280{
30878590
RH
3281 unsigned len = 32 - a->clen;
3282 unsigned cpos = 31 - a->pos;
eaa3783b 3283 TCGv_reg dest, src;
0b1347d2 3284
30878590 3285 if (a->c) {
0b1347d2
RH
3286 nullify_over(ctx);
3287 }
3288
30878590
RH
3289 dest = dest_gpr(ctx, a->t);
3290 src = load_gpr(ctx, a->r);
3291 if (a->se) {
eaa3783b 3292 tcg_gen_sextract_reg(dest, src, cpos, len);
0b1347d2 3293 } else {
eaa3783b 3294 tcg_gen_extract_reg(dest, src, cpos, len);
0b1347d2 3295 }
30878590 3296 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3297
3298 /* Install the new nullification. */
3299 cond_free(&ctx->null_cond);
30878590
RH
3300 if (a->c) {
3301 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3302 }
31234768 3303 return nullify_end(ctx);
0b1347d2
RH
3304}
3305
30878590 3306static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
0b1347d2 3307{
30878590 3308 unsigned len = 32 - a->clen;
eaa3783b
RH
3309 target_sreg mask0, mask1;
3310 TCGv_reg dest;
0b1347d2 3311
30878590 3312 if (a->c) {
0b1347d2
RH
3313 nullify_over(ctx);
3314 }
30878590
RH
3315 if (a->cpos + len > 32) {
3316 len = 32 - a->cpos;
0b1347d2
RH
3317 }
3318
30878590
RH
3319 dest = dest_gpr(ctx, a->t);
3320 mask0 = deposit64(0, a->cpos, len, a->i);
3321 mask1 = deposit64(-1, a->cpos, len, a->i);
0b1347d2 3322
30878590
RH
3323 if (a->nz) {
3324 TCGv_reg src = load_gpr(ctx, a->t);
0b1347d2 3325 if (mask1 != -1) {
eaa3783b 3326 tcg_gen_andi_reg(dest, src, mask1);
0b1347d2
RH
3327 src = dest;
3328 }
eaa3783b 3329 tcg_gen_ori_reg(dest, src, mask0);
0b1347d2 3330 } else {
eaa3783b 3331 tcg_gen_movi_reg(dest, mask0);
0b1347d2 3332 }
30878590 3333 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3334
3335 /* Install the new nullification. */
3336 cond_free(&ctx->null_cond);
30878590
RH
3337 if (a->c) {
3338 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3339 }
31234768 3340 return nullify_end(ctx);
0b1347d2
RH
3341}
3342
30878590 3343static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
0b1347d2 3344{
30878590
RH
3345 unsigned rs = a->nz ? a->t : 0;
3346 unsigned len = 32 - a->clen;
eaa3783b 3347 TCGv_reg dest, val;
0b1347d2 3348
30878590 3349 if (a->c) {
0b1347d2
RH
3350 nullify_over(ctx);
3351 }
30878590
RH
3352 if (a->cpos + len > 32) {
3353 len = 32 - a->cpos;
0b1347d2
RH
3354 }
3355
30878590
RH
3356 dest = dest_gpr(ctx, a->t);
3357 val = load_gpr(ctx, a->r);
0b1347d2 3358 if (rs == 0) {
30878590 3359 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
0b1347d2 3360 } else {
30878590 3361 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
0b1347d2 3362 }
30878590 3363 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3364
3365 /* Install the new nullification. */
3366 cond_free(&ctx->null_cond);
30878590
RH
3367 if (a->c) {
3368 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3369 }
31234768 3370 return nullify_end(ctx);
0b1347d2
RH
3371}
3372
30878590
RH
3373static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3374 unsigned nz, unsigned clen, TCGv_reg val)
0b1347d2 3375{
0b1347d2
RH
3376 unsigned rs = nz ? rt : 0;
3377 unsigned len = 32 - clen;
30878590 3378 TCGv_reg mask, tmp, shift, dest;
0b1347d2
RH
3379 unsigned msb = 1U << (len - 1);
3380
0b1347d2
RH
3381 dest = dest_gpr(ctx, rt);
3382 shift = tcg_temp_new();
3383 tmp = tcg_temp_new();
3384
3385 /* Convert big-endian bit numbering in SAR to left-shift. */
eaa3783b 3386 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
0b1347d2 3387
eaa3783b
RH
3388 mask = tcg_const_reg(msb + (msb - 1));
3389 tcg_gen_and_reg(tmp, val, mask);
0b1347d2 3390 if (rs) {
eaa3783b
RH
3391 tcg_gen_shl_reg(mask, mask, shift);
3392 tcg_gen_shl_reg(tmp, tmp, shift);
3393 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3394 tcg_gen_or_reg(dest, dest, tmp);
0b1347d2 3395 } else {
eaa3783b 3396 tcg_gen_shl_reg(dest, tmp, shift);
0b1347d2
RH
3397 }
3398 tcg_temp_free(shift);
3399 tcg_temp_free(mask);
3400 tcg_temp_free(tmp);
3401 save_gpr(ctx, rt, dest);
3402
3403 /* Install the new nullification. */
3404 cond_free(&ctx->null_cond);
3405 if (c) {
3406 ctx->null_cond = do_sed_cond(c, dest);
3407 }
31234768 3408 return nullify_end(ctx);
0b1347d2
RH
3409}
3410
30878590
RH
3411static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3412{
a6deecce
SS
3413 if (a->c) {
3414 nullify_over(ctx);
3415 }
30878590
RH
3416 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3417}
3418
3419static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3420{
a6deecce
SS
3421 if (a->c) {
3422 nullify_over(ctx);
3423 }
30878590
RH
3424 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3425}
0b1347d2 3426
8340f534 3427static bool trans_be(DisasContext *ctx, arg_be *a)
98cd9ca7 3428{
660eefe1 3429 TCGv_reg tmp;
98cd9ca7 3430
c301f34e 3431#ifdef CONFIG_USER_ONLY
98cd9ca7
RH
3432 /* ??? It seems like there should be a good way of using
3433 "be disp(sr2, r0)", the canonical gateway entry mechanism
3434 to our advantage. But that appears to be inconvenient to
3435 manage along side branch delay slots. Therefore we handle
3436 entry into the gateway page via absolute address. */
98cd9ca7
RH
3437 /* Since we don't implement spaces, just branch. Do notice the special
3438 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3439 goto_tb to the TB containing the syscall. */
8340f534
RH
3440 if (a->b == 0) {
3441 return do_dbranch(ctx, a->disp, a->l, a->n);
98cd9ca7 3442 }
c301f34e 3443#else
c301f34e 3444 nullify_over(ctx);
660eefe1
RH
3445#endif
3446
3447 tmp = get_temp(ctx);
8340f534 3448 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
660eefe1 3449 tmp = do_ibranch_priv(ctx, tmp);
c301f34e
RH
3450
3451#ifdef CONFIG_USER_ONLY
8340f534 3452 return do_ibranch(ctx, tmp, a->l, a->n);
c301f34e
RH
3453#else
3454 TCGv_i64 new_spc = tcg_temp_new_i64();
3455
8340f534
RH
3456 load_spr(ctx, new_spc, a->sp);
3457 if (a->l) {
c301f34e
RH
3458 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3459 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3460 }
8340f534 3461 if (a->n && use_nullify_skip(ctx)) {
c301f34e
RH
3462 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3463 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3464 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3465 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3466 } else {
3467 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3468 if (ctx->iaoq_b == -1) {
3469 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3470 }
3471 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3472 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
8340f534 3473 nullify_set(ctx, a->n);
c301f34e
RH
3474 }
3475 tcg_temp_free_i64(new_spc);
3476 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3477 ctx->base.is_jmp = DISAS_NORETURN;
3478 return nullify_end(ctx);
c301f34e 3479#endif
98cd9ca7
RH
3480}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}

static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
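
/*
 * Illustrative example for the gateway promotion above (assumed numbers,
 * not from the original source): the translator keeps the privilege
 * level in the low two bits of the IAOQ, so reaching a gateway page of
 * type 5 from privilege 3 executes deposit32(dest, 0, 2, 5 - 4), leaving
 * the branch target's low bits at 1 and continuing execution at
 * privilege level 1.
 */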

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level. */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX. */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
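
/*
 * As with trans_be above (an observation, not original commentary): the
 * user-only build has no space registers, so BVE reduces to a plain
 * indirect branch, while the system build derives the new space from
 * the branch target via space_select() and therefore cannot use
 * goto_tb chaining.
 */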

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);

    return nullify_end(ctx);
}
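
/*
 * Note (inferred from the surrounding code rather than stated here):
 * the fcmp helpers above record their result in the FP status word,
 * whose software copy (fr0_shadow) is what trans_ftest below examines.
 */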

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        tcg_temp_free(t);
    }

 done:
    return nullify_end(ctx);
}
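
/*
 * Illustrative note, inferred from the constants above rather than
 * stated in the source: the masks select bits of fr0_shadow, the
 * software copy of the FP status register.  Case 0 tests bit 26 alone
 * (0x4000000), the acc/acc8/acc6/acc4/acc2 cases cover progressively
 * narrower runs of lower-order bits, and the rej/rej8 cases reuse the
 * corresponding acc mask with the `inv` flag set.
 */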

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    tcg_temp_free_i64(y);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard. */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
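
/*
 * Worked example of the mapping above (illustration only): an encoded
 * field of 5 maps to 16 + 5 = 21, while 21 (the same low bits with
 * bit 4 set) maps to 32 + 16 + 5 = 53.
 */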

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
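
/*
 * Observation (not from the original comments): unlike the
 * single-precision forms, the double-precision variants above pass
 * their register fields through unchanged, without the
 * fmpyadd_s_reg() remapping.
 */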

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i32(y);
    tcg_temp_free_i32(z);
    save_frw_i32(a->t, x);
    tcg_temp_free_i32(x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i64(y);
    tcg_temp_free_i64(z);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
    cond_free(&ctx->null_cond);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV. */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}
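
/*
 * Illustrative arithmetic for the instruction bound above, assuming
 * 4 KiB pages (TARGET_PAGE_MASK == (target_ulong)-4096): with pc_first
 * at offset 0xff0 within its page,
 *     -(0xff0 | -4096) / 4 = -(-16) / 4 = 4
 * so at most four more insns fit before the page boundary is reached.
 */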

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated. */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a privilege change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
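
/*
 * Sketch of the queue handling above (a reading of the code, not text
 * from the original): iaoq_f and iaoq_b model the front and back of
 * the hardware instruction address queue.  After each insn the back
 * entry becomes the new front and the freshly computed iaoq_n becomes
 * the new back; a value of -1 means "not known at translation time",
 * in which case the TCG variable copies (iaoq_n_var, cpu_iaoq_b) are
 * used instead of constants.
 */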

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start = hppa_tr_tb_start,
    .insn_start = hppa_tr_insn_start,
    .translate_insn = hppa_tr_translate_insn,
    .tb_stop = hppa_tr_tb_stop,
    .disas_log = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;
    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}
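
/*
 * Note (inferred): data[0] and data[1] correspond to the two values
 * recorded by tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b) in
 * hppa_tr_insn_start(), which is why data[1] may hold the "unknown"
 * marker (target_ureg)-1 and is only committed when valid.
 */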