1/*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "disas/disas.h"
23#include "qemu/host-utils.h"
24#include "exec/exec-all.h"
25#include "tcg/tcg-op.h"
26#include "exec/cpu_ldst.h"
27#include "exec/helper-proto.h"
28#include "exec/helper-gen.h"
29#include "exec/translator.h"
30#include "exec/log.h"
31
32#define HELPER_H "helper.h"
33#include "exec/helper-info.c.inc"
34#undef HELPER_H
35
36
37/* Since we have a distinction between register size and address size,
38 we need to redefine all of these. */
39
40#undef TCGv
41#undef tcg_temp_new
42#undef tcg_global_mem_new
43
44#if TARGET_LONG_BITS == 64
45#define TCGv_tl TCGv_i64
46#define tcg_temp_new_tl tcg_temp_new_i64
47#if TARGET_REGISTER_BITS == 64
48#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
49#else
50#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
51#endif
52#else
53#define TCGv_tl TCGv_i32
54#define tcg_temp_new_tl tcg_temp_new_i32
55#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
56#endif
57
58#if TARGET_REGISTER_BITS == 64
59#define TCGv_reg TCGv_i64
60
61#define tcg_temp_new tcg_temp_new_i64
62#define tcg_global_mem_new tcg_global_mem_new_i64
63
64#define tcg_gen_movi_reg tcg_gen_movi_i64
65#define tcg_gen_mov_reg tcg_gen_mov_i64
66#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
67#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
68#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
69#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
70#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
71#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
72#define tcg_gen_ld_reg tcg_gen_ld_i64
73#define tcg_gen_st8_reg tcg_gen_st8_i64
74#define tcg_gen_st16_reg tcg_gen_st16_i64
75#define tcg_gen_st32_reg tcg_gen_st32_i64
76#define tcg_gen_st_reg tcg_gen_st_i64
77#define tcg_gen_add_reg tcg_gen_add_i64
78#define tcg_gen_addi_reg tcg_gen_addi_i64
79#define tcg_gen_sub_reg tcg_gen_sub_i64
80#define tcg_gen_neg_reg tcg_gen_neg_i64
81#define tcg_gen_subfi_reg tcg_gen_subfi_i64
82#define tcg_gen_subi_reg tcg_gen_subi_i64
83#define tcg_gen_and_reg tcg_gen_and_i64
84#define tcg_gen_andi_reg tcg_gen_andi_i64
85#define tcg_gen_or_reg tcg_gen_or_i64
86#define tcg_gen_ori_reg tcg_gen_ori_i64
87#define tcg_gen_xor_reg tcg_gen_xor_i64
88#define tcg_gen_xori_reg tcg_gen_xori_i64
89#define tcg_gen_not_reg tcg_gen_not_i64
90#define tcg_gen_shl_reg tcg_gen_shl_i64
91#define tcg_gen_shli_reg tcg_gen_shli_i64
92#define tcg_gen_shr_reg tcg_gen_shr_i64
93#define tcg_gen_shri_reg tcg_gen_shri_i64
94#define tcg_gen_sar_reg tcg_gen_sar_i64
95#define tcg_gen_sari_reg tcg_gen_sari_i64
96#define tcg_gen_brcond_reg tcg_gen_brcond_i64
97#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
98#define tcg_gen_setcond_reg tcg_gen_setcond_i64
99#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
100#define tcg_gen_mul_reg tcg_gen_mul_i64
101#define tcg_gen_muli_reg tcg_gen_muli_i64
102#define tcg_gen_div_reg tcg_gen_div_i64
103#define tcg_gen_rem_reg tcg_gen_rem_i64
104#define tcg_gen_divu_reg tcg_gen_divu_i64
105#define tcg_gen_remu_reg tcg_gen_remu_i64
106#define tcg_gen_discard_reg tcg_gen_discard_i64
107#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
108#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
109#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
110#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
111#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
112#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
113#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
114#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
115#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
116#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
117#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
118#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
119#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
120#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
121#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
122#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
123#define tcg_gen_andc_reg tcg_gen_andc_i64
124#define tcg_gen_eqv_reg tcg_gen_eqv_i64
125#define tcg_gen_nand_reg tcg_gen_nand_i64
126#define tcg_gen_nor_reg tcg_gen_nor_i64
127#define tcg_gen_orc_reg tcg_gen_orc_i64
128#define tcg_gen_clz_reg tcg_gen_clz_i64
129#define tcg_gen_ctz_reg tcg_gen_ctz_i64
130#define tcg_gen_clzi_reg tcg_gen_clzi_i64
131#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
132#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
133#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
134#define tcg_gen_rotl_reg tcg_gen_rotl_i64
135#define tcg_gen_rotli_reg tcg_gen_rotli_i64
136#define tcg_gen_rotr_reg tcg_gen_rotr_i64
137#define tcg_gen_rotri_reg tcg_gen_rotri_i64
138#define tcg_gen_deposit_reg tcg_gen_deposit_i64
139#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
140#define tcg_gen_extract_reg tcg_gen_extract_i64
141#define tcg_gen_sextract_reg tcg_gen_sextract_i64
142#define tcg_gen_extract2_reg tcg_gen_extract2_i64
143#define tcg_constant_reg tcg_constant_i64
144#define tcg_gen_movcond_reg tcg_gen_movcond_i64
145#define tcg_gen_add2_reg tcg_gen_add2_i64
146#define tcg_gen_sub2_reg tcg_gen_sub2_i64
147#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
148#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
149#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
150#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
151#else
152#define TCGv_reg TCGv_i32
153#define tcg_temp_new tcg_temp_new_i32
154#define tcg_global_mem_new tcg_global_mem_new_i32
155
156#define tcg_gen_movi_reg tcg_gen_movi_i32
157#define tcg_gen_mov_reg tcg_gen_mov_i32
158#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
159#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
160#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
161#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
162#define tcg_gen_ld32u_reg tcg_gen_ld_i32
163#define tcg_gen_ld32s_reg tcg_gen_ld_i32
164#define tcg_gen_ld_reg tcg_gen_ld_i32
165#define tcg_gen_st8_reg tcg_gen_st8_i32
166#define tcg_gen_st16_reg tcg_gen_st16_i32
167#define tcg_gen_st32_reg tcg_gen_st32_i32
168#define tcg_gen_st_reg tcg_gen_st_i32
169#define tcg_gen_add_reg tcg_gen_add_i32
170#define tcg_gen_addi_reg tcg_gen_addi_i32
171#define tcg_gen_sub_reg tcg_gen_sub_i32
172#define tcg_gen_neg_reg tcg_gen_neg_i32
173#define tcg_gen_subfi_reg tcg_gen_subfi_i32
174#define tcg_gen_subi_reg tcg_gen_subi_i32
175#define tcg_gen_and_reg tcg_gen_and_i32
176#define tcg_gen_andi_reg tcg_gen_andi_i32
177#define tcg_gen_or_reg tcg_gen_or_i32
178#define tcg_gen_ori_reg tcg_gen_ori_i32
179#define tcg_gen_xor_reg tcg_gen_xor_i32
180#define tcg_gen_xori_reg tcg_gen_xori_i32
181#define tcg_gen_not_reg tcg_gen_not_i32
182#define tcg_gen_shl_reg tcg_gen_shl_i32
183#define tcg_gen_shli_reg tcg_gen_shli_i32
184#define tcg_gen_shr_reg tcg_gen_shr_i32
185#define tcg_gen_shri_reg tcg_gen_shri_i32
186#define tcg_gen_sar_reg tcg_gen_sar_i32
187#define tcg_gen_sari_reg tcg_gen_sari_i32
188#define tcg_gen_brcond_reg tcg_gen_brcond_i32
189#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
190#define tcg_gen_setcond_reg tcg_gen_setcond_i32
191#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
192#define tcg_gen_mul_reg tcg_gen_mul_i32
193#define tcg_gen_muli_reg tcg_gen_muli_i32
194#define tcg_gen_div_reg tcg_gen_div_i32
195#define tcg_gen_rem_reg tcg_gen_rem_i32
196#define tcg_gen_divu_reg tcg_gen_divu_i32
197#define tcg_gen_remu_reg tcg_gen_remu_i32
198#define tcg_gen_discard_reg tcg_gen_discard_i32
199#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
200#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
201#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
202#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
203#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
204#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
205#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
206#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
207#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
208#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
209#define tcg_gen_ext32u_reg tcg_gen_mov_i32
210#define tcg_gen_ext32s_reg tcg_gen_mov_i32
211#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
212#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
213#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
214#define tcg_gen_andc_reg tcg_gen_andc_i32
215#define tcg_gen_eqv_reg tcg_gen_eqv_i32
216#define tcg_gen_nand_reg tcg_gen_nand_i32
217#define tcg_gen_nor_reg tcg_gen_nor_i32
218#define tcg_gen_orc_reg tcg_gen_orc_i32
219#define tcg_gen_clz_reg tcg_gen_clz_i32
220#define tcg_gen_ctz_reg tcg_gen_ctz_i32
221#define tcg_gen_clzi_reg tcg_gen_clzi_i32
222#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
223#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
224#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
225#define tcg_gen_rotl_reg tcg_gen_rotl_i32
226#define tcg_gen_rotli_reg tcg_gen_rotli_i32
227#define tcg_gen_rotr_reg tcg_gen_rotr_i32
228#define tcg_gen_rotri_reg tcg_gen_rotri_i32
229#define tcg_gen_deposit_reg tcg_gen_deposit_i32
230#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
231#define tcg_gen_extract_reg tcg_gen_extract_i32
232#define tcg_gen_sextract_reg tcg_gen_sextract_i32
233#define tcg_gen_extract2_reg tcg_gen_extract2_i32
234#define tcg_constant_reg tcg_constant_i32
235#define tcg_gen_movcond_reg tcg_gen_movcond_i32
236#define tcg_gen_add2_reg tcg_gen_add2_i32
237#define tcg_gen_sub2_reg tcg_gen_sub2_i32
238#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
239#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
240#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
241#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
242#endif /* TARGET_REGISTER_BITS */
243
244typedef struct DisasCond {
245 TCGCond c;
246 TCGv_reg a0, a1;
247} DisasCond;
248
249typedef struct DisasContext {
250 DisasContextBase base;
251 CPUState *cs;
252
253 target_ureg iaoq_f;
254 target_ureg iaoq_b;
255 target_ureg iaoq_n;
256 TCGv_reg iaoq_n_var;
257
258 int ntempr, ntempl;
259 TCGv_reg tempr[8];
260 TCGv_tl templ[4];
261
262 DisasCond null_cond;
263 TCGLabel *null_lab;
264
265 uint32_t insn;
266 uint32_t tb_flags;
267 int mmu_idx;
268 int privilege;
269 bool psw_n_nonzero;
270
271#ifdef CONFIG_USER_ONLY
272 MemOp unalign;
273#endif
274} DisasContext;
275
276#ifdef CONFIG_USER_ONLY
277#define UNALIGN(C) (C)->unalign
278#else
279#define UNALIGN(C) MO_ALIGN
280#endif
281
282/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
283static int expand_sm_imm(DisasContext *ctx, int val)
284{
285 if (val & PSW_SM_E) {
286 val = (val & ~PSW_SM_E) | PSW_E;
287 }
288 if (val & PSW_SM_W) {
289 val = (val & ~PSW_SM_W) | PSW_W;
290 }
291 return val;
292}
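/* That is: the E and W bits live at different positions in the ssm/rsm
   immediate (PSW_SM_E, PSW_SM_W) than in the PSW proper (PSW_E, PSW_W),
   so expand_sm_imm() relocates them before the value touches the PSW. */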
293
294/* The space register is passed inverted (~sp) so that 0 means sr0 itself, not "infer the space from the base register". */
295static int expand_sr3x(DisasContext *ctx, int val)
296{
297 return ~val;
298}
299
300/* Convert the M:A bits within a memory insn to the tri-state value
301 we use for the final M. */
302static int ma_to_m(DisasContext *ctx, int val)
303{
304 return val & 2 ? (val & 1 ? -1 : 1) : 0;
305}
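/* That is: M:A = 0x -> 0 (no base update), 10 -> +1 (post-modify),
   11 -> -1 (pre-modify), matching the modify convention used by the
   do_load/do_store helpers below. */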
306
307/* Convert the sign of the displacement to a pre or post-modify. */
308static int pos_to_m(DisasContext *ctx, int val)
309{
310 return val ? 1 : -1;
311}
312
313static int neg_to_m(DisasContext *ctx, int val)
314{
315 return val ? -1 : 1;
316}
317
318/* Used for branch targets and fp memory ops. */
319static int expand_shl2(DisasContext *ctx, int val)
320{
321 return val << 2;
322}
323
324/* Used for fp memory ops. */
325static int expand_shl3(DisasContext *ctx, int val)
326{
327 return val << 3;
328}
329
330/* Used for assemble_21. */
331static int expand_shl11(DisasContext *ctx, int val)
332{
333 return val << 11;
334}
335
336
337/* Include the auto-generated decoder. */
338#include "decode-insns.c.inc"
339
340/* We are not using a goto_tb (for whatever reason), but have updated
341 the iaq (for whatever reason), so don't do it again on exit. */
342#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
343
344/* We are exiting the TB, but have neither emitted a goto_tb, nor
345 updated the iaq for the next instruction to be executed. */
346#define DISAS_IAQ_N_STALE DISAS_TARGET_1
347
348/* Similarly, but we want to return to the main loop immediately
349 to recognize unmasked interrupts. */
350#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
351#define DISAS_EXIT DISAS_TARGET_3
352
353/* global register indexes */
354static TCGv_reg cpu_gr[32];
355static TCGv_i64 cpu_sr[4];
356static TCGv_i64 cpu_srH;
357static TCGv_reg cpu_iaoq_f;
358static TCGv_reg cpu_iaoq_b;
359static TCGv_i64 cpu_iasq_f;
360static TCGv_i64 cpu_iasq_b;
361static TCGv_reg cpu_sar;
362static TCGv_reg cpu_psw_n;
363static TCGv_reg cpu_psw_v;
364static TCGv_reg cpu_psw_cb;
365static TCGv_reg cpu_psw_cb_msb;
366
367void hppa_translate_init(void)
368{
369#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
370
371 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
372 static const GlobalVar vars[] = {
373 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
374 DEF_VAR(psw_n),
375 DEF_VAR(psw_v),
376 DEF_VAR(psw_cb),
377 DEF_VAR(psw_cb_msb),
378 DEF_VAR(iaoq_f),
379 DEF_VAR(iaoq_b),
380 };
381
382#undef DEF_VAR
383
384 /* Use the symbolic register names that match the disassembler. */
385 static const char gr_names[32][4] = {
386 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
387 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
388 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
389 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
390 };
391 /* SR[4-7] are not global registers so that we can index them. */
392 static const char sr_names[5][4] = {
393 "sr0", "sr1", "sr2", "sr3", "srH"
394 };
395
396 int i;
397
398 cpu_gr[0] = NULL;
399 for (i = 1; i < 32; i++) {
400 cpu_gr[i] = tcg_global_mem_new(cpu_env,
401 offsetof(CPUHPPAState, gr[i]),
402 gr_names[i]);
403 }
404 for (i = 0; i < 4; i++) {
405 cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
406 offsetof(CPUHPPAState, sr[i]),
407 sr_names[i]);
408 }
409 cpu_srH = tcg_global_mem_new_i64(cpu_env,
410 offsetof(CPUHPPAState, sr[4]),
411 sr_names[4]);
412
413 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
414 const GlobalVar *v = &vars[i];
415 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
416 }
417
418 cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
419 offsetof(CPUHPPAState, iasq_f),
420 "iasq_f");
421 cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
422 offsetof(CPUHPPAState, iasq_b),
423 "iasq_b");
424}
425
426static DisasCond cond_make_f(void)
427{
428 return (DisasCond){
429 .c = TCG_COND_NEVER,
430 .a0 = NULL,
431 .a1 = NULL,
432 };
433}
434
435static DisasCond cond_make_t(void)
436{
437 return (DisasCond){
438 .c = TCG_COND_ALWAYS,
439 .a0 = NULL,
440 .a1 = NULL,
441 };
442}
443
444static DisasCond cond_make_n(void)
445{
446 return (DisasCond){
447 .c = TCG_COND_NE,
448 .a0 = cpu_psw_n,
449 .a1 = tcg_constant_reg(0)
450 };
451}
452
453static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
454{
455 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
456 return (DisasCond){
457 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
458 };
459}
460
461static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
462{
463 TCGv_reg tmp = tcg_temp_new();
464 tcg_gen_mov_reg(tmp, a0);
465 return cond_make_0_tmp(c, tmp);
466}
467
468static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
469{
470 DisasCond r = { .c = c };
471
472 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
473 r.a0 = tcg_temp_new();
474 tcg_gen_mov_reg(r.a0, a0);
475 r.a1 = tcg_temp_new();
476 tcg_gen_mov_reg(r.a1, a1);
477
478 return r;
479}
480
481static void cond_free(DisasCond *cond)
482{
483 switch (cond->c) {
484 default:
485 cond->a0 = NULL;
486 cond->a1 = NULL;
487 /* fallthru */
488 case TCG_COND_ALWAYS:
489 cond->c = TCG_COND_NEVER;
490 break;
491 case TCG_COND_NEVER:
492 break;
493 }
494}
495
496static TCGv_reg get_temp(DisasContext *ctx)
497{
498 unsigned i = ctx->ntempr++;
499 g_assert(i < ARRAY_SIZE(ctx->tempr));
500 return ctx->tempr[i] = tcg_temp_new();
501}
502
503#ifndef CONFIG_USER_ONLY
504static TCGv_tl get_temp_tl(DisasContext *ctx)
505{
506 unsigned i = ctx->ntempl++;
507 g_assert(i < ARRAY_SIZE(ctx->templ));
508 return ctx->templ[i] = tcg_temp_new_tl();
509}
510#endif
511
512static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
513{
514 TCGv_reg t = get_temp(ctx);
515 tcg_gen_movi_reg(t, v);
516 return t;
517}
518
519static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
520{
521 if (reg == 0) {
522 TCGv_reg t = get_temp(ctx);
523 tcg_gen_movi_reg(t, 0);
524 return t;
525 } else {
526 return cpu_gr[reg];
527 }
528}
529
530static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
531{
532 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
533 return get_temp(ctx);
534 } else {
535 return cpu_gr[reg];
536 }
537}
538
eaa3783b 539static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
129e9cc3
RH
540{
541 if (ctx->null_cond.c != TCG_COND_NEVER) {
542 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
543 ctx->null_cond.a1, dest, t);
544 } else {
545 tcg_gen_mov_reg(dest, t);
546 }
547}
548
549static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
550{
551 if (reg != 0) {
552 save_or_nullify(ctx, cpu_gr[reg], t);
553 }
554}
555
556#if HOST_BIG_ENDIAN
557# define HI_OFS 0
558# define LO_OFS 4
559#else
560# define HI_OFS 4
561# define LO_OFS 0
562#endif
563
564static TCGv_i32 load_frw_i32(unsigned rt)
565{
566 TCGv_i32 ret = tcg_temp_new_i32();
567 tcg_gen_ld_i32(ret, cpu_env,
568 offsetof(CPUHPPAState, fr[rt & 31])
569 + (rt & 32 ? LO_OFS : HI_OFS));
570 return ret;
571}
572
573static TCGv_i32 load_frw0_i32(unsigned rt)
574{
575 if (rt == 0) {
576 TCGv_i32 ret = tcg_temp_new_i32();
577 tcg_gen_movi_i32(ret, 0);
578 return ret;
579 } else {
580 return load_frw_i32(rt);
581 }
582}
583
584static TCGv_i64 load_frw0_i64(unsigned rt)
585{
586 TCGv_i64 ret = tcg_temp_new_i64();
587 if (rt == 0) {
588 tcg_gen_movi_i64(ret, 0);
589 } else {
590 tcg_gen_ld32u_i64(ret, cpu_env,
591 offsetof(CPUHPPAState, fr[rt & 31])
592 + (rt & 32 ? LO_OFS : HI_OFS));
593 }
594 return ret;
595}
596
597static void save_frw_i32(unsigned rt, TCGv_i32 val)
598{
599 tcg_gen_st_i32(val, cpu_env,
600 offsetof(CPUHPPAState, fr[rt & 31])
601 + (rt & 32 ? LO_OFS : HI_OFS));
602}
603
604#undef HI_OFS
605#undef LO_OFS
606
607static TCGv_i64 load_frd(unsigned rt)
608{
609 TCGv_i64 ret = tcg_temp_new_i64();
610 tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
611 return ret;
612}
613
614static TCGv_i64 load_frd0(unsigned rt)
615{
616 if (rt == 0) {
617 TCGv_i64 ret = tcg_temp_new_i64();
618 tcg_gen_movi_i64(ret, 0);
619 return ret;
620 } else {
621 return load_frd(rt);
622 }
623}
624
625static void save_frd(unsigned rt, TCGv_i64 val)
626{
627 tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
628}
629
630static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
631{
632#ifdef CONFIG_USER_ONLY
633 tcg_gen_movi_i64(dest, 0);
634#else
635 if (reg < 4) {
636 tcg_gen_mov_i64(dest, cpu_sr[reg]);
637 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
638 tcg_gen_mov_i64(dest, cpu_srH);
639 } else {
640 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
641 }
642#endif
643}
644
645/* Skip over the implementation of an insn that has been nullified.
646 Use this when the insn is too complex for a conditional move. */
647static void nullify_over(DisasContext *ctx)
648{
649 if (ctx->null_cond.c != TCG_COND_NEVER) {
650 /* The always condition should have been handled in the main loop. */
651 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
652
653 ctx->null_lab = gen_new_label();
129e9cc3
RH
654
655 /* If we're using PSW[N], copy it to a temp because... */
656 if (ctx->null_cond.a0 == cpu_psw_n) {
657 ctx->null_cond.a0 = tcg_temp_new();
658 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
659 }
660 /* ... we clear it before branching over the implementation,
661 so that (1) it's clear after nullifying this insn and
662 (2) if this insn nullifies the next, PSW[N] is valid. */
663 if (ctx->psw_n_nonzero) {
664 ctx->psw_n_nonzero = false;
665 tcg_gen_movi_reg(cpu_psw_n, 0);
666 }
667
668 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
669 ctx->null_cond.a1, ctx->null_lab);
670 cond_free(&ctx->null_cond);
671 }
672}
673
674/* Save the current nullification state to PSW[N]. */
675static void nullify_save(DisasContext *ctx)
676{
677 if (ctx->null_cond.c == TCG_COND_NEVER) {
678 if (ctx->psw_n_nonzero) {
679 tcg_gen_movi_reg(cpu_psw_n, 0);
680 }
681 return;
682 }
683 if (ctx->null_cond.a0 != cpu_psw_n) {
684 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
685 ctx->null_cond.a0, ctx->null_cond.a1);
686 ctx->psw_n_nonzero = true;
687 }
688 cond_free(&ctx->null_cond);
689}
690
691/* Set a PSW[N] to X. The intention is that this is used immediately
692 before a goto_tb/exit_tb, so that there is no fallthru path to other
693 code within the TB. Therefore we do not update psw_n_nonzero. */
694static void nullify_set(DisasContext *ctx, bool x)
695{
696 if (ctx->psw_n_nonzero || x) {
697 tcg_gen_movi_reg(cpu_psw_n, x);
698 }
699}
700
701/* Mark the end of an instruction that may have been nullified.
702 This is the pair to nullify_over. Always returns true so that
703 it may be tail-called from a translate function. */
704static bool nullify_end(DisasContext *ctx)
705{
706 TCGLabel *null_lab = ctx->null_lab;
707 DisasJumpType status = ctx->base.is_jmp;
708
709 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
710 For UPDATED, we cannot update on the nullified path. */
711 assert(status != DISAS_IAQ_N_UPDATED);
712
713 if (likely(null_lab == NULL)) {
714 /* The current insn wasn't conditional or handled the condition
715 applied to it without a branch, so the (new) setting of
716 NULL_COND can be applied directly to the next insn. */
717 return true;
718 }
719 ctx->null_lab = NULL;
720
721 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
722 /* The next instruction will be unconditional,
723 and NULL_COND already reflects that. */
724 gen_set_label(null_lab);
725 } else {
726 /* The insn that we just executed is itself nullifying the next
727 instruction. Store the condition in the PSW[N] global.
728 We asserted PSW[N] = 0 in nullify_over, so that after the
729 label we have the proper value in place. */
730 nullify_save(ctx);
731 gen_set_label(null_lab);
732 ctx->null_cond = cond_make_n();
733 }
734 if (status == DISAS_NORETURN) {
735 ctx->base.is_jmp = DISAS_NEXT;
736 }
737 return true;
738}
739
740static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
741{
742 if (unlikely(ival == -1)) {
743 tcg_gen_mov_reg(dest, vval);
744 } else {
745 tcg_gen_movi_reg(dest, ival);
746 }
747}
748
749static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
750{
751 return ctx->iaoq_f + disp + 8;
752}
753
754static void gen_excp_1(int exception)
755{
756 gen_helper_excp(cpu_env, tcg_constant_i32(exception));
757}
758
759static void gen_excp(DisasContext *ctx, int exception)
760{
761 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
762 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
763 nullify_save(ctx);
764 gen_excp_1(exception);
765 ctx->base.is_jmp = DISAS_NORETURN;
766}
767
768static bool gen_excp_iir(DisasContext *ctx, int exc)
769{
770 nullify_over(ctx);
771 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
772 cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
773 gen_excp(ctx, exc);
774 return nullify_end(ctx);
775}
776
777static bool gen_illegal(DisasContext *ctx)
778{
779 return gen_excp_iir(ctx, EXCP_ILL);
780}
781
782#ifdef CONFIG_USER_ONLY
783#define CHECK_MOST_PRIVILEGED(EXCP) \
784 return gen_excp_iir(ctx, EXCP)
785#else
786#define CHECK_MOST_PRIVILEGED(EXCP) \
787 do { \
788 if (ctx->privilege != 0) { \
789 return gen_excp_iir(ctx, EXCP); \
790 } \
791 } while (0)
792#endif
793
794static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
795{
796 return translator_use_goto_tb(&ctx->base, dest);
797}
798
799/* If the next insn is to be nullified, and it's on the same page,
800 and we're not attempting to set a breakpoint on it, then we can
801 totally skip the nullified insn. This avoids creating and
802 executing a TB that merely branches to the next TB. */
803static bool use_nullify_skip(DisasContext *ctx)
804{
805 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
806 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
807}
808
809static void gen_goto_tb(DisasContext *ctx, int which,
810 target_ureg f, target_ureg b)
811{
812 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
813 tcg_gen_goto_tb(which);
814 tcg_gen_movi_reg(cpu_iaoq_f, f);
815 tcg_gen_movi_reg(cpu_iaoq_b, b);
816 tcg_gen_exit_tb(ctx->base.tb, which);
817 } else {
818 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
819 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
820 tcg_gen_lookup_and_goto_ptr();
821 }
822}
823
824static bool cond_need_sv(int c)
825{
826 return c == 2 || c == 3 || c == 6;
827}
828
829static bool cond_need_cb(int c)
830{
831 return c == 4 || c == 5;
832}
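/* Roughly: conditions 2, 3 and 6 (<, <=, SV) are the ones that look at
   signed overflow, while 4 and 5 (NUV, ZNV) look at the carry out of the
   most significant bit; compare the switch in do_cond() below. */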
833
834/*
835 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
836 * the Parisc 1.1 Architecture Reference Manual for details.
837 */
838
839static DisasCond do_cond(unsigned cf, TCGv_reg res,
840 TCGv_reg cb_msb, TCGv_reg sv)
841{
842 DisasCond cond;
843 TCGv_reg tmp;
844
845 switch (cf >> 1) {
846 case 0: /* Never / TR (0 / 1) */
847 cond = cond_make_f();
848 break;
849 case 1: /* = / <> (Z / !Z) */
850 cond = cond_make_0(TCG_COND_EQ, res);
851 break;
852 case 2: /* < / >= (N ^ V / !(N ^ V) */
853 tmp = tcg_temp_new();
854 tcg_gen_xor_reg(tmp, res, sv);
855 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
856 break;
857 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
858 /*
859 * Simplify:
860 * (N ^ V) | Z
861 * ((res < 0) ^ (sv < 0)) | !res
862 * ((res ^ sv) < 0) | !res
863 * (~(res ^ sv) >= 0) | !res
864 * !(~(res ^ sv) >> 31) | !res
865 * !(~(res ^ sv) >> 31 & res)
866 */
867 tmp = tcg_temp_new();
868 tcg_gen_eqv_reg(tmp, res, sv);
869 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
870 tcg_gen_and_reg(tmp, tmp, res);
871 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
872 break;
873 case 4: /* NUV / UV (!C / C) */
874 cond = cond_make_0(TCG_COND_EQ, cb_msb);
875 break;
876 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
877 tmp = tcg_temp_new();
878 tcg_gen_neg_reg(tmp, cb_msb);
879 tcg_gen_and_reg(tmp, tmp, res);
880 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
881 break;
882 case 6: /* SV / NSV (V / !V) */
883 cond = cond_make_0(TCG_COND_LT, sv);
884 break;
885 case 7: /* OD / EV */
886 tmp = tcg_temp_new();
887 tcg_gen_andi_reg(tmp, res, 1);
888 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
889 break;
890 default:
891 g_assert_not_reached();
892 }
893 if (cf & 1) {
894 cond.c = tcg_invert_cond(cond.c);
895 }
896
897 return cond;
898}
899
900/* Similar, but for the special case of subtraction without borrow, we
901 can use the inputs directly. This can allow other computation to be
902 deleted as unused. */
903
904static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
905 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
906{
907 DisasCond cond;
908
909 switch (cf >> 1) {
910 case 1: /* = / <> */
911 cond = cond_make(TCG_COND_EQ, in1, in2);
912 break;
913 case 2: /* < / >= */
914 cond = cond_make(TCG_COND_LT, in1, in2);
915 break;
916 case 3: /* <= / > */
917 cond = cond_make(TCG_COND_LE, in1, in2);
918 break;
919 case 4: /* << / >>= */
920 cond = cond_make(TCG_COND_LTU, in1, in2);
921 break;
922 case 5: /* <<= / >> */
923 cond = cond_make(TCG_COND_LEU, in1, in2);
924 break;
925 default:
926 return do_cond(cf, res, NULL, sv);
927 }
928 if (cf & 1) {
929 cond.c = tcg_invert_cond(cond.c);
930 }
931
932 return cond;
933}
934
935/*
936 * Similar, but for logicals, where the carry and overflow bits are not
937 * computed, and use of them is undefined.
938 *
939 * Undefined or not, hardware does not trap. It seems reasonable to
940 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
941 * how cases c={2,3} are treated.
942 */
943
944static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
945{
946 switch (cf) {
947 case 0: /* never */
948 case 9: /* undef, C */
949 case 11: /* undef, C & !Z */
950 case 12: /* undef, V */
951 return cond_make_f();
952
953 case 1: /* true */
954 case 8: /* undef, !C */
955 case 10: /* undef, !C | Z */
956 case 13: /* undef, !V */
957 return cond_make_t();
958
959 case 2: /* == */
960 return cond_make_0(TCG_COND_EQ, res);
961 case 3: /* <> */
962 return cond_make_0(TCG_COND_NE, res);
963 case 4: /* < */
964 return cond_make_0(TCG_COND_LT, res);
965 case 5: /* >= */
966 return cond_make_0(TCG_COND_GE, res);
967 case 6: /* <= */
968 return cond_make_0(TCG_COND_LE, res);
969 case 7: /* > */
970 return cond_make_0(TCG_COND_GT, res);
971
972 case 14: /* OD */
973 case 15: /* EV */
974 return do_cond(cf, res, NULL, NULL);
975
976 default:
977 g_assert_not_reached();
978 }
979}
980
981/* Similar, but for shift/extract/deposit conditions. */
982
983static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
984{
985 unsigned c, f;
986
987 /* Convert the compressed condition codes to standard.
988 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
989 4-7 are the reverse of 0-3. */
990 c = orig & 3;
991 if (c == 3) {
992 c = 7;
993 }
994 f = (orig & 4) / 4;
995
996 return do_log_cond(c * 2 + f, res);
997}
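/* For example, orig = 5 yields c = 1, f = 1, hence do_log_cond(3),
   i.e. the "<>" condition -- the negation of compressed condition 1. */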
998
999/* Similar, but for unit conditions. */
1000
1001static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
1002 TCGv_reg in1, TCGv_reg in2)
1003{
1004 DisasCond cond;
1005 TCGv_reg tmp, cb = NULL;
1006
1007 if (cf & 8) {
1008 /* Since we want to test lots of carry-out bits all at once, do not
1009 * do our normal thing and compute carry-in of bit B+1 since that
1010 * leaves us with carry bits spread across two words.
1011 */
1012 cb = tcg_temp_new();
1013 tmp = tcg_temp_new();
1014 tcg_gen_or_reg(cb, in1, in2);
1015 tcg_gen_and_reg(tmp, in1, in2);
1016 tcg_gen_andc_reg(cb, cb, res);
1017 tcg_gen_or_reg(cb, cb, tmp);
1018 }
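/* cb now holds the per-bit carry-out of in1 + in2: a bit carries out
   if both inputs had it set, or if either input had it set and the
   corresponding result bit is clear. */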
1019
1020 switch (cf >> 1) {
1021 case 0: /* never / TR */
1022 case 1: /* undefined */
1023 case 5: /* undefined */
1024 cond = cond_make_f();
1025 break;
1026
1027 case 2: /* SBZ / NBZ */
1028 /* See hasless(v,1) from
1029 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1030 */
1031 tmp = tcg_temp_new();
1032 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1033 tcg_gen_andc_reg(tmp, tmp, res);
1034 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
1035 cond = cond_make_0(TCG_COND_NE, tmp);
1036 break;
1037
1038 case 3: /* SHZ / NHZ */
1039 tmp = tcg_temp_new();
1040 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1041 tcg_gen_andc_reg(tmp, tmp, res);
1042 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
1043 cond = cond_make_0(TCG_COND_NE, tmp);
1044 break;
1045
1046 case 4: /* SDC / NDC */
1047 tcg_gen_andi_reg(cb, cb, 0x88888888u);
1048 cond = cond_make_0(TCG_COND_NE, cb);
1049 break;
1050
1051 case 6: /* SBC / NBC */
1052 tcg_gen_andi_reg(cb, cb, 0x80808080u);
1053 cond = cond_make_0(TCG_COND_NE, cb);
1054 break;
1055
1056 case 7: /* SHC / NHC */
1057 tcg_gen_andi_reg(cb, cb, 0x80008000u);
1058 cond = cond_make_0(TCG_COND_NE, cb);
1059 break;
1060
1061 default:
1062 g_assert_not_reached();
1063 }
1064 if (cf & 1) {
1065 cond.c = tcg_invert_cond(cond.c);
1066 }
1067
1068 return cond;
1069}
1070
1071/* Compute signed overflow for addition. */
1072static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1073 TCGv_reg in1, TCGv_reg in2)
b2167459 1074{
1075 TCGv_reg sv = get_temp(ctx);
1076 TCGv_reg tmp = tcg_temp_new();
1077
1078 tcg_gen_xor_reg(sv, res, in1);
1079 tcg_gen_xor_reg(tmp, in1, in2);
1080 tcg_gen_andc_reg(sv, sv, tmp);
1081
1082 return sv;
1083}
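/* The sign bit of sv is set exactly when in1 and in2 agree in sign but
   the result does not, i.e. on signed overflow of the addition. */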
1084
1085/* Compute signed overflow for subtraction. */
1086static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1087 TCGv_reg in1, TCGv_reg in2)
b2167459 1088{
1089 TCGv_reg sv = get_temp(ctx);
1090 TCGv_reg tmp = tcg_temp_new();
1091
1092 tcg_gen_xor_reg(sv, res, in1);
1093 tcg_gen_xor_reg(tmp, in1, in2);
1094 tcg_gen_and_reg(sv, sv, tmp);
1095
1096 return sv;
1097}
1098
1099static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1100 TCGv_reg in2, unsigned shift, bool is_l,
1101 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
1102{
1103 TCGv_reg dest, cb, cb_msb, sv, tmp;
1104 unsigned c = cf >> 1;
1105 DisasCond cond;
1106
1107 dest = tcg_temp_new();
1108 cb = NULL;
1109 cb_msb = NULL;
1110
1111 if (shift) {
1112 tmp = get_temp(ctx);
1113 tcg_gen_shli_reg(tmp, in1, shift);
1114 in1 = tmp;
1115 }
1116
1117 if (!is_l || cond_need_cb(c)) {
1118 TCGv_reg zero = tcg_constant_reg(0);
1119 cb_msb = get_temp(ctx);
1120 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
1121 if (is_c) {
1122 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
1123 }
1124 if (!is_l) {
1125 cb = get_temp(ctx);
1126 tcg_gen_xor_reg(cb, in1, in2);
1127 tcg_gen_xor_reg(cb, cb, dest);
1128 }
1129 } else {
1130 tcg_gen_add_reg(dest, in1, in2);
1131 if (is_c) {
1132 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
1133 }
1134 }
1135
1136 /* Compute signed overflow if required. */
1137 sv = NULL;
1138 if (is_tsv || cond_need_sv(c)) {
1139 sv = do_add_sv(ctx, dest, in1, in2);
1140 if (is_tsv) {
1141 /* ??? Need to include overflow from shift. */
1142 gen_helper_tsv(cpu_env, sv);
1143 }
1144 }
1145
1146 /* Emit any conditional trap before any writeback. */
1147 cond = do_cond(cf, dest, cb_msb, sv);
1148 if (is_tc) {
b2167459 1149 tmp = tcg_temp_new();
1150 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1151 gen_helper_tcond(cpu_env, tmp);
1152 }
1153
1154 /* Write back the result. */
1155 if (!is_l) {
1156 save_or_nullify(ctx, cpu_psw_cb, cb);
1157 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1158 }
1159 save_gpr(ctx, rt, dest);
1160
1161 /* Install the new nullification. */
1162 cond_free(&ctx->null_cond);
1163 ctx->null_cond = cond;
1164}
1165
1166static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1167 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1168{
1169 TCGv_reg tcg_r1, tcg_r2;
1170
1171 if (a->cf) {
1172 nullify_over(ctx);
1173 }
1174 tcg_r1 = load_gpr(ctx, a->r1);
1175 tcg_r2 = load_gpr(ctx, a->r2);
1176 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1177 return nullify_end(ctx);
1178}
1179
1180static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1181 bool is_tsv, bool is_tc)
1182{
1183 TCGv_reg tcg_im, tcg_r2;
1184
1185 if (a->cf) {
1186 nullify_over(ctx);
1187 }
1188 tcg_im = load_const(ctx, a->i);
1189 tcg_r2 = load_gpr(ctx, a->r);
1190 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1191 return nullify_end(ctx);
1192}
1193
1194static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1195 TCGv_reg in2, bool is_tsv, bool is_b,
1196 bool is_tc, unsigned cf)
b2167459 1197{
1198 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
1199 unsigned c = cf >> 1;
1200 DisasCond cond;
1201
1202 dest = tcg_temp_new();
1203 cb = tcg_temp_new();
1204 cb_msb = tcg_temp_new();
1205
1206 zero = tcg_constant_reg(0);
1207 if (is_b) {
1208 /* DEST,C = IN1 + ~IN2 + C. */
eaa3783b
RH
1209 tcg_gen_not_reg(cb, in2);
1210 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
1211 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1212 tcg_gen_xor_reg(cb, cb, in1);
1213 tcg_gen_xor_reg(cb, cb, dest);
1214 } else {
1215 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1216 operations by seeding the high word with 1 and subtracting. */
1217 tcg_gen_movi_reg(cb_msb, 1);
1218 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
1219 tcg_gen_eqv_reg(cb, in1, in2);
1220 tcg_gen_xor_reg(cb, cb, dest);
1221 }
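/* In the non-borrow path above, sub2 computes {1:in1} - {0:in2}: the low
   word is in1 + ~in2 + 1 and cb_msb receives its carry out (the high
   word, 1 minus the borrow). */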
1222
1223 /* Compute signed overflow if required. */
1224 sv = NULL;
1225 if (is_tsv || cond_need_sv(c)) {
1226 sv = do_sub_sv(ctx, dest, in1, in2);
1227 if (is_tsv) {
1228 gen_helper_tsv(cpu_env, sv);
1229 }
1230 }
1231
1232 /* Compute the condition. We cannot use the special case for borrow. */
1233 if (!is_b) {
1234 cond = do_sub_cond(cf, dest, in1, in2, sv);
1235 } else {
1236 cond = do_cond(cf, dest, cb_msb, sv);
1237 }
1238
1239 /* Emit any conditional trap before any writeback. */
1240 if (is_tc) {
b2167459 1241 tmp = tcg_temp_new();
1242 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1243 gen_helper_tcond(cpu_env, tmp);
1244 }
1245
1246 /* Write back the result. */
1247 save_or_nullify(ctx, cpu_psw_cb, cb);
1248 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1249 save_gpr(ctx, rt, dest);
b2167459
RH
1250
1251 /* Install the new nullification. */
1252 cond_free(&ctx->null_cond);
1253 ctx->null_cond = cond;
b2167459
RH
1254}
1255
1256static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1257 bool is_tsv, bool is_b, bool is_tc)
1258{
1259 TCGv_reg tcg_r1, tcg_r2;
1260
1261 if (a->cf) {
1262 nullify_over(ctx);
1263 }
1264 tcg_r1 = load_gpr(ctx, a->r1);
1265 tcg_r2 = load_gpr(ctx, a->r2);
1266 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1267 return nullify_end(ctx);
1268}
1269
1270static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1271{
1272 TCGv_reg tcg_im, tcg_r2;
1273
1274 if (a->cf) {
1275 nullify_over(ctx);
1276 }
1277 tcg_im = load_const(ctx, a->i);
1278 tcg_r2 = load_gpr(ctx, a->r);
1279 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1280 return nullify_end(ctx);
1281}
1282
1283static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1284 TCGv_reg in2, unsigned cf)
b2167459 1285{
1286 TCGv_reg dest, sv;
1287 DisasCond cond;
1288
1289 dest = tcg_temp_new();
1290 tcg_gen_sub_reg(dest, in1, in2);
1291
1292 /* Compute signed overflow if required. */
1293 sv = NULL;
1294 if (cond_need_sv(cf >> 1)) {
1295 sv = do_sub_sv(ctx, dest, in1, in2);
1296 }
1297
1298 /* Form the condition for the compare. */
1299 cond = do_sub_cond(cf, dest, in1, in2, sv);
1300
1301 /* Clear. */
1302 tcg_gen_movi_reg(dest, 0);
1303 save_gpr(ctx, rt, dest);
1304
1305 /* Install the new nullification. */
1306 cond_free(&ctx->null_cond);
1307 ctx->null_cond = cond;
1308}
1309
1310static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1311 TCGv_reg in2, unsigned cf,
1312 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
b2167459 1313{
1314 TCGv_reg dest = dest_gpr(ctx, rt);
1315
1316 /* Perform the operation, and writeback. */
1317 fn(dest, in1, in2);
1318 save_gpr(ctx, rt, dest);
1319
1320 /* Install the new nullification. */
1321 cond_free(&ctx->null_cond);
1322 if (cf) {
1323 ctx->null_cond = do_log_cond(cf, dest);
1324 }
1325}
1326
1327static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1328 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1329{
1330 TCGv_reg tcg_r1, tcg_r2;
1331
1332 if (a->cf) {
1333 nullify_over(ctx);
1334 }
1335 tcg_r1 = load_gpr(ctx, a->r1);
1336 tcg_r2 = load_gpr(ctx, a->r2);
1337 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1338 return nullify_end(ctx);
1339}
1340
1341static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1342 TCGv_reg in2, unsigned cf, bool is_tc,
1343 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
b2167459 1344{
1345 TCGv_reg dest;
1346 DisasCond cond;
1347
1348 if (cf == 0) {
1349 dest = dest_gpr(ctx, rt);
1350 fn(dest, in1, in2);
1351 save_gpr(ctx, rt, dest);
1352 cond_free(&ctx->null_cond);
1353 } else {
1354 dest = tcg_temp_new();
1355 fn(dest, in1, in2);
1356
1357 cond = do_unit_cond(cf, dest, in1, in2);
1358
1359 if (is_tc) {
1360 TCGv_reg tmp = tcg_temp_new();
1361 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1362 gen_helper_tcond(cpu_env, tmp);
1363 }
1364 save_gpr(ctx, rt, dest);
1365
1366 cond_free(&ctx->null_cond);
1367 ctx->null_cond = cond;
1368 }
1369}
1370
1371#ifndef CONFIG_USER_ONLY
1372/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1373 from the top 2 bits of the base register. There are a few system
1374 instructions that have a 3-bit space specifier, for which SR0 is
1375 not special. To handle this, pass ~SP. */
1376static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1377{
1378 TCGv_ptr ptr;
1379 TCGv_reg tmp;
1380 TCGv_i64 spc;
1381
1382 if (sp != 0) {
1383 if (sp < 0) {
1384 sp = ~sp;
1385 }
1386 spc = get_temp_tl(ctx);
1387 load_spr(ctx, spc, sp);
1388 return spc;
1389 }
1390 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1391 return cpu_srH;
1392 }
1393
1394 ptr = tcg_temp_new_ptr();
1395 tmp = tcg_temp_new();
1396 spc = get_temp_tl(ctx);
1397
1398 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1399 tcg_gen_andi_reg(tmp, tmp, 030);
1400 tcg_gen_trunc_reg_ptr(ptr, tmp);
86f8d05f
RH
1401
1402 tcg_gen_add_ptr(ptr, ptr, cpu_env);
1403 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1404
1405 return spc;
1406}
1407#endif
1408
1409static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1410 unsigned rb, unsigned rx, int scale, target_sreg disp,
1411 unsigned sp, int modify, bool is_phys)
1412{
1413 TCGv_reg base = load_gpr(ctx, rb);
1414 TCGv_reg ofs;
1415
1416 /* Note that RX is mutually exclusive with DISP. */
1417 if (rx) {
1418 ofs = get_temp(ctx);
1419 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1420 tcg_gen_add_reg(ofs, ofs, base);
1421 } else if (disp || modify) {
1422 ofs = get_temp(ctx);
1423 tcg_gen_addi_reg(ofs, base, disp);
1424 } else {
1425 ofs = base;
1426 }
1427
1428 *pofs = ofs;
1429#ifdef CONFIG_USER_ONLY
1430 *pgva = (modify <= 0 ? ofs : base);
1431#else
1432 TCGv_tl addr = get_temp_tl(ctx);
1433 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1434 if (ctx->tb_flags & PSW_W) {
1435 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
1436 }
1437 if (!is_phys) {
1438 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1439 }
1440 *pgva = addr;
1441#endif
1442}
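/* On system emulation the resulting GVA is therefore the offset (masked
   to 62 bits when PSW_W is set) OR'd with the selected space register,
   except for physical accesses; in user mode the offset is used as-is. */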
1443
1444/* Emit a memory load. The modify parameter should be
1445 * < 0 for pre-modify,
1446 * > 0 for post-modify,
1447 * = 0 for no base register update.
1448 */
1449static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1450 unsigned rx, int scale, target_sreg disp,
1451 unsigned sp, int modify, MemOp mop)
1452{
1453 TCGv_reg ofs;
1454 TCGv_tl addr;
1455
1456 /* Caller uses nullify_over/nullify_end. */
1457 assert(ctx->null_cond.c == TCG_COND_NEVER);
1458
1459 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1460 ctx->mmu_idx == MMU_PHYS_IDX);
1461 tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1462 if (modify) {
1463 save_gpr(ctx, rb, ofs);
96d6407f 1464 }
1465}
1466
1467static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1468 unsigned rx, int scale, target_sreg disp,
1469 unsigned sp, int modify, MemOp mop)
1470{
1471 TCGv_reg ofs;
1472 TCGv_tl addr;
1473
1474 /* Caller uses nullify_over/nullify_end. */
1475 assert(ctx->null_cond.c == TCG_COND_NEVER);
1476
1477 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1478 ctx->mmu_idx == MMU_PHYS_IDX);
1479 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1480 if (modify) {
1481 save_gpr(ctx, rb, ofs);
96d6407f 1482 }
1483}
1484
1485static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1486 unsigned rx, int scale, target_sreg disp,
1487 unsigned sp, int modify, MemOp mop)
1488{
1489 TCGv_reg ofs;
1490 TCGv_tl addr;
1491
1492 /* Caller uses nullify_over/nullify_end. */
1493 assert(ctx->null_cond.c == TCG_COND_NEVER);
1494
1495 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1496 ctx->mmu_idx == MMU_PHYS_IDX);
1497 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1498 if (modify) {
1499 save_gpr(ctx, rb, ofs);
96d6407f 1500 }
1501}
1502
1503static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1504 unsigned rx, int scale, target_sreg disp,
1505 unsigned sp, int modify, MemOp mop)
1506{
1507 TCGv_reg ofs;
1508 TCGv_tl addr;
1509
1510 /* Caller uses nullify_over/nullify_end. */
1511 assert(ctx->null_cond.c == TCG_COND_NEVER);
1512
1513 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1514 ctx->mmu_idx == MMU_PHYS_IDX);
1515 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1516 if (modify) {
1517 save_gpr(ctx, rb, ofs);
96d6407f 1518 }
1519}
1520
1521#if TARGET_REGISTER_BITS == 64
1522#define do_load_reg do_load_64
1523#define do_store_reg do_store_64
1524#else
1525#define do_load_reg do_load_32
1526#define do_store_reg do_store_32
1527#endif
1528
1529static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1530 unsigned rx, int scale, target_sreg disp,
1531 unsigned sp, int modify, MemOp mop)
1532{
1533 TCGv_reg dest;
1534
1535 nullify_over(ctx);
1536
1537 if (modify == 0) {
1538 /* No base register update. */
1539 dest = dest_gpr(ctx, rt);
1540 } else {
1541 /* Make sure if RT == RB, we see the result of the load. */
1542 dest = get_temp(ctx);
1543 }
1544 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1545 save_gpr(ctx, rt, dest);
1546
1547 return nullify_end(ctx);
1548}
1549
1550static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1551 unsigned rx, int scale, target_sreg disp,
1552 unsigned sp, int modify)
1553{
1554 TCGv_i32 tmp;
1555
1556 nullify_over(ctx);
1557
1558 tmp = tcg_temp_new_i32();
1559 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1560 save_frw_i32(rt, tmp);
1561
1562 if (rt == 0) {
1563 gen_helper_loaded_fr0(cpu_env);
1564 }
1565
1566 return nullify_end(ctx);
1567}
1568
1569static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1570{
1571 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1572 a->disp, a->sp, a->m);
1573}
1574
1575static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1576 unsigned rx, int scale, target_sreg disp,
1577 unsigned sp, int modify)
1578{
1579 TCGv_i64 tmp;
1580
1581 nullify_over(ctx);
1582
1583 tmp = tcg_temp_new_i64();
1584 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1585 save_frd(rt, tmp);
1586
1587 if (rt == 0) {
1588 gen_helper_loaded_fr0(cpu_env);
1589 }
1590
1591 return nullify_end(ctx);
1592}
1593
1594static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1595{
1596 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1597 a->disp, a->sp, a->m);
1598}
1599
1600static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1601 target_sreg disp, unsigned sp,
1602 int modify, MemOp mop)
1603{
1604 nullify_over(ctx);
1605 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1606 return nullify_end(ctx);
1607}
1608
1609static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1610 unsigned rx, int scale, target_sreg disp,
1611 unsigned sp, int modify)
1612{
1613 TCGv_i32 tmp;
1614
1615 nullify_over(ctx);
1616
1617 tmp = load_frw_i32(rt);
1618 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1619
1620 return nullify_end(ctx);
1621}
1622
1623static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1624{
1625 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1626 a->disp, a->sp, a->m);
1627}
1628
1629static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1630 unsigned rx, int scale, target_sreg disp,
1631 unsigned sp, int modify)
1632{
1633 TCGv_i64 tmp;
1634
1635 nullify_over(ctx);
1636
1637 tmp = load_frd(rt);
1638 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1639
1640 return nullify_end(ctx);
1641}
1642
1643static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1644{
1645 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1646 a->disp, a->sp, a->m);
1647}
1648
1649static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1650 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1651{
1652 TCGv_i32 tmp;
1653
1654 nullify_over(ctx);
1655 tmp = load_frw0_i32(ra);
1656
1657 func(tmp, cpu_env, tmp);
1658
1659 save_frw_i32(rt, tmp);
1660 return nullify_end(ctx);
1661}
1662
1663static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1664 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1665{
1666 TCGv_i32 dst;
1667 TCGv_i64 src;
1668
1669 nullify_over(ctx);
1670 src = load_frd(ra);
1671 dst = tcg_temp_new_i32();
1672
1673 func(dst, cpu_env, src);
1674
1675 save_frw_i32(rt, dst);
1676 return nullify_end(ctx);
1677}
1678
1679static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1680 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1681{
1682 TCGv_i64 tmp;
1683
1684 nullify_over(ctx);
1685 tmp = load_frd0(ra);
1686
1687 func(tmp, cpu_env, tmp);
1688
1689 save_frd(rt, tmp);
1690 return nullify_end(ctx);
1691}
1692
1693static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1694 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1695{
1696 TCGv_i32 src;
1697 TCGv_i64 dst;
1698
1699 nullify_over(ctx);
1700 src = load_frw0_i32(ra);
1701 dst = tcg_temp_new_i64();
1702
1703 func(dst, cpu_env, src);
1704
1705 save_frd(rt, dst);
1706 return nullify_end(ctx);
1707}
1708
1709static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1710 unsigned ra, unsigned rb,
1711 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1712{
1713 TCGv_i32 a, b;
1714
1715 nullify_over(ctx);
1716 a = load_frw0_i32(ra);
1717 b = load_frw0_i32(rb);
1718
1719 func(a, cpu_env, a, b);
1720
1721 save_frw_i32(rt, a);
1722 return nullify_end(ctx);
1723}
1724
1725static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1726 unsigned ra, unsigned rb,
1727 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1728{
1729 TCGv_i64 a, b;
1730
1731 nullify_over(ctx);
1732 a = load_frd0(ra);
1733 b = load_frd0(rb);
1734
1735 func(a, cpu_env, a, b);
1736
1737 save_frd(rt, a);
1738 return nullify_end(ctx);
1739}
1740
1741/* Emit an unconditional branch to a direct target, which may or may not
1742 have already had nullification handled. */
1743static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1744 unsigned link, bool is_n)
1745{
1746 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1747 if (link != 0) {
1748 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1749 }
1750 ctx->iaoq_n = dest;
1751 if (is_n) {
1752 ctx->null_cond.c = TCG_COND_ALWAYS;
1753 }
1754 } else {
1755 nullify_over(ctx);
1756
1757 if (link != 0) {
1758 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1759 }
1760
1761 if (is_n && use_nullify_skip(ctx)) {
1762 nullify_set(ctx, 0);
1763 gen_goto_tb(ctx, 0, dest, dest + 4);
1764 } else {
1765 nullify_set(ctx, is_n);
1766 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1767 }
1768
1769 nullify_end(ctx);
1770
1771 nullify_set(ctx, 0);
1772 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1773 ctx->base.is_jmp = DISAS_NORETURN;
1774 }
1775 return true;
1776}
1777
1778/* Emit a conditional branch to a direct target. If the branch itself
1779 is nullified, we should have already used nullify_over. */
01afb7be 1780static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
31234768 1781 DisasCond *cond)
98cd9ca7 1782{
eaa3783b 1783 target_ureg dest = iaoq_dest(ctx, disp);
98cd9ca7
RH
1784 TCGLabel *taken = NULL;
1785 TCGCond c = cond->c;
98cd9ca7
RH
1786 bool n;
1787
1788 assert(ctx->null_cond.c == TCG_COND_NEVER);
1789
1790 /* Handle TRUE and NEVER as direct branches. */
1791 if (c == TCG_COND_ALWAYS) {
01afb7be 1792 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
98cd9ca7
RH
1793 }
1794 if (c == TCG_COND_NEVER) {
01afb7be 1795 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
98cd9ca7
RH
1796 }
1797
1798 taken = gen_new_label();
eaa3783b 1799 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
98cd9ca7
RH
1800 cond_free(cond);
1801
1802 /* Not taken: Condition not satisfied; nullify on backward branches. */
1803 n = is_n && disp < 0;
1804 if (n && use_nullify_skip(ctx)) {
1805 nullify_set(ctx, 0);
a881c8e7 1806 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
98cd9ca7
RH
1807 } else {
1808 if (!n && ctx->null_lab) {
1809 gen_set_label(ctx->null_lab);
1810 ctx->null_lab = NULL;
1811 }
1812 nullify_set(ctx, n);
c301f34e
RH
1813 if (ctx->iaoq_n == -1) {
1814 /* The temporary iaoq_n_var died at the branch above.
1815 Regenerate it here instead of saving it. */
1816 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1817 }
a881c8e7 1818 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
98cd9ca7
RH
1819 }
1820
1821 gen_set_label(taken);
1822
1823 /* Taken: Condition satisfied; nullify on forward branches. */
1824 n = is_n && disp >= 0;
1825 if (n && use_nullify_skip(ctx)) {
1826 nullify_set(ctx, 0);
a881c8e7 1827 gen_goto_tb(ctx, 1, dest, dest + 4);
98cd9ca7
RH
1828 } else {
1829 nullify_set(ctx, n);
a881c8e7 1830 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
98cd9ca7
RH
1831 }
1832
1833 /* Not taken: the branch itself was nullified. */
1834 if (ctx->null_lab) {
1835 gen_set_label(ctx->null_lab);
1836 ctx->null_lab = NULL;
31234768 1837 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
98cd9ca7 1838 } else {
31234768 1839 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1840 }
01afb7be 1841 return true;
98cd9ca7
RH
1842}
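/* A restatement of the rule implemented above, for reference: with the ,n
   completer the following (delay-slot) instruction is nullified when a
   forward conditional branch is taken, or when a backward conditional
   branch is not taken.  Hence the taken path uses n = is_n && disp >= 0
   and the not-taken path uses n = is_n && disp < 0. */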
1843
1844/* Emit an unconditional branch to an indirect target. This handles
1845 nullification of the branch itself. */
01afb7be 1846static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
31234768 1847 unsigned link, bool is_n)
98cd9ca7 1848{
eaa3783b 1849 TCGv_reg a0, a1, next, tmp;
98cd9ca7
RH
1850 TCGCond c;
1851
1852 assert(ctx->null_lab == NULL);
1853
1854 if (ctx->null_cond.c == TCG_COND_NEVER) {
1855 if (link != 0) {
1856 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1857 }
1858 next = get_temp(ctx);
eaa3783b 1859 tcg_gen_mov_reg(next, dest);
98cd9ca7 1860 if (is_n) {
c301f34e
RH
1861 if (use_nullify_skip(ctx)) {
1862 tcg_gen_mov_reg(cpu_iaoq_f, next);
1863 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1864 nullify_set(ctx, 0);
31234768 1865 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
01afb7be 1866 return true;
c301f34e 1867 }
98cd9ca7
RH
1868 ctx->null_cond.c = TCG_COND_ALWAYS;
1869 }
c301f34e
RH
1870 ctx->iaoq_n = -1;
1871 ctx->iaoq_n_var = next;
98cd9ca7
RH
1872 } else if (is_n && use_nullify_skip(ctx)) {
1873 /* The (conditional) branch, B, nullifies the next insn, N,
1874 and we're allowed to skip execution of N (no single-step or
4137cb83 1875 tracepoint in effect). Since the goto_ptr that we must use
98cd9ca7
RH
1876 for the indirect branch consumes no special resources, we
1877 can (conditionally) skip B and continue execution. */
1878 /* The use_nullify_skip test implies we have a known control path. */
1879 tcg_debug_assert(ctx->iaoq_b != -1);
1880 tcg_debug_assert(ctx->iaoq_n != -1);
1881
1882 /* We do have to handle the non-local temporary, DEST, before
1883 branching. Since IAOQ_F is not really live at this point, we
1884 can simply store DEST optimistically. Similarly with IAOQ_B. */
eaa3783b
RH
1885 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1886 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
98cd9ca7
RH
1887
1888 nullify_over(ctx);
1889 if (link != 0) {
eaa3783b 1890 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
98cd9ca7 1891 }
7f11636d 1892 tcg_gen_lookup_and_goto_ptr();
01afb7be 1893 return nullify_end(ctx);
98cd9ca7 1894 } else {
98cd9ca7
RH
1895 c = ctx->null_cond.c;
1896 a0 = ctx->null_cond.a0;
1897 a1 = ctx->null_cond.a1;
1898
1899 tmp = tcg_temp_new();
1900 next = get_temp(ctx);
1901
1902 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
eaa3783b 1903 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
98cd9ca7
RH
1904 ctx->iaoq_n = -1;
1905 ctx->iaoq_n_var = next;
1906
1907 if (link != 0) {
eaa3783b 1908 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
98cd9ca7
RH
1909 }
1910
1911 if (is_n) {
1912 /* The branch nullifies the next insn, which means the state of N
1913 after the branch is the inverse of the state of N that applied
1914 to the branch. */
eaa3783b 1915 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
98cd9ca7
RH
1916 cond_free(&ctx->null_cond);
1917 ctx->null_cond = cond_make_n();
1918 ctx->psw_n_nonzero = true;
1919 } else {
1920 cond_free(&ctx->null_cond);
1921 }
1922 }
01afb7be 1923 return true;
98cd9ca7
RH
1924}
1925
660eefe1
RH
1926/* Implement
1927 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1928 * IAOQ_Next{30..31} ← GR[b]{30..31};
1929 * else
1930 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1931 * which keeps the privilege level from being increased.
1932 */
1933static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1934{
660eefe1
RH
1935 TCGv_reg dest;
1936 switch (ctx->privilege) {
1937 case 0:
1938 /* Privilege 0 is maximum and is allowed to decrease. */
1939 return offset;
1940 case 3:
993119fe 1941 /* Privilege 3 is minimum and is never allowed to increase. */
660eefe1
RH
1942 dest = get_temp(ctx);
1943 tcg_gen_ori_reg(dest, offset, 3);
1944 break;
1945 default:
993119fe 1946 dest = get_temp(ctx);
660eefe1
RH
1947 tcg_gen_andi_reg(dest, offset, -4);
1948 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1949 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
660eefe1
RH
1950 break;
1951 }
1952 return dest;
660eefe1
RH
1953}
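/* A worked example of the clamp above, with values chosen for
   illustration: at ctx->privilege == 2, a target whose low two bits
   request privilege 1 produces dest = (offset & -4) | 2, which is
   unsigned-greater than offset, so the movcond keeps the clamped value
   (privilege 2).  A target already at privilege 3 compares lower, so the
   original offset is kept.  Either way the branch can lower the privilege
   level (larger number) but never raise it. */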
1954
ba1d0b44 1955#ifdef CONFIG_USER_ONLY
7ad439df
RH
1956/* On Linux, page zero is normally marked execute only + gateway.
1957 Therefore normal read or write is supposed to fail, but specific
1958 offsets have kernel code mapped to raise permissions to implement
1959 system calls. Handling this via an explicit check here, rather
1960 than in the "be disp(sr2,r0)" instruction that probably sent us
1961 here, is the easiest way to handle the branch delay slot on the
1962 aforementioned BE. */
31234768 1963static void do_page_zero(DisasContext *ctx)
7ad439df
RH
1964{
1965 /* If by some means we get here with PSW[N]=1, that implies that
1966 the B,GATE instruction would be skipped, and we'd fault on the
8b81968c 1967 next insn within the privileged page. */
7ad439df
RH
1968 switch (ctx->null_cond.c) {
1969 case TCG_COND_NEVER:
1970 break;
1971 case TCG_COND_ALWAYS:
eaa3783b 1972 tcg_gen_movi_reg(cpu_psw_n, 0);
7ad439df
RH
1973 goto do_sigill;
1974 default:
1975 /* Since this is always the first (and only) insn within the
1976 TB, we should know the state of PSW[N] from TB->FLAGS. */
1977 g_assert_not_reached();
1978 }
1979
1980 /* Check that we didn't arrive here via some means that allowed
1981 non-sequential instruction execution. Normally the PSW[B] bit
1982 detects this by disallowing the B,GATE instruction to execute
1983 under such conditions. */
1984 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1985 goto do_sigill;
1986 }
1987
ebd0e151 1988 switch (ctx->iaoq_f & -4) {
7ad439df 1989 case 0x00: /* Null pointer call */
2986721d 1990 gen_excp_1(EXCP_IMP);
31234768
RH
1991 ctx->base.is_jmp = DISAS_NORETURN;
1992 break;
7ad439df
RH
1993
1994 case 0xb0: /* LWS */
1995 gen_excp_1(EXCP_SYSCALL_LWS);
31234768
RH
1996 ctx->base.is_jmp = DISAS_NORETURN;
1997 break;
7ad439df
RH
1998
1999 case 0xe0: /* SET_THREAD_POINTER */
35136a77 2000 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
ebd0e151 2001 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
eaa3783b 2002 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
31234768
RH
2003 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2004 break;
7ad439df
RH
2005
2006 case 0x100: /* SYSCALL */
2007 gen_excp_1(EXCP_SYSCALL);
31234768
RH
2008 ctx->base.is_jmp = DISAS_NORETURN;
2009 break;
7ad439df
RH
2010
2011 default:
2012 do_sigill:
2986721d 2013 gen_excp_1(EXCP_ILL);
31234768
RH
2014 ctx->base.is_jmp = DISAS_NORETURN;
2015 break;
7ad439df
RH
2016 }
2017}
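/* For context: on Linux/parisc, user space enters this page with a
   gateway branch such as "ble 0x100(%sr2, %r0)" -- syscall number in
   %r20 and return address in %r31, as assumed from the Linux parisc ABI
   rather than from this file.  The 0xb0 entry is the light-weight
   syscall (LWS) path used for atomic helpers.  That is why the offsets
   above map straight to EXCP_SYSCALL and friends instead of executing
   real kernel code. */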
ba1d0b44 2018#endif
7ad439df 2019
deee69a1 2020static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2021{
2022 cond_free(&ctx->null_cond);
31234768 2023 return true;
b2167459
RH
2024}
2025
40f9f908 2026static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2027{
31234768 2028 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2029}
2030
e36f27ef 2031static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2032{
2033 /* No point in nullifying the memory barrier. */
2034 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2035
2036 cond_free(&ctx->null_cond);
31234768 2037 return true;
98a9cb79
RH
2038}
2039
c603e14a 2040static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2041{
c603e14a 2042 unsigned rt = a->t;
eaa3783b
RH
2043 TCGv_reg tmp = dest_gpr(ctx, rt);
2044 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2045 save_gpr(ctx, rt, tmp);
2046
2047 cond_free(&ctx->null_cond);
31234768 2048 return true;
98a9cb79
RH
2049}
2050
c603e14a 2051static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2052{
c603e14a
RH
2053 unsigned rt = a->t;
2054 unsigned rs = a->sp;
33423472
RH
2055 TCGv_i64 t0 = tcg_temp_new_i64();
2056 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2057
33423472
RH
2058 load_spr(ctx, t0, rs);
2059 tcg_gen_shri_i64(t0, t0, 32);
2060 tcg_gen_trunc_i64_reg(t1, t0);
2061
2062 save_gpr(ctx, rt, t1);
98a9cb79
RH
2063
2064 cond_free(&ctx->null_cond);
31234768 2065 return true;
98a9cb79
RH
2066}
2067
c603e14a 2068static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2069{
c603e14a
RH
2070 unsigned rt = a->t;
2071 unsigned ctl = a->r;
eaa3783b 2072 TCGv_reg tmp;
98a9cb79
RH
2073
2074 switch (ctl) {
35136a77 2075 case CR_SAR:
98a9cb79 2076#ifdef TARGET_HPPA64
c603e14a 2077 if (a->e == 0) {
98a9cb79
RH
2078 /* MFSAR without ,W masks low 5 bits. */
2079 tmp = dest_gpr(ctx, rt);
eaa3783b 2080 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2081 save_gpr(ctx, rt, tmp);
35136a77 2082 goto done;
98a9cb79
RH
2083 }
2084#endif
2085 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2086 goto done;
2087 case CR_IT: /* Interval Timer */
2088 /* FIXME: Respect PSW_S bit. */
2089 nullify_over(ctx);
98a9cb79 2090 tmp = dest_gpr(ctx, rt);
dfd1b812 2091 if (translator_io_start(&ctx->base)) {
49c29d6c 2092 gen_helper_read_interval_timer(tmp);
31234768 2093 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2094 } else {
2095 gen_helper_read_interval_timer(tmp);
49c29d6c 2096 }
98a9cb79 2097 save_gpr(ctx, rt, tmp);
31234768 2098 return nullify_end(ctx);
98a9cb79 2099 case 26:
98a9cb79 2100 case 27:
98a9cb79
RH
2101 break;
2102 default:
2103 /* All other control registers are privileged. */
35136a77
RH
2104 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2105 break;
98a9cb79
RH
2106 }
2107
35136a77
RH
2108 tmp = get_temp(ctx);
2109 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2110 save_gpr(ctx, rt, tmp);
2111
2112 done:
98a9cb79 2113 cond_free(&ctx->null_cond);
31234768 2114 return true;
98a9cb79
RH
2115}
2116
c603e14a 2117static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2118{
c603e14a
RH
2119 unsigned rr = a->r;
2120 unsigned rs = a->sp;
33423472
RH
2121 TCGv_i64 t64;
2122
2123 if (rs >= 5) {
2124 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2125 }
2126 nullify_over(ctx);
2127
2128 t64 = tcg_temp_new_i64();
2129 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2130 tcg_gen_shli_i64(t64, t64, 32);
2131
2132 if (rs >= 4) {
2133 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2134 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2135 } else {
2136 tcg_gen_mov_i64(cpu_sr[rs], t64);
2137 }
33423472 2138
31234768 2139 return nullify_end(ctx);
33423472
RH
2140}
2141
c603e14a 2142static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2143{
c603e14a 2144 unsigned ctl = a->t;
4845f015 2145 TCGv_reg reg;
eaa3783b 2146 TCGv_reg tmp;
98a9cb79 2147
35136a77 2148 if (ctl == CR_SAR) {
4845f015 2149 reg = load_gpr(ctx, a->r);
98a9cb79 2150 tmp = tcg_temp_new();
35136a77 2151 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
98a9cb79 2152 save_or_nullify(ctx, cpu_sar, tmp);
35136a77
RH
2153
2154 cond_free(&ctx->null_cond);
31234768 2155 return true;
98a9cb79
RH
2156 }
2157
35136a77
RH
2158 /* All other control registers are privileged or read-only. */
2159 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2160
c603e14a 2161#ifndef CONFIG_USER_ONLY
35136a77 2162 nullify_over(ctx);
4845f015
SS
2163 reg = load_gpr(ctx, a->r);
2164
35136a77
RH
2165 switch (ctl) {
2166 case CR_IT:
49c29d6c 2167 gen_helper_write_interval_timer(cpu_env, reg);
35136a77 2168 break;
4f5f2548
RH
2169 case CR_EIRR:
2170 gen_helper_write_eirr(cpu_env, reg);
2171 break;
2172 case CR_EIEM:
2173 gen_helper_write_eiem(cpu_env, reg);
31234768 2174 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2175 break;
2176
35136a77
RH
2177 case CR_IIASQ:
2178 case CR_IIAOQ:
2179 /* FIXME: Respect PSW_Q bit */
2180 /* The write advances the queue and stores to the back element. */
2181 tmp = get_temp(ctx);
2182 tcg_gen_ld_reg(tmp, cpu_env,
2183 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2184 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2185 tcg_gen_st_reg(reg, cpu_env,
2186 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2187 break;
2188
d5de20bd
SS
2189 case CR_PID1:
2190 case CR_PID2:
2191 case CR_PID3:
2192 case CR_PID4:
2193 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2194#ifndef CONFIG_USER_ONLY
2195 gen_helper_change_prot_id(cpu_env);
2196#endif
2197 break;
2198
35136a77
RH
2199 default:
2200 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2201 break;
2202 }
31234768 2203 return nullify_end(ctx);
4f5f2548 2204#endif
98a9cb79
RH
2205}
2206
c603e14a 2207static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2208{
eaa3783b 2209 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2210
c603e14a 2211 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
eaa3783b 2212 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
98a9cb79 2213 save_or_nullify(ctx, cpu_sar, tmp);
98a9cb79
RH
2214
2215 cond_free(&ctx->null_cond);
31234768 2216 return true;
98a9cb79
RH
2217}
2218
e36f27ef 2219static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2220{
e36f27ef 2221 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2222
2330504c
HD
2223#ifdef CONFIG_USER_ONLY
2224 /* We don't implement space registers in user mode. */
eaa3783b 2225 tcg_gen_movi_reg(dest, 0);
2330504c 2226#else
2330504c
HD
2227 TCGv_i64 t0 = tcg_temp_new_i64();
2228
e36f27ef 2229 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2230 tcg_gen_shri_i64(t0, t0, 32);
2231 tcg_gen_trunc_i64_reg(dest, t0);
2330504c 2232#endif
e36f27ef 2233 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2234
2235 cond_free(&ctx->null_cond);
31234768 2236 return true;
98a9cb79
RH
2237}
2238
e36f27ef 2239static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2240{
e36f27ef
RH
2241 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2242#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2243 TCGv_reg tmp;
2244
e1b5a5ed
RH
2245 nullify_over(ctx);
2246
2247 tmp = get_temp(ctx);
2248 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2249 tcg_gen_andi_reg(tmp, tmp, ~a->i);
e1b5a5ed 2250 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2251 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2252
2253 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2254 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2255 return nullify_end(ctx);
e36f27ef 2256#endif
e1b5a5ed
RH
2257}
2258
e36f27ef 2259static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2260{
e36f27ef
RH
2261 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2262#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2263 TCGv_reg tmp;
2264
e1b5a5ed
RH
2265 nullify_over(ctx);
2266
2267 tmp = get_temp(ctx);
2268 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2269 tcg_gen_ori_reg(tmp, tmp, a->i);
e1b5a5ed 2270 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2271 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2272
2273 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2274 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2275 return nullify_end(ctx);
e36f27ef 2276#endif
e1b5a5ed
RH
2277}
2278
c603e14a 2279static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2280{
e1b5a5ed 2281 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2282#ifndef CONFIG_USER_ONLY
2283 TCGv_reg tmp, reg;
e1b5a5ed
RH
2284 nullify_over(ctx);
2285
c603e14a 2286 reg = load_gpr(ctx, a->r);
e1b5a5ed
RH
2287 tmp = get_temp(ctx);
2288 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2289
2290 /* Exit the TB to recognize new interrupts. */
31234768
RH
2291 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2292 return nullify_end(ctx);
c603e14a 2293#endif
e1b5a5ed 2294}
f49b3537 2295
e36f27ef 2296static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2297{
f49b3537 2298 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2299#ifndef CONFIG_USER_ONLY
f49b3537
RH
2300 nullify_over(ctx);
2301
e36f27ef 2302 if (rfi_r) {
f49b3537
RH
2303 gen_helper_rfi_r(cpu_env);
2304 } else {
2305 gen_helper_rfi(cpu_env);
2306 }
31234768 2307 /* Exit the TB to recognize new interrupts. */
8532a14e 2308 tcg_gen_exit_tb(NULL, 0);
31234768 2309 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2310
31234768 2311 return nullify_end(ctx);
e36f27ef
RH
2312#endif
2313}
2314
2315static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2316{
2317 return do_rfi(ctx, false);
2318}
2319
2320static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2321{
2322 return do_rfi(ctx, true);
f49b3537 2323}
6210db05 2324
96927adb
RH
2325static bool trans_halt(DisasContext *ctx, arg_halt *a)
2326{
2327 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2328#ifndef CONFIG_USER_ONLY
96927adb
RH
2329 nullify_over(ctx);
2330 gen_helper_halt(cpu_env);
2331 ctx->base.is_jmp = DISAS_NORETURN;
2332 return nullify_end(ctx);
2333#endif
2334}
2335
2336static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2337{
2338 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2339#ifndef CONFIG_USER_ONLY
6210db05 2340 nullify_over(ctx);
96927adb 2341 gen_helper_reset(cpu_env);
31234768
RH
2342 ctx->base.is_jmp = DISAS_NORETURN;
2343 return nullify_end(ctx);
96927adb 2344#endif
6210db05 2345}
e1b5a5ed 2346
4a4554c6
HD
2347static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2348{
2349 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2350#ifndef CONFIG_USER_ONLY
2351 nullify_over(ctx);
2352 gen_helper_getshadowregs(cpu_env);
2353 return nullify_end(ctx);
2354#endif
2355}
2356
deee69a1 2357static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2358{
deee69a1
RH
2359 if (a->m) {
2360 TCGv_reg dest = dest_gpr(ctx, a->b);
2361 TCGv_reg src1 = load_gpr(ctx, a->b);
2362 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2363
deee69a1
RH
2364 /* The only thing we need to do is the base register modification. */
2365 tcg_gen_add_reg(dest, src1, src2);
2366 save_gpr(ctx, a->b, dest);
2367 }
98a9cb79 2368 cond_free(&ctx->null_cond);
31234768 2369 return true;
98a9cb79
RH
2370}
2371
deee69a1 2372static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2373{
86f8d05f 2374 TCGv_reg dest, ofs;
eed14219 2375 TCGv_i32 level, want;
86f8d05f 2376 TCGv_tl addr;
98a9cb79
RH
2377
2378 nullify_over(ctx);
2379
deee69a1
RH
2380 dest = dest_gpr(ctx, a->t);
2381 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2382
deee69a1 2383 if (a->imm) {
29dd6f64 2384 level = tcg_constant_i32(a->ri);
98a9cb79 2385 } else {
eed14219 2386 level = tcg_temp_new_i32();
deee69a1 2387 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2388 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2389 }
29dd6f64 2390 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
eed14219
RH
2391
2392 gen_helper_probe(dest, cpu_env, addr, level, want);
2393
deee69a1 2394 save_gpr(ctx, a->t, dest);
31234768 2395 return nullify_end(ctx);
98a9cb79
RH
2396}
2397
deee69a1 2398static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2399{
deee69a1
RH
2400 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2401#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2402 TCGv_tl addr;
2403 TCGv_reg ofs, reg;
2404
8d6ae7fb
RH
2405 nullify_over(ctx);
2406
deee69a1
RH
2407 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2408 reg = load_gpr(ctx, a->r);
2409 if (a->addr) {
8d6ae7fb
RH
2410 gen_helper_itlba(cpu_env, addr, reg);
2411 } else {
2412 gen_helper_itlbp(cpu_env, addr, reg);
2413 }
2414
32dc7569
SS
2415 /* Exit TB for TLB change if mmu is enabled. */
2416 if (ctx->tb_flags & PSW_C) {
31234768
RH
2417 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2418 }
2419 return nullify_end(ctx);
deee69a1 2420#endif
8d6ae7fb 2421}
63300a00 2422
deee69a1 2423static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2424{
deee69a1
RH
2425 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2426#ifndef CONFIG_USER_ONLY
63300a00
RH
2427 TCGv_tl addr;
2428 TCGv_reg ofs;
2429
63300a00
RH
2430 nullify_over(ctx);
2431
deee69a1
RH
2432 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2433 if (a->m) {
2434 save_gpr(ctx, a->b, ofs);
63300a00 2435 }
deee69a1 2436 if (a->local) {
63300a00
RH
2437 gen_helper_ptlbe(cpu_env);
2438 } else {
2439 gen_helper_ptlb(cpu_env, addr);
2440 }
2441
2442 /* Exit TB for TLB change if mmu is enabled. */
6797c315
NH
2443 if (ctx->tb_flags & PSW_C) {
2444 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2445 }
2446 return nullify_end(ctx);
2447#endif
2448}
2449
2450/*
2451 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2452 * See
2453 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2454 * page 13-9 (195/206)
2455 */
2456static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2457{
2458 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2459#ifndef CONFIG_USER_ONLY
2460 TCGv_tl addr, atl, stl;
2461 TCGv_reg reg;
2462
2463 nullify_over(ctx);
2464
2465 /*
2466 * FIXME:
2467 * if (not (pcxl or pcxl2))
2468 * return gen_illegal(ctx);
2469 *
2470 * Note for future: these are 32-bit systems; no hppa64.
2471 */
2472
2473 atl = tcg_temp_new_tl();
2474 stl = tcg_temp_new_tl();
2475 addr = tcg_temp_new_tl();
2476
2477 tcg_gen_ld32u_i64(stl, cpu_env,
2478 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2479 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2480 tcg_gen_ld32u_i64(atl, cpu_env,
2481 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2482 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2483 tcg_gen_shli_i64(stl, stl, 32);
2484 tcg_gen_or_tl(addr, atl, stl);
6797c315
NH
2485
2486 reg = load_gpr(ctx, a->r);
2487 if (a->addr) {
2488 gen_helper_itlba(cpu_env, addr, reg);
2489 } else {
2490 gen_helper_itlbp(cpu_env, addr, reg);
2491 }
6797c315
NH
2492
2493 /* Exit TB for TLB change if mmu is enabled. */
32dc7569 2494 if (ctx->tb_flags & PSW_C) {
31234768
RH
2495 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2496 }
2497 return nullify_end(ctx);
deee69a1 2498#endif
63300a00 2499}
2dfcca9f 2500
deee69a1 2501static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2502{
deee69a1
RH
2503 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2504#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2505 TCGv_tl vaddr;
2506 TCGv_reg ofs, paddr;
2507
2dfcca9f
RH
2508 nullify_over(ctx);
2509
deee69a1 2510 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2511
2512 paddr = tcg_temp_new();
2513 gen_helper_lpa(paddr, cpu_env, vaddr);
2514
2515 /* Note that physical address result overrides base modification. */
deee69a1
RH
2516 if (a->m) {
2517 save_gpr(ctx, a->b, ofs);
2dfcca9f 2518 }
deee69a1 2519 save_gpr(ctx, a->t, paddr);
2dfcca9f 2520
31234768 2521 return nullify_end(ctx);
deee69a1 2522#endif
2dfcca9f 2523}
43a97b81 2524
deee69a1 2525static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2526{
43a97b81
RH
2527 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2528
2529 /* The Coherence Index is an implementation-defined function of the
2530 physical address. Two addresses with the same CI have a coherent
2531 view of the cache. Our implementation is to return 0 for all,
2532 since the entire address space is coherent. */
29dd6f64 2533 save_gpr(ctx, a->t, tcg_constant_reg(0));
43a97b81 2534
31234768
RH
2535 cond_free(&ctx->null_cond);
2536 return true;
43a97b81 2537}
98a9cb79 2538
0c982a28 2539static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2540{
0c982a28
RH
2541 return do_add_reg(ctx, a, false, false, false, false);
2542}
b2167459 2543
0c982a28
RH
2544static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2545{
2546 return do_add_reg(ctx, a, true, false, false, false);
2547}
b2167459 2548
0c982a28
RH
2549static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2550{
2551 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2552}
2553
0c982a28 2554static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2555{
0c982a28
RH
2556 return do_add_reg(ctx, a, false, false, false, true);
2557}
b2167459 2558
0c982a28
RH
2559static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2560{
2561 return do_add_reg(ctx, a, false, true, false, true);
2562}
b2167459 2563
0c982a28
RH
2564static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2565{
2566 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2567}
2568
0c982a28 2569static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2570{
0c982a28
RH
2571 return do_sub_reg(ctx, a, true, false, false);
2572}
b2167459 2573
0c982a28
RH
2574static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2575{
2576 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2577}
2578
0c982a28 2579static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2580{
0c982a28
RH
2581 return do_sub_reg(ctx, a, true, false, true);
2582}
2583
2584static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2585{
2586 return do_sub_reg(ctx, a, false, true, false);
2587}
2588
2589static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2590{
2591 return do_sub_reg(ctx, a, true, true, false);
2592}
2593
2594static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2595{
2596 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2597}
2598
2599static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2600{
2601 return do_log_reg(ctx, a, tcg_gen_and_reg);
2602}
2603
2604static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2605{
2606 if (a->cf == 0) {
2607 unsigned r2 = a->r2;
2608 unsigned r1 = a->r1;
2609 unsigned rt = a->t;
b2167459 2610
7aee8189
RH
2611 if (rt == 0) { /* NOP */
2612 cond_free(&ctx->null_cond);
2613 return true;
2614 }
2615 if (r2 == 0) { /* COPY */
2616 if (r1 == 0) {
2617 TCGv_reg dest = dest_gpr(ctx, rt);
2618 tcg_gen_movi_reg(dest, 0);
2619 save_gpr(ctx, rt, dest);
2620 } else {
2621 save_gpr(ctx, rt, cpu_gr[r1]);
2622 }
2623 cond_free(&ctx->null_cond);
2624 return true;
2625 }
2626#ifndef CONFIG_USER_ONLY
2627 /* These are QEMU extensions and are nops in the real architecture:
2628 *
2629 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2630 * or %r31,%r31,%r31 -- death loop; offline cpu
2631 * currently implemented as idle.
2632 */
2633 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
7aee8189
RH
2634 /* No need to check for supervisor, as userland can only pause
2635 until the next timer interrupt. */
2636 nullify_over(ctx);
2637
2638 /* Advance the instruction queue. */
2639 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2640 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2641 nullify_set(ctx, 0);
2642
2643 /* Tell the qemu main loop to halt until this cpu has work. */
29dd6f64
RH
2644 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2645 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
7aee8189
RH
2646 gen_excp_1(EXCP_HALTED);
2647 ctx->base.is_jmp = DISAS_NORETURN;
2648
2649 return nullify_end(ctx);
2650 }
2651#endif
b2167459 2652 }
0c982a28
RH
2653 return do_log_reg(ctx, a, tcg_gen_or_reg);
2654}
7aee8189 2655
0c982a28
RH
2656static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2657{
2658 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2659}
2660
0c982a28 2661static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2662{
eaa3783b 2663 TCGv_reg tcg_r1, tcg_r2;
b2167459 2664
0c982a28 2665 if (a->cf) {
b2167459
RH
2666 nullify_over(ctx);
2667 }
0c982a28
RH
2668 tcg_r1 = load_gpr(ctx, a->r1);
2669 tcg_r2 = load_gpr(ctx, a->r2);
2670 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2671 return nullify_end(ctx);
b2167459
RH
2672}
2673
0c982a28 2674static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2675{
eaa3783b 2676 TCGv_reg tcg_r1, tcg_r2;
b2167459 2677
0c982a28 2678 if (a->cf) {
b2167459
RH
2679 nullify_over(ctx);
2680 }
0c982a28
RH
2681 tcg_r1 = load_gpr(ctx, a->r1);
2682 tcg_r2 = load_gpr(ctx, a->r2);
2683 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2684 return nullify_end(ctx);
b2167459
RH
2685}
2686
0c982a28 2687static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2688{
eaa3783b 2689 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2690
0c982a28 2691 if (a->cf) {
b2167459
RH
2692 nullify_over(ctx);
2693 }
0c982a28
RH
2694 tcg_r1 = load_gpr(ctx, a->r1);
2695 tcg_r2 = load_gpr(ctx, a->r2);
b2167459 2696 tmp = get_temp(ctx);
eaa3783b 2697 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2698 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2699 return nullify_end(ctx);
b2167459
RH
2700}
2701
0c982a28
RH
2702static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2703{
2704 return do_uaddcm(ctx, a, false);
2705}
2706
2707static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2708{
2709 return do_uaddcm(ctx, a, true);
2710}
2711
2712static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2713{
eaa3783b 2714 TCGv_reg tmp;
b2167459
RH
2715
2716 nullify_over(ctx);
2717
2718 tmp = get_temp(ctx);
eaa3783b 2719 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2720 if (!is_i) {
eaa3783b 2721 tcg_gen_not_reg(tmp, tmp);
b2167459 2722 }
eaa3783b
RH
2723 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2724 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2725 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2726 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2727 return nullify_end(ctx);
b2167459
RH
2728}
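/* A sketch of the intended use, assuming the usual PA-RISC BCD idiom
   (not spelled out in this file): packed-decimal addition pre-biases one
   operand with 0x66666666, performs a binary ADD (leaving the per-digit
   carry-outs in PSW[CB]), and then DCOR subtracts 6 from every digit that
   did *not* carry -- exactly the ~(cb >> 3) & 0x11111111, times 6, value
   computed above.  IDCOR instead adds 6 back to the digits whose carry
   bit was set. */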
2729
0c982a28
RH
2730static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2731{
2732 return do_dcor(ctx, a, false);
2733}
2734
2735static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2736{
2737 return do_dcor(ctx, a, true);
2738}
2739
2740static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2741{
eaa3783b 2742 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
b2167459
RH
2743
2744 nullify_over(ctx);
2745
0c982a28
RH
2746 in1 = load_gpr(ctx, a->r1);
2747 in2 = load_gpr(ctx, a->r2);
b2167459
RH
2748
2749 add1 = tcg_temp_new();
2750 add2 = tcg_temp_new();
2751 addc = tcg_temp_new();
2752 dest = tcg_temp_new();
29dd6f64 2753 zero = tcg_constant_reg(0);
b2167459
RH
2754
2755 /* Form R1 << 1 | PSW[CB]{8}. */
eaa3783b
RH
2756 tcg_gen_add_reg(add1, in1, in1);
2757 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
b2167459
RH
2758
2759 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2760 carry{8} requires that we subtract via + ~R2 + 1, as described in
2761 the manual. By extracting and masking V, we can produce the
2762 proper inputs to the addition without movcond. */
eaa3783b
RH
2763 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2764 tcg_gen_xor_reg(add2, in2, addc);
2765 tcg_gen_andi_reg(addc, addc, 1);
b2167459
RH
2766 /* ??? This is only correct for 32-bit. */
2767 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2768 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2769
b2167459 2770 /* Write back the result register. */
0c982a28 2771 save_gpr(ctx, a->t, dest);
b2167459
RH
2772
2773 /* Write back PSW[CB]. */
eaa3783b
RH
2774 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2775 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
b2167459
RH
2776
2777 /* Write back PSW[V] for the division step. */
eaa3783b
RH
2778 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2779 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
b2167459
RH
2780
2781 /* Install the new nullification. */
0c982a28 2782 if (a->cf) {
eaa3783b 2783 TCGv_reg sv = NULL;
b47a4a02 2784 if (cond_need_sv(a->cf >> 1)) {
b2167459
RH
2785 /* ??? The lshift is supposed to contribute to overflow. */
2786 sv = do_add_sv(ctx, dest, add1, add2);
2787 }
0c982a28 2788 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
b2167459
RH
2789 }
2790
31234768 2791 return nullify_end(ctx);
b2167459
RH
2792}
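/* Usage note, a sketch of the conventional sequence rather than anything
   in this file: DS exists to build software division.  After an initial
   subtract primes PSW[V], the divide loop issues one DS per quotient bit;
   each step doubles the running value and folds in the previous carry
   (the R1 << 1 | PSW[CB] formed above), then adds or subtracts the
   divisor according to PSW[V] -- the classic non-restoring division
   recurrence. */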
2793
0588e061 2794static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2795{
0588e061
RH
2796 return do_add_imm(ctx, a, false, false);
2797}
b2167459 2798
0588e061
RH
2799static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2800{
2801 return do_add_imm(ctx, a, true, false);
b2167459
RH
2802}
2803
0588e061 2804static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2805{
0588e061
RH
2806 return do_add_imm(ctx, a, false, true);
2807}
b2167459 2808
0588e061
RH
2809static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2810{
2811 return do_add_imm(ctx, a, true, true);
2812}
b2167459 2813
0588e061
RH
2814static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2815{
2816 return do_sub_imm(ctx, a, false);
2817}
b2167459 2818
0588e061
RH
2819static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2820{
2821 return do_sub_imm(ctx, a, true);
b2167459
RH
2822}
2823
0588e061 2824static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2825{
eaa3783b 2826 TCGv_reg tcg_im, tcg_r2;
b2167459 2827
0588e061 2828 if (a->cf) {
b2167459
RH
2829 nullify_over(ctx);
2830 }
2831
0588e061
RH
2832 tcg_im = load_const(ctx, a->i);
2833 tcg_r2 = load_gpr(ctx, a->r);
2834 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2835
31234768 2836 return nullify_end(ctx);
b2167459
RH
2837}
2838
1cd012a5 2839static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2840{
0786a3b6
HD
2841 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2842 return gen_illegal(ctx);
2843 } else {
2844 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
1cd012a5 2845 a->disp, a->sp, a->m, a->size | MO_TE);
0786a3b6 2846 }
96d6407f
RH
2847}
2848
1cd012a5 2849static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2850{
1cd012a5 2851 assert(a->x == 0 && a->scale == 0);
0786a3b6
HD
2852 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2853 return gen_illegal(ctx);
2854 } else {
2855 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2856 }
96d6407f
RH
2857}
2858
1cd012a5 2859static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
96d6407f 2860{
b1af755c 2861 MemOp mop = MO_TE | MO_ALIGN | a->size;
86f8d05f
RH
2862 TCGv_reg zero, dest, ofs;
2863 TCGv_tl addr;
96d6407f
RH
2864
2865 nullify_over(ctx);
2866
1cd012a5 2867 if (a->m) {
86f8d05f
RH
2868 /* Base register modification. Make sure if RT == RB,
2869 we see the result of the load. */
96d6407f
RH
2870 dest = get_temp(ctx);
2871 } else {
1cd012a5 2872 dest = dest_gpr(ctx, a->t);
96d6407f
RH
2873 }
2874
1cd012a5
RH
2875 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2876 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
b1af755c
RH
2877
2878 /*
2879 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2880 * However actual hardware succeeds with aligned mod 4.
2881 * Detect this case and log a GUEST_ERROR.
2882 *
2883 * TODO: HPPA64 relaxes the over-alignment requirement
2884 * with the ,co completer.
2885 */
2886 gen_helper_ldc_check(addr);
2887
29dd6f64 2888 zero = tcg_constant_reg(0);
86f8d05f 2889 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
b1af755c 2890
1cd012a5
RH
2891 if (a->m) {
2892 save_gpr(ctx, a->b, ofs);
96d6407f 2893 }
1cd012a5 2894 save_gpr(ctx, a->t, dest);
96d6407f 2895
31234768 2896 return nullify_end(ctx);
96d6407f
RH
2897}
2898
1cd012a5 2899static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2900{
86f8d05f
RH
2901 TCGv_reg ofs, val;
2902 TCGv_tl addr;
96d6407f
RH
2903
2904 nullify_over(ctx);
2905
1cd012a5 2906 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2907 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2908 val = load_gpr(ctx, a->r);
2909 if (a->a) {
f9f46db4
EC
2910 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2911 gen_helper_stby_e_parallel(cpu_env, addr, val);
2912 } else {
2913 gen_helper_stby_e(cpu_env, addr, val);
2914 }
96d6407f 2915 } else {
f9f46db4
EC
2916 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2917 gen_helper_stby_b_parallel(cpu_env, addr, val);
2918 } else {
2919 gen_helper_stby_b(cpu_env, addr, val);
2920 }
96d6407f 2921 }
1cd012a5 2922 if (a->m) {
86f8d05f 2923 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2924 save_gpr(ctx, a->b, ofs);
96d6407f 2925 }
96d6407f 2926
31234768 2927 return nullify_end(ctx);
96d6407f
RH
2928}
2929
1cd012a5 2930static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2931{
2932 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2933
2934 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2935 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2936 trans_ld(ctx, a);
d0a851cc 2937 ctx->mmu_idx = hold_mmu_idx;
31234768 2938 return true;
d0a851cc
RH
2939}
2940
1cd012a5 2941static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2942{
2943 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2944
2945 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2946 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2947 trans_st(ctx, a);
d0a851cc 2948 ctx->mmu_idx = hold_mmu_idx;
31234768 2949 return true;
d0a851cc 2950}
95412a61 2951
0588e061 2952static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2953{
0588e061 2954 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2955
0588e061
RH
2956 tcg_gen_movi_reg(tcg_rt, a->i);
2957 save_gpr(ctx, a->t, tcg_rt);
b2167459 2958 cond_free(&ctx->null_cond);
31234768 2959 return true;
b2167459
RH
2960}
2961
0588e061 2962static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 2963{
0588e061 2964 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 2965 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 2966
0588e061 2967 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
2968 save_gpr(ctx, 1, tcg_r1);
2969 cond_free(&ctx->null_cond);
31234768 2970 return true;
b2167459
RH
2971}
2972
0588e061 2973static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 2974{
0588e061 2975 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
2976
2977 /* Special case rb == 0, for the LDI pseudo-op.
2978 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
2979 if (a->b == 0) {
2980 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 2981 } else {
0588e061 2982 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 2983 }
0588e061 2984 save_gpr(ctx, a->t, tcg_rt);
b2167459 2985 cond_free(&ctx->null_cond);
31234768 2986 return true;
b2167459
RH
2987}
2988
01afb7be
RH
2989static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2990 unsigned c, unsigned f, unsigned n, int disp)
98cd9ca7 2991{
01afb7be 2992 TCGv_reg dest, in2, sv;
98cd9ca7
RH
2993 DisasCond cond;
2994
98cd9ca7
RH
2995 in2 = load_gpr(ctx, r);
2996 dest = get_temp(ctx);
2997
eaa3783b 2998 tcg_gen_sub_reg(dest, in1, in2);
98cd9ca7 2999
f764718d 3000 sv = NULL;
b47a4a02 3001 if (cond_need_sv(c)) {
98cd9ca7
RH
3002 sv = do_sub_sv(ctx, dest, in1, in2);
3003 }
3004
01afb7be
RH
3005 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3006 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3007}
3008
01afb7be 3009static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
98cd9ca7 3010{
01afb7be
RH
3011 nullify_over(ctx);
3012 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3013}
98cd9ca7 3014
01afb7be
RH
3015static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3016{
98cd9ca7 3017 nullify_over(ctx);
01afb7be
RH
3018 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3019}
3020
3021static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3022 unsigned c, unsigned f, unsigned n, int disp)
3023{
3024 TCGv_reg dest, in2, sv, cb_msb;
3025 DisasCond cond;
98cd9ca7 3026
98cd9ca7 3027 in2 = load_gpr(ctx, r);
43675d20 3028 dest = tcg_temp_new();
f764718d
RH
3029 sv = NULL;
3030 cb_msb = NULL;
98cd9ca7 3031
b47a4a02 3032 if (cond_need_cb(c)) {
98cd9ca7 3033 cb_msb = get_temp(ctx);
eaa3783b
RH
3034 tcg_gen_movi_reg(cb_msb, 0);
3035 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
b47a4a02 3036 } else {
eaa3783b 3037 tcg_gen_add_reg(dest, in1, in2);
b47a4a02
SS
3038 }
3039 if (cond_need_sv(c)) {
98cd9ca7 3040 sv = do_add_sv(ctx, dest, in1, in2);
98cd9ca7
RH
3041 }
3042
01afb7be 3043 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
43675d20 3044 save_gpr(ctx, r, dest);
01afb7be 3045 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3046}
3047
01afb7be
RH
3048static bool trans_addb(DisasContext *ctx, arg_addb *a)
3049{
3050 nullify_over(ctx);
3051 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3052}
3053
3054static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3055{
3056 nullify_over(ctx);
3057 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3058}
3059
3060static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
98cd9ca7 3061{
eaa3783b 3062 TCGv_reg tmp, tcg_r;
98cd9ca7
RH
3063 DisasCond cond;
3064
3065 nullify_over(ctx);
3066
3067 tmp = tcg_temp_new();
01afb7be
RH
3068 tcg_r = load_gpr(ctx, a->r);
3069 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
98cd9ca7 3070
01afb7be 3071 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be 3072 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3073}
3074
01afb7be
RH
3075static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3076{
3077 TCGv_reg tmp, tcg_r;
3078 DisasCond cond;
3079
3080 nullify_over(ctx);
3081
3082 tmp = tcg_temp_new();
3083 tcg_r = load_gpr(ctx, a->r);
3084 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3085
3086 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be
RH
3087 return do_cbranch(ctx, a->disp, a->n, &cond);
3088}
3089
3090static bool trans_movb(DisasContext *ctx, arg_movb *a)
98cd9ca7 3091{
eaa3783b 3092 TCGv_reg dest;
98cd9ca7
RH
3093 DisasCond cond;
3094
3095 nullify_over(ctx);
3096
01afb7be
RH
3097 dest = dest_gpr(ctx, a->r2);
3098 if (a->r1 == 0) {
eaa3783b 3099 tcg_gen_movi_reg(dest, 0);
98cd9ca7 3100 } else {
01afb7be 3101 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
98cd9ca7
RH
3102 }
3103
01afb7be
RH
3104 cond = do_sed_cond(a->c, dest);
3105 return do_cbranch(ctx, a->disp, a->n, &cond);
3106}
3107
3108static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3109{
3110 TCGv_reg dest;
3111 DisasCond cond;
3112
3113 nullify_over(ctx);
3114
3115 dest = dest_gpr(ctx, a->r);
3116 tcg_gen_movi_reg(dest, a->i);
3117
3118 cond = do_sed_cond(a->c, dest);
3119 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3120}
3121
30878590 3122static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
0b1347d2 3123{
eaa3783b 3124 TCGv_reg dest;
0b1347d2 3125
30878590 3126 if (a->c) {
0b1347d2
RH
3127 nullify_over(ctx);
3128 }
3129
30878590
RH
3130 dest = dest_gpr(ctx, a->t);
3131 if (a->r1 == 0) {
3132 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
eaa3783b 3133 tcg_gen_shr_reg(dest, dest, cpu_sar);
30878590 3134 } else if (a->r1 == a->r2) {
0b1347d2 3135 TCGv_i32 t32 = tcg_temp_new_i32();
30878590 3136 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
0b1347d2 3137 tcg_gen_rotr_i32(t32, t32, cpu_sar);
eaa3783b 3138 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2
RH
3139 } else {
3140 TCGv_i64 t = tcg_temp_new_i64();
3141 TCGv_i64 s = tcg_temp_new_i64();
3142
30878590 3143 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
eaa3783b 3144 tcg_gen_extu_reg_i64(s, cpu_sar);
0b1347d2 3145 tcg_gen_shr_i64(t, t, s);
eaa3783b 3146 tcg_gen_trunc_i64_reg(dest, t);
0b1347d2 3147 }
30878590 3148 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3149
3150 /* Install the new nullification. */
3151 cond_free(&ctx->null_cond);
30878590
RH
3152 if (a->c) {
3153 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3154 }
31234768 3155 return nullify_end(ctx);
0b1347d2
RH
3156}
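/* All three cases above compute the same thing: the low 32 bits of the
   64-bit value {r1:r2} (r1 in the high half, r2 in the low half) shifted
   right by SAR.  r1 == 0 degenerates to a plain logical shift of r2 and
   r1 == r2 to a 32-bit rotate, which is why those take the cheaper
   paths. */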
3157
30878590 3158static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
0b1347d2 3159{
30878590 3160 unsigned sa = 31 - a->cpos;
eaa3783b 3161 TCGv_reg dest, t2;
0b1347d2 3162
30878590 3163 if (a->c) {
0b1347d2
RH
3164 nullify_over(ctx);
3165 }
3166
30878590
RH
3167 dest = dest_gpr(ctx, a->t);
3168 t2 = load_gpr(ctx, a->r2);
05bfd4db
RH
3169 if (a->r1 == 0) {
3170 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3171 } else if (TARGET_REGISTER_BITS == 32) {
3172 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3173 } else if (a->r1 == a->r2) {
0b1347d2 3174 TCGv_i32 t32 = tcg_temp_new_i32();
eaa3783b 3175 tcg_gen_trunc_reg_i32(t32, t2);
0b1347d2 3176 tcg_gen_rotri_i32(t32, t32, sa);
eaa3783b 3177 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2 3178 } else {
05bfd4db
RH
3179 TCGv_i64 t64 = tcg_temp_new_i64();
3180 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3181 tcg_gen_shri_i64(t64, t64, sa);
3182 tcg_gen_trunc_i64_reg(dest, t64);
0b1347d2 3183 }
30878590 3184 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3185
3186 /* Install the new nullification. */
3187 cond_free(&ctx->null_cond);
30878590
RH
3188 if (a->c) {
3189 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3190 }
31234768 3191 return nullify_end(ctx);
0b1347d2
RH
3192}
3193
30878590 3194static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
0b1347d2 3195{
30878590 3196 unsigned len = 32 - a->clen;
eaa3783b 3197 TCGv_reg dest, src, tmp;
0b1347d2 3198
30878590 3199 if (a->c) {
0b1347d2
RH
3200 nullify_over(ctx);
3201 }
3202
30878590
RH
3203 dest = dest_gpr(ctx, a->t);
3204 src = load_gpr(ctx, a->r);
0b1347d2
RH
3205 tmp = tcg_temp_new();
3206
3207 /* Recall that SAR is using big-endian bit numbering. */
eaa3783b 3208 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
30878590 3209 if (a->se) {
eaa3783b
RH
3210 tcg_gen_sar_reg(dest, src, tmp);
3211 tcg_gen_sextract_reg(dest, dest, 0, len);
0b1347d2 3212 } else {
eaa3783b
RH
3213 tcg_gen_shr_reg(dest, src, tmp);
3214 tcg_gen_extract_reg(dest, dest, 0, len);
0b1347d2 3215 }
30878590 3216 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3217
3218 /* Install the new nullification. */
3219 cond_free(&ctx->null_cond);
30878590
RH
3220 if (a->c) {
3221 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3222 }
31234768 3223 return nullify_end(ctx);
0b1347d2
RH
3224}
3225
30878590 3226static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
0b1347d2 3227{
30878590
RH
3228 unsigned len = 32 - a->clen;
3229 unsigned cpos = 31 - a->pos;
eaa3783b 3230 TCGv_reg dest, src;
0b1347d2 3231
30878590 3232 if (a->c) {
0b1347d2
RH
3233 nullify_over(ctx);
3234 }
3235
30878590
RH
3236 dest = dest_gpr(ctx, a->t);
3237 src = load_gpr(ctx, a->r);
3238 if (a->se) {
eaa3783b 3239 tcg_gen_sextract_reg(dest, src, cpos, len);
0b1347d2 3240 } else {
eaa3783b 3241 tcg_gen_extract_reg(dest, src, cpos, len);
0b1347d2 3242 }
30878590 3243 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3244
3245 /* Install the new nullification. */
3246 cond_free(&ctx->null_cond);
30878590
RH
3247 if (a->c) {
3248 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3249 }
31234768 3250 return nullify_end(ctx);
0b1347d2
RH
3251}
3252
30878590 3253static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
0b1347d2 3254{
30878590 3255 unsigned len = 32 - a->clen;
eaa3783b
RH
3256 target_sreg mask0, mask1;
3257 TCGv_reg dest;
0b1347d2 3258
30878590 3259 if (a->c) {
0b1347d2
RH
3260 nullify_over(ctx);
3261 }
30878590
RH
3262 if (a->cpos + len > 32) {
3263 len = 32 - a->cpos;
0b1347d2
RH
3264 }
3265
30878590
RH
3266 dest = dest_gpr(ctx, a->t);
3267 mask0 = deposit64(0, a->cpos, len, a->i);
3268 mask1 = deposit64(-1, a->cpos, len, a->i);
0b1347d2 3269
30878590
RH
3270 if (a->nz) {
3271 TCGv_reg src = load_gpr(ctx, a->t);
0b1347d2 3272 if (mask1 != -1) {
eaa3783b 3273 tcg_gen_andi_reg(dest, src, mask1);
0b1347d2
RH
3274 src = dest;
3275 }
eaa3783b 3276 tcg_gen_ori_reg(dest, src, mask0);
0b1347d2 3277 } else {
eaa3783b 3278 tcg_gen_movi_reg(dest, mask0);
0b1347d2 3279 }
30878590 3280 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3281
3282 /* Install the new nullification. */
3283 cond_free(&ctx->null_cond);
30878590
RH
3284 if (a->c) {
3285 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3286 }
31234768 3287 return nullify_end(ctx);
0b1347d2
RH
3288}
3289
30878590 3290static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
0b1347d2 3291{
30878590
RH
3292 unsigned rs = a->nz ? a->t : 0;
3293 unsigned len = 32 - a->clen;
eaa3783b 3294 TCGv_reg dest, val;
0b1347d2 3295
30878590 3296 if (a->c) {
0b1347d2
RH
3297 nullify_over(ctx);
3298 }
30878590
RH
3299 if (a->cpos + len > 32) {
3300 len = 32 - a->cpos;
0b1347d2
RH
3301 }
3302
30878590
RH
3303 dest = dest_gpr(ctx, a->t);
3304 val = load_gpr(ctx, a->r);
0b1347d2 3305 if (rs == 0) {
30878590 3306 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
0b1347d2 3307 } else {
30878590 3308 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
0b1347d2 3309 }
30878590 3310 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3311
3312 /* Install the new nullification. */
3313 cond_free(&ctx->null_cond);
30878590
RH
3314 if (a->c) {
3315 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3316 }
31234768 3317 return nullify_end(ctx);
0b1347d2
RH
3318}
3319
30878590
RH
3320static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3321 unsigned nz, unsigned clen, TCGv_reg val)
0b1347d2 3322{
0b1347d2
RH
3323 unsigned rs = nz ? rt : 0;
3324 unsigned len = 32 - clen;
30878590 3325 TCGv_reg mask, tmp, shift, dest;
0b1347d2
RH
3326 unsigned msb = 1U << (len - 1);
3327
0b1347d2
RH
3328 dest = dest_gpr(ctx, rt);
3329 shift = tcg_temp_new();
3330 tmp = tcg_temp_new();
3331
3332 /* Convert big-endian bit numbering in SAR to left-shift. */
eaa3783b 3333 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
0b1347d2 3334
0992a930
RH
3335 mask = tcg_temp_new();
3336 tcg_gen_movi_reg(mask, msb + (msb - 1));
eaa3783b 3337 tcg_gen_and_reg(tmp, val, mask);
0b1347d2 3338 if (rs) {
eaa3783b
RH
3339 tcg_gen_shl_reg(mask, mask, shift);
3340 tcg_gen_shl_reg(tmp, tmp, shift);
3341 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3342 tcg_gen_or_reg(dest, dest, tmp);
0b1347d2 3343 } else {
eaa3783b 3344 tcg_gen_shl_reg(dest, tmp, shift);
0b1347d2 3345 }
0b1347d2
RH
3346 save_gpr(ctx, rt, dest);
3347
3348 /* Install the new nullification. */
3349 cond_free(&ctx->null_cond);
3350 if (c) {
3351 ctx->null_cond = do_sed_cond(c, dest);
3352 }
31234768 3353 return nullify_end(ctx);
0b1347d2
RH
3354}
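/* Note on the xori above: 31 ^ x == 31 - x for any 5-bit x (and likewise
   63 ^ x on hppa64), so converting SAR's big-endian bit number into a
   left-shift count needs no subtraction.  For example, SAR == 28
   (big-endian bit 28, i.e. little-endian bit 3) yields a shift of 3. */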
3355
30878590
RH
3356static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3357{
a6deecce
SS
3358 if (a->c) {
3359 nullify_over(ctx);
3360 }
30878590
RH
3361 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3362}
3363
3364static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3365{
a6deecce
SS
3366 if (a->c) {
3367 nullify_over(ctx);
3368 }
30878590
RH
3369 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3370}
0b1347d2 3371
8340f534 3372static bool trans_be(DisasContext *ctx, arg_be *a)
98cd9ca7 3373{
660eefe1 3374 TCGv_reg tmp;
98cd9ca7 3375
c301f34e 3376#ifdef CONFIG_USER_ONLY
98cd9ca7
RH
3377 /* ??? It seems like there should be a good way of using
3378 "be disp(sr2, r0)", the canonical gateway entry mechanism
3379 to our advantage. But that appears to be inconvenient to
3380 manage alongside branch delay slots. Therefore we handle
3381 entry into the gateway page via absolute address. */
98cd9ca7
RH
3382 /* Since we don't implement spaces, just branch. Do notice the special
3383 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3384 goto_tb to the TB containing the syscall. */
8340f534
RH
3385 if (a->b == 0) {
3386 return do_dbranch(ctx, a->disp, a->l, a->n);
98cd9ca7 3387 }
c301f34e 3388#else
c301f34e 3389 nullify_over(ctx);
660eefe1
RH
3390#endif
3391
3392 tmp = get_temp(ctx);
8340f534 3393 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
660eefe1 3394 tmp = do_ibranch_priv(ctx, tmp);
c301f34e
RH
3395
3396#ifdef CONFIG_USER_ONLY
8340f534 3397 return do_ibranch(ctx, tmp, a->l, a->n);
c301f34e
RH
3398#else
3399 TCGv_i64 new_spc = tcg_temp_new_i64();
3400
8340f534
RH
3401 load_spr(ctx, new_spc, a->sp);
3402 if (a->l) {
c301f34e
RH
3403 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3404 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3405 }
8340f534 3406 if (a->n && use_nullify_skip(ctx)) {
c301f34e
RH
3407 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3408 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3409 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3410 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3411 } else {
3412 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3413 if (ctx->iaoq_b == -1) {
3414 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3415 }
3416 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3417 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
8340f534 3418 nullify_set(ctx, a->n);
c301f34e 3419 }
c301f34e 3420 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3421 ctx->base.is_jmp = DISAS_NORETURN;
3422 return nullify_end(ctx);
c301f34e 3423#endif
98cd9ca7
RH
3424}
3425
8340f534 3426static bool trans_bl(DisasContext *ctx, arg_bl *a)
98cd9ca7 3427{
8340f534 3428 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
98cd9ca7
RH
3429}
3430
8340f534 3431static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
43e05652 3432{
8340f534 3433 target_ureg dest = iaoq_dest(ctx, a->disp);
43e05652 3434
6e5f5300
SS
3435 nullify_over(ctx);
3436
43e05652
RH
3437 /* Make sure the caller hasn't done something weird with the queue.
3438 * ??? This is not quite the same as the PSW[B] bit, which would be
3439 * expensive to track. Real hardware will trap for
3440 * b gateway
3441 * b gateway+4 (in delay slot of first branch)
3442 * However, checking for a non-sequential instruction queue *will*
3443 * diagnose the security hole
3444 * b gateway
3445 * b evil
3446 * in which instructions at evil would run with increased privs.
3447 */
3448 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3449 return gen_illegal(ctx);
3450 }
3451
3452#ifndef CONFIG_USER_ONLY
3453 if (ctx->tb_flags & PSW_C) {
3454 CPUHPPAState *env = ctx->cs->env_ptr;
3455 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3456 /* If we could not find a TLB entry, then we need to generate an
3457 ITLB miss exception so the kernel will provide it.
3458 The resulting TLB fill operation will invalidate this TB and
3459 we will re-translate, at which point we *will* be able to find
3460 the TLB entry and determine if this is in fact a gateway page. */
3461 if (type < 0) {
31234768
RH
3462 gen_excp(ctx, EXCP_ITLB_MISS);
3463 return true;
43e05652
RH
3464 }
3465 /* No change for non-gateway pages or for priv decrease. */
3466 if (type >= 4 && type - 4 < ctx->privilege) {
3467 dest = deposit32(dest, 0, 2, type - 4);
3468 }
3469 } else {
3470 dest &= -4; /* priv = 0 */
3471 }
3472#endif
3473
6e5f5300
SS
3474 if (a->l) {
3475 TCGv_reg tmp = dest_gpr(ctx, a->l);
3476 if (ctx->privilege < 3) {
3477 tcg_gen_andi_reg(tmp, tmp, -4);
3478 }
3479 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3480 save_gpr(ctx, a->l, tmp);
3481 }
3482
3483 return do_dbranch(ctx, dest, 0, a->n);
43e05652
RH
3484}
3485
8340f534 3486static bool trans_blr(DisasContext *ctx, arg_blr *a)
98cd9ca7 3487{
b35aec85
RH
3488 if (a->x) {
3489 TCGv_reg tmp = get_temp(ctx);
3490 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3491 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3492 /* The computation here never changes privilege level. */
3493 return do_ibranch(ctx, tmp, a->l, a->n);
3494 } else {
3495 /* BLR R0,RX is a good way to load PC+8 into RX. */
3496 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3497 }
98cd9ca7
RH
3498}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}
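
/*
 * For BV the target is GR[b] + 8 * GR[x] (or GR[b] alone when x is 0);
 * e.g. GR[b] = 0x2000 and GR[x] = 3 give 0x2018.  Unlike the gateway
 * branch, do_ibranch_priv is intended to let the low two bits of the
 * target only lower the privilege level, never raise it.
 */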

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (TARGET_REGISTER_BITS == 64) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
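
/*
 * Illustrative note: fcpy, fabs, fneg and fnegabs above operate on the
 * raw IEEE bit pattern rather than going through softfloat.  For the
 * single-precision value 1.0 (0x3f800000): AND with INT32_MAX (fabs)
 * leaves it unchanged, XOR with INT32_MIN (fneg) gives 0xbf800000
 * (-1.0), and OR with INT32_MIN (fnegabs) also gives 0xbf800000.
 * Zeroes and NaNs are handled the same way, sign bit only.
 */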

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
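
/*
 * Informal naming summary for the conversions above: "f"/"d" are single
 * and double precision, "w"/"q" are signed 32- and 64-bit integers
 * ("uw"/"uq" the unsigned variants), and the "t_" forms call the
 * truncating (round-toward-zero) helpers instead of using the current
 * rounding mode.
 */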

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}
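
/*
 * Rough sketch of the fcmp/ftest pairing as implemented above: the fcmp
 * helpers record their result in env->fr0_shadow, a shadow copy of the
 * FP status register.  ftest then inspects that shadow: bit 26
 * (0x4000000) is the compare-result C bit used by the "simple" test,
 * while the acc/rej masks additionally cover the queued compare bits.
 * The outcome is folded into ctx->null_cond, i.e. it is expressed as
 * nullification of the following instruction rather than as a branch.
 */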

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}
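
/*
 * XMPYU is a fixed-point operation on the FP registers: the two 32-bit
 * operands are zero-extended and multiplied into a 64-bit result, e.g.
 * 0xffffffff * 0xffffffff = 0xfffffffe00000001, which save_frd() writes
 * to the 64-bit destination register.
 */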

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
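
/*
 * For example (values derived from the formula above): r = 5 maps to
 * 16 + 5 = 21, while r = 21 maps to (16 * 2) + 16 + 5 = 53.  The
 * five-bit encodings 0-15 and 16-31 thus select registers 16-31 and
 * 48-63 in the flat single-precision numbering used by the frw
 * load/store helpers.
 */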

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
    cond_free(&ctx->null_cond);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
                    PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}
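
/*
 * Example of the instruction bound above, assuming 4 KiB pages on a
 * 32-bit target: for an insn address 0x230 bytes into a page, the OR
 * with TARGET_PAGE_MASK yields 0xfffff230, so bound = 0xdd0 / 4 = 884
 * four-byte slots remain before the page boundary.  Likewise, in the
 * system-mode path the cs_base word packs IASQ_F in its high 32 bits
 * and the signed IAOQ_B - IAOQ_F difference in its low 32 bits, with 0
 * meaning the back of the queue is unknown (-1).
 */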

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Forget any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
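
/*
 * Sketch of the queue bookkeeping above: within a translation block the
 * queue normally advances as iaoq_f <- iaoq_b and iaoq_b <- iaoq_b + 4.
 * A value of -1 stands for "unknown at translation time", in which case
 * the TCG copies (cpu_iaoq_b / iaoq_n_var) carry the address and the
 * block ends with an indirect lookup rather than a direct goto_tb.
 */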

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}