/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

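/* A comparison computed at translate time: a TCG condition code plus its
   two operands.  Used both for arithmetic/logical condition results and
   for the per-instruction nullification state (null_cond), where
   TCG_COND_NEVER means "not conditional".  */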
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT  DISAS_TARGET_2
#define DISAS_EXIT              DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

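/* Allocate the TCG globals that mirror the architectural state declared
   above, binding each to its field in CPUHPPAState.  */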
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

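/* Constructors for DisasCond: never, always, a test of PSW[N], a
   comparison of A0 against zero, and a general A0 vs A1 comparison.
   cond_make_0 and cond_make copy their operands into fresh temporaries;
   the _tmp variant takes ownership of an existing temporary.  */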
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

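/* Allocate a register-width (or, below, address-width) temporary and
   record it in the per-instruction array held in the context.  */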
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

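/* Accessors for general registers.  GR0 is hardwired to zero: reads of r0
   return a fresh zero temporary, and writes to r0 (or any write made while
   a nullification condition is pending) go to a scratch temporary that is
   only conditionally copied back by save_gpr/save_or_nullify.  */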
static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

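/* Raise EXCP unless we are translating code at the most privileged level
   (privilege 0).  In user-only emulation such instructions always trap.  */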
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

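/* Emit a transfer to the instruction queue (F, B): chain directly to the
   next TB when both addresses are known and chaining is allowed, otherwise
   update IAOQ front/back and return to the main loop via goto_ptr.  */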
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

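/* The 4-bit condition field of arithmetic instructions: bits 3:1 select
   the test and bit 0 negates it.  These helpers report which tests need
   the signed-overflow (SV) or carry (CB) intermediate values, so callers
   can avoid computing them when unused.  */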
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}

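/* Emit an addition and compute its condition.  The flags select the
   instruction variants: SHIFT pre-shifts IN1, IS_L is the "logical" form
   that does not write the carry bits back, IS_TSV traps on signed
   overflow, IS_TC traps on the computed condition, and IS_C adds the
   saved carry from PSW[CB].  */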
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

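/* Emit a subtraction.  IS_B selects the "subtract with borrow" form that
   consumes the saved carry/borrow bit, IS_TSV traps on signed overflow,
   and IS_TC traps on the computed condition.  */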
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

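/* Form the address for a memory access: base register plus either a scaled
   index register or a displacement, optionally merged with the selected
   space register to produce the full virtual address in *PGVA.  *POFS
   receives the offset value used for base-register modification.  */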
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

31234768 1649 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
ebe9383c
RH
1650{
1651 TCGv_i32 tmp;
1652
1653 nullify_over(ctx);
1654 tmp = load_frw0_i32(ra);
1655
1656 func(tmp, cpu_env, tmp);
1657
1658 save_frw_i32(rt, tmp);
1ca74648 1659 return nullify_end(ctx);
ebe9383c
RH
1660}
1661
1ca74648 1662static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1663 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
ebe9383c
RH
1664{
1665 TCGv_i32 dst;
1666 TCGv_i64 src;
1667
1668 nullify_over(ctx);
1669 src = load_frd(ra);
1670 dst = tcg_temp_new_i32();
1671
1672 func(dst, cpu_env, src);
1673
ebe9383c 1674 save_frw_i32(rt, dst);
1ca74648 1675 return nullify_end(ctx);
ebe9383c
RH
1676}
1677
1ca74648 1678static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1679 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
ebe9383c
RH
1680{
1681 TCGv_i64 tmp;
1682
1683 nullify_over(ctx);
1684 tmp = load_frd0(ra);
1685
1686 func(tmp, cpu_env, tmp);
1687
1688 save_frd(rt, tmp);
1ca74648 1689 return nullify_end(ctx);
ebe9383c
RH
1690}
1691
1ca74648 1692static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1693 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
ebe9383c
RH
1694{
1695 TCGv_i32 src;
1696 TCGv_i64 dst;
1697
1698 nullify_over(ctx);
1699 src = load_frw0_i32(ra);
1700 dst = tcg_temp_new_i64();
1701
1702 func(dst, cpu_env, src);
1703
ebe9383c 1704 save_frd(rt, dst);
1ca74648 1705 return nullify_end(ctx);
ebe9383c
RH
1706}
1707
1ca74648 1708static bool do_fop_weww(DisasContext *ctx, unsigned rt,
31234768
RH
1709 unsigned ra, unsigned rb,
1710 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
ebe9383c
RH
1711{
1712 TCGv_i32 a, b;
1713
1714 nullify_over(ctx);
1715 a = load_frw0_i32(ra);
1716 b = load_frw0_i32(rb);
1717
1718 func(a, cpu_env, a, b);
1719
ebe9383c 1720 save_frw_i32(rt, a);
1ca74648 1721 return nullify_end(ctx);
ebe9383c
RH
1722}
1723
1ca74648 1724static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
31234768
RH
1725 unsigned ra, unsigned rb,
1726 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
ebe9383c
RH
1727{
1728 TCGv_i64 a, b;
1729
1730 nullify_over(ctx);
1731 a = load_frd0(ra);
1732 b = load_frd0(rb);
1733
1734 func(a, cpu_env, a, b);
1735
ebe9383c 1736 save_frd(rt, a);
1ca74648 1737 return nullify_end(ctx);
ebe9383c
RH
1738}
1739
98cd9ca7
RH
1740/* Emit an unconditional branch to a direct target, which may or may not
1741 have already had nullification handled. */
01afb7be 1742static bool do_dbranch(DisasContext *ctx, target_ureg dest,
31234768 1743 unsigned link, bool is_n)
98cd9ca7
RH
1744{
1745 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1746 if (link != 0) {
1747 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1748 }
1749 ctx->iaoq_n = dest;
1750 if (is_n) {
1751 ctx->null_cond.c = TCG_COND_ALWAYS;
1752 }
98cd9ca7
RH
1753 } else {
1754 nullify_over(ctx);
1755
1756 if (link != 0) {
1757 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1758 }
1759
1760 if (is_n && use_nullify_skip(ctx)) {
1761 nullify_set(ctx, 0);
1762 gen_goto_tb(ctx, 0, dest, dest + 4);
1763 } else {
1764 nullify_set(ctx, is_n);
1765 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1766 }
1767
31234768 1768 nullify_end(ctx);
98cd9ca7
RH
1769
1770 nullify_set(ctx, 0);
1771 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
31234768 1772 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1773 }
01afb7be 1774 return true;
98cd9ca7
RH
1775}
1776
1777/* Emit a conditional branch to a direct target. If the branch itself
1778 is nullified, we should have already used nullify_over. */
01afb7be 1779static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
31234768 1780 DisasCond *cond)
98cd9ca7 1781{
eaa3783b 1782 target_ureg dest = iaoq_dest(ctx, disp);
98cd9ca7
RH
1783 TCGLabel *taken = NULL;
1784 TCGCond c = cond->c;
98cd9ca7
RH
1785 bool n;
1786
1787 assert(ctx->null_cond.c == TCG_COND_NEVER);
1788
1789 /* Handle TRUE and NEVER as direct branches. */
1790 if (c == TCG_COND_ALWAYS) {
01afb7be 1791 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
98cd9ca7
RH
1792 }
1793 if (c == TCG_COND_NEVER) {
01afb7be 1794 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
98cd9ca7
RH
1795 }
1796
1797 taken = gen_new_label();
eaa3783b 1798 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
98cd9ca7
RH
1799 cond_free(cond);
1800
1801 /* Not taken: Condition not satisfied; nullify on backward branches. */
1802 n = is_n && disp < 0;
1803 if (n && use_nullify_skip(ctx)) {
1804 nullify_set(ctx, 0);
a881c8e7 1805 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
98cd9ca7
RH
1806 } else {
1807 if (!n && ctx->null_lab) {
1808 gen_set_label(ctx->null_lab);
1809 ctx->null_lab = NULL;
1810 }
1811 nullify_set(ctx, n);
c301f34e
RH
1812 if (ctx->iaoq_n == -1) {
1813 /* The temporary iaoq_n_var died at the branch above.
1814 Regenerate it here instead of saving it. */
1815 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1816 }
a881c8e7 1817 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
98cd9ca7
RH
1818 }
1819
1820 gen_set_label(taken);
1821
1822 /* Taken: Condition satisfied; nullify on forward branches. */
1823 n = is_n && disp >= 0;
1824 if (n && use_nullify_skip(ctx)) {
1825 nullify_set(ctx, 0);
a881c8e7 1826 gen_goto_tb(ctx, 1, dest, dest + 4);
98cd9ca7
RH
1827 } else {
1828 nullify_set(ctx, n);
a881c8e7 1829 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
98cd9ca7
RH
1830 }
1831
1832 /* Not taken: the branch itself was nullified. */
1833 if (ctx->null_lab) {
1834 gen_set_label(ctx->null_lab);
1835 ctx->null_lab = NULL;
31234768 1836 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
98cd9ca7 1837 } else {
31234768 1838 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1839 }
01afb7be 1840 return true;
98cd9ca7
RH
1841}
1842
1843/* Emit an unconditional branch to an indirect target. This handles
1844 nullification of the branch itself. */
01afb7be 1845static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
31234768 1846 unsigned link, bool is_n)
98cd9ca7 1847{
eaa3783b 1848 TCGv_reg a0, a1, next, tmp;
98cd9ca7
RH
1849 TCGCond c;
1850
1851 assert(ctx->null_lab == NULL);
1852
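    /* Three cases below: no pending nullification, a nullifying branch whose
       delay slot we may skip outright, and the general case that resolves the
       pending condition with a movcond. */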
1853 if (ctx->null_cond.c == TCG_COND_NEVER) {
1854 if (link != 0) {
1855 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1856 }
1857 next = get_temp(ctx);
eaa3783b 1858 tcg_gen_mov_reg(next, dest);
98cd9ca7 1859 if (is_n) {
c301f34e
RH
1860 if (use_nullify_skip(ctx)) {
1861 tcg_gen_mov_reg(cpu_iaoq_f, next);
1862 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1863 nullify_set(ctx, 0);
31234768 1864 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
01afb7be 1865 return true;
c301f34e 1866 }
98cd9ca7
RH
1867 ctx->null_cond.c = TCG_COND_ALWAYS;
1868 }
c301f34e
RH
1869 ctx->iaoq_n = -1;
1870 ctx->iaoq_n_var = next;
98cd9ca7
RH
1871 } else if (is_n && use_nullify_skip(ctx)) {
1872 /* The (conditional) branch, B, nullifies the next insn, N,
1873 and we're allowed to skip execution of N (no single-step or
4137cb83 1874 tracepoint in effect). Since the goto_ptr that we must use
98cd9ca7
RH
1875 for the indirect branch consumes no special resources, we
1876 can (conditionally) skip B and continue execution. */
1877 /* The use_nullify_skip test implies we have a known control path. */
1878 tcg_debug_assert(ctx->iaoq_b != -1);
1879 tcg_debug_assert(ctx->iaoq_n != -1);
1880
1881 /* We do have to handle the non-local temporary, DEST, before
1882 branching. Since IAOQ_F is not really live at this point, we
1883 can simply store DEST optimistically. Similarly with IAOQ_B. */
eaa3783b
RH
1884 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1885 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
98cd9ca7
RH
1886
1887 nullify_over(ctx);
1888 if (link != 0) {
eaa3783b 1889 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
98cd9ca7 1890 }
7f11636d 1891 tcg_gen_lookup_and_goto_ptr();
01afb7be 1892 return nullify_end(ctx);
98cd9ca7 1893 } else {
98cd9ca7
RH
1894 c = ctx->null_cond.c;
1895 a0 = ctx->null_cond.a0;
1896 a1 = ctx->null_cond.a1;
1897
1898 tmp = tcg_temp_new();
1899 next = get_temp(ctx);
1900
1901 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
eaa3783b 1902 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
98cd9ca7
RH
1903 ctx->iaoq_n = -1;
1904 ctx->iaoq_n_var = next;
1905
1906 if (link != 0) {
eaa3783b 1907 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
98cd9ca7
RH
1908 }
1909
1910 if (is_n) {
1911 /* The branch nullifies the next insn, which means the state of N
1912 after the branch is the inverse of the state of N that applied
1913 to the branch. */
eaa3783b 1914 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
98cd9ca7
RH
1915 cond_free(&ctx->null_cond);
1916 ctx->null_cond = cond_make_n();
1917 ctx->psw_n_nonzero = true;
1918 } else {
1919 cond_free(&ctx->null_cond);
1920 }
1921 }
01afb7be 1922 return true;
98cd9ca7
RH
1923}
1924
660eefe1
RH
1925/* Implement
1926 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1927 * IAOQ_Next{30..31} ← GR[b]{30..31};
1928 * else
1929 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1930 * which keeps the privilege level from being increased.
1931 */
1932static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1933{
660eefe1
RH
1934 TCGv_reg dest;
1935 switch (ctx->privilege) {
1936 case 0:
1937 /* Privilege 0 is maximum and is allowed to decrease. */
1938 return offset;
1939 case 3:
993119fe 1940 /* Privilege 3 is minimum and is never allowed to increase. */
660eefe1
RH
1941 dest = get_temp(ctx);
1942 tcg_gen_ori_reg(dest, offset, 3);
1943 break;
1944 default:
993119fe 1945 dest = get_temp(ctx);
660eefe1
RH
1946 tcg_gen_andi_reg(dest, offset, -4);
1947 tcg_gen_ori_reg(dest, dest, ctx->privilege);
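        /* Keep whichever low-bit encoding is numerically larger,
           i.e. the lesser privilege. */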
1948 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
660eefe1
RH
1949 break;
1950 }
1951 return dest;
660eefe1
RH
1952}
1953
ba1d0b44 1954#ifdef CONFIG_USER_ONLY
7ad439df
RH
1955/* On Linux, page zero is normally marked execute only + gateway.
1956 Therefore normal read or write is supposed to fail, but specific
1957 offsets have kernel code mapped to raise permissions to implement
1958 system calls. Handling this via an explicit check here, rather
1959 than in the "be disp(sr2,r0)" instruction that probably sent us
1960 here, is the easiest way to handle the branch delay slot on the
1961 aforementioned BE. */
31234768 1962static void do_page_zero(DisasContext *ctx)
7ad439df
RH
1963{
1964 /* If by some means we get here with PSW[N]=1, that implies that
1965 the B,GATE instruction would be skipped, and we'd fault on the
8b81968c 1966 next insn within the privileged page. */
7ad439df
RH
1967 switch (ctx->null_cond.c) {
1968 case TCG_COND_NEVER:
1969 break;
1970 case TCG_COND_ALWAYS:
eaa3783b 1971 tcg_gen_movi_reg(cpu_psw_n, 0);
7ad439df
RH
1972 goto do_sigill;
1973 default:
1974 /* Since this is always the first (and only) insn within the
1975 TB, we should know the state of PSW[N] from TB->FLAGS. */
1976 g_assert_not_reached();
1977 }
1978
1979 /* Check that we didn't arrive here via some means that allowed
1980 non-sequential instruction execution. Normally the PSW[B] bit
1981 detects this by preventing the B,GATE instruction from executing
1982 under such conditions. */
1983 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1984 goto do_sigill;
1985 }
1986
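    /* Dispatch on the entry offset within the gateway page; these vectors
       match what the Linux kernel maps at page zero. */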
ebd0e151 1987 switch (ctx->iaoq_f & -4) {
7ad439df 1988 case 0x00: /* Null pointer call */
2986721d 1989 gen_excp_1(EXCP_IMP);
31234768
RH
1990 ctx->base.is_jmp = DISAS_NORETURN;
1991 break;
7ad439df
RH
1992
1993 case 0xb0: /* LWS */
1994 gen_excp_1(EXCP_SYSCALL_LWS);
31234768
RH
1995 ctx->base.is_jmp = DISAS_NORETURN;
1996 break;
7ad439df
RH
1997
1998 case 0xe0: /* SET_THREAD_POINTER */
35136a77 1999 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
ebd0e151 2000 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
eaa3783b 2001 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
31234768
RH
2002 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2003 break;
7ad439df
RH
2004
2005 case 0x100: /* SYSCALL */
2006 gen_excp_1(EXCP_SYSCALL);
31234768
RH
2007 ctx->base.is_jmp = DISAS_NORETURN;
2008 break;
7ad439df
RH
2009
2010 default:
2011 do_sigill:
2986721d 2012 gen_excp_1(EXCP_ILL);
31234768
RH
2013 ctx->base.is_jmp = DISAS_NORETURN;
2014 break;
7ad439df
RH
2015 }
2016}
ba1d0b44 2017#endif
7ad439df 2018
deee69a1 2019static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2020{
2021 cond_free(&ctx->null_cond);
31234768 2022 return true;
b2167459
RH
2023}
2024
40f9f908 2025static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2026{
31234768 2027 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2028}
2029
e36f27ef 2030static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2031{
2032 /* No point in nullifying the memory barrier. */
2033 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2034
2035 cond_free(&ctx->null_cond);
31234768 2036 return true;
98a9cb79
RH
2037}
2038
c603e14a 2039static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2040{
c603e14a 2041 unsigned rt = a->t;
eaa3783b
RH
2042 TCGv_reg tmp = dest_gpr(ctx, rt);
2043 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2044 save_gpr(ctx, rt, tmp);
2045
2046 cond_free(&ctx->null_cond);
31234768 2047 return true;
98a9cb79
RH
2048}
2049
c603e14a 2050static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2051{
c603e14a
RH
2052 unsigned rt = a->t;
2053 unsigned rs = a->sp;
33423472
RH
2054 TCGv_i64 t0 = tcg_temp_new_i64();
2055 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2056
33423472
RH
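    /* The space id is kept in the high 32 bits of the 64-bit SR slot. */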
2057 load_spr(ctx, t0, rs);
2058 tcg_gen_shri_i64(t0, t0, 32);
2059 tcg_gen_trunc_i64_reg(t1, t0);
2060
2061 save_gpr(ctx, rt, t1);
98a9cb79
RH
2062
2063 cond_free(&ctx->null_cond);
31234768 2064 return true;
98a9cb79
RH
2065}
2066
c603e14a 2067static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2068{
c603e14a
RH
2069 unsigned rt = a->t;
2070 unsigned ctl = a->r;
eaa3783b 2071 TCGv_reg tmp;
98a9cb79
RH
2072
2073 switch (ctl) {
35136a77 2074 case CR_SAR:
98a9cb79 2075#ifdef TARGET_HPPA64
c603e14a 2076 if (a->e == 0) {
98a9cb79
RH
2077 /* MFSAR without ,W masks low 5 bits. */
2078 tmp = dest_gpr(ctx, rt);
eaa3783b 2079 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2080 save_gpr(ctx, rt, tmp);
35136a77 2081 goto done;
98a9cb79
RH
2082 }
2083#endif
2084 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2085 goto done;
2086 case CR_IT: /* Interval Timer */
2087 /* FIXME: Respect PSW_S bit. */
2088 nullify_over(ctx);
98a9cb79 2089 tmp = dest_gpr(ctx, rt);
dfd1b812 2090 if (translator_io_start(&ctx->base)) {
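            /* With icount in use, the timer read must end the TB. */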
49c29d6c 2091 gen_helper_read_interval_timer(tmp);
31234768 2092 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2093 } else {
2094 gen_helper_read_interval_timer(tmp);
49c29d6c 2095 }
98a9cb79 2096 save_gpr(ctx, rt, tmp);
31234768 2097 return nullify_end(ctx);
98a9cb79 2098 case 26:
98a9cb79 2099 case 27:
98a9cb79
RH
2100 break;
2101 default:
2102 /* All other control registers are privileged. */
35136a77
RH
2103 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2104 break;
98a9cb79
RH
2105 }
2106
35136a77
RH
2107 tmp = get_temp(ctx);
2108 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2109 save_gpr(ctx, rt, tmp);
2110
2111 done:
98a9cb79 2112 cond_free(&ctx->null_cond);
31234768 2113 return true;
98a9cb79
RH
2114}
2115
c603e14a 2116static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2117{
c603e14a
RH
2118 unsigned rr = a->r;
2119 unsigned rs = a->sp;
33423472
RH
2120 TCGv_i64 t64;
2121
2122 if (rs >= 5) {
2123 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2124 }
2125 nullify_over(ctx);
2126
2127 t64 = tcg_temp_new_i64();
2128 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2129 tcg_gen_shli_i64(t64, t64, 32);
2130
2131 if (rs >= 4) {
2132 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2133 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2134 } else {
2135 tcg_gen_mov_i64(cpu_sr[rs], t64);
2136 }
33423472 2137
31234768 2138 return nullify_end(ctx);
33423472
RH
2139}
2140
c603e14a 2141static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2142{
c603e14a 2143 unsigned ctl = a->t;
4845f015 2144 TCGv_reg reg;
eaa3783b 2145 TCGv_reg tmp;
98a9cb79 2146
35136a77 2147 if (ctl == CR_SAR) {
4845f015 2148 reg = load_gpr(ctx, a->r);
98a9cb79 2149 tmp = tcg_temp_new();
35136a77 2150 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
98a9cb79 2151 save_or_nullify(ctx, cpu_sar, tmp);
35136a77
RH
2152
2153 cond_free(&ctx->null_cond);
31234768 2154 return true;
98a9cb79
RH
2155 }
2156
35136a77
RH
2157 /* All other control registers are privileged or read-only. */
2158 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2159
c603e14a 2160#ifndef CONFIG_USER_ONLY
35136a77 2161 nullify_over(ctx);
4845f015
SS
2162 reg = load_gpr(ctx, a->r);
2163
35136a77
RH
2164 switch (ctl) {
2165 case CR_IT:
49c29d6c 2166 gen_helper_write_interval_timer(cpu_env, reg);
35136a77 2167 break;
4f5f2548
RH
2168 case CR_EIRR:
2169 gen_helper_write_eirr(cpu_env, reg);
2170 break;
2171 case CR_EIEM:
2172 gen_helper_write_eiem(cpu_env, reg);
31234768 2173 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2174 break;
2175
35136a77
RH
2176 case CR_IIASQ:
2177 case CR_IIAOQ:
2178 /* FIXME: Respect PSW_Q bit */
2179 /* The write advances the queue and stores to the back element. */
2180 tmp = get_temp(ctx);
2181 tcg_gen_ld_reg(tmp, cpu_env,
2182 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2183 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2184 tcg_gen_st_reg(reg, cpu_env,
2185 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2186 break;
2187
d5de20bd
SS
2188 case CR_PID1:
2189 case CR_PID2:
2190 case CR_PID3:
2191 case CR_PID4:
2192 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2193#ifndef CONFIG_USER_ONLY
2194 gen_helper_change_prot_id(cpu_env);
2195#endif
2196 break;
2197
35136a77
RH
2198 default:
2199 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2200 break;
2201 }
31234768 2202 return nullify_end(ctx);
4f5f2548 2203#endif
98a9cb79
RH
2204}
2205
c603e14a 2206static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2207{
eaa3783b 2208 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2209
c603e14a 2210 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
eaa3783b 2211 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
98a9cb79 2212 save_or_nullify(ctx, cpu_sar, tmp);
98a9cb79
RH
2213
2214 cond_free(&ctx->null_cond);
31234768 2215 return true;
98a9cb79
RH
2216}
2217
e36f27ef 2218static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2219{
e36f27ef 2220 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2221
2330504c
HD
2222#ifdef CONFIG_USER_ONLY
2223 /* We don't implement space registers in user mode. */
eaa3783b 2224 tcg_gen_movi_reg(dest, 0);
2330504c 2225#else
2330504c
HD
2226 TCGv_i64 t0 = tcg_temp_new_i64();
2227
e36f27ef 2228 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2229 tcg_gen_shri_i64(t0, t0, 32);
2230 tcg_gen_trunc_i64_reg(dest, t0);
2330504c 2231#endif
e36f27ef 2232 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2233
2234 cond_free(&ctx->null_cond);
31234768 2235 return true;
98a9cb79
RH
2236}
2237
e36f27ef 2238static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2239{
e36f27ef
RH
2240 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2241#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2242 TCGv_reg tmp;
2243
e1b5a5ed
RH
2244 nullify_over(ctx);
2245
2246 tmp = get_temp(ctx);
2247 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2248 tcg_gen_andi_reg(tmp, tmp, ~a->i);
e1b5a5ed 2249 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2250 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2251
2252 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2253 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2254 return nullify_end(ctx);
e36f27ef 2255#endif
e1b5a5ed
RH
2256}
2257
e36f27ef 2258static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2259{
e36f27ef
RH
2260 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2261#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2262 TCGv_reg tmp;
2263
e1b5a5ed
RH
2264 nullify_over(ctx);
2265
2266 tmp = get_temp(ctx);
2267 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2268 tcg_gen_ori_reg(tmp, tmp, a->i);
e1b5a5ed 2269 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2270 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2271
2272 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2273 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2274 return nullify_end(ctx);
e36f27ef 2275#endif
e1b5a5ed
RH
2276}
2277
c603e14a 2278static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2279{
e1b5a5ed 2280 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2281#ifndef CONFIG_USER_ONLY
2282 TCGv_reg tmp, reg;
e1b5a5ed
RH
2283 nullify_over(ctx);
2284
c603e14a 2285 reg = load_gpr(ctx, a->r);
e1b5a5ed
RH
2286 tmp = get_temp(ctx);
2287 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2288
2289 /* Exit the TB to recognize new interrupts. */
31234768
RH
2290 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2291 return nullify_end(ctx);
c603e14a 2292#endif
e1b5a5ed 2293}
f49b3537 2294
e36f27ef 2295static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2296{
f49b3537 2297 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2298#ifndef CONFIG_USER_ONLY
f49b3537
RH
2299 nullify_over(ctx);
2300
e36f27ef 2301 if (rfi_r) {
f49b3537
RH
2302 gen_helper_rfi_r(cpu_env);
2303 } else {
2304 gen_helper_rfi(cpu_env);
2305 }
31234768 2306 /* Exit the TB to recognize new interrupts. */
8532a14e 2307 tcg_gen_exit_tb(NULL, 0);
31234768 2308 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2309
31234768 2310 return nullify_end(ctx);
e36f27ef
RH
2311#endif
2312}
2313
2314static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2315{
2316 return do_rfi(ctx, false);
2317}
2318
2319static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2320{
2321 return do_rfi(ctx, true);
f49b3537 2322}
6210db05 2323
96927adb
RH
2324static bool trans_halt(DisasContext *ctx, arg_halt *a)
2325{
2326 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2327#ifndef CONFIG_USER_ONLY
96927adb
RH
2328 nullify_over(ctx);
2329 gen_helper_halt(cpu_env);
2330 ctx->base.is_jmp = DISAS_NORETURN;
2331 return nullify_end(ctx);
2332#endif
2333}
2334
2335static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2336{
2337 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2338#ifndef CONFIG_USER_ONLY
6210db05 2339 nullify_over(ctx);
96927adb 2340 gen_helper_reset(cpu_env);
31234768
RH
2341 ctx->base.is_jmp = DISAS_NORETURN;
2342 return nullify_end(ctx);
96927adb 2343#endif
6210db05 2344}
e1b5a5ed 2345
4a4554c6
HD
2346static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2347{
2348 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2349#ifndef CONFIG_USER_ONLY
2350 nullify_over(ctx);
2351 gen_helper_getshadowregs(cpu_env);
2352 return nullify_end(ctx);
2353#endif
2354}
2355
deee69a1 2356static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2357{
deee69a1
RH
2358 if (a->m) {
2359 TCGv_reg dest = dest_gpr(ctx, a->b);
2360 TCGv_reg src1 = load_gpr(ctx, a->b);
2361 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2362
deee69a1
RH
2363 /* The only thing we need to do is the base register modification. */
2364 tcg_gen_add_reg(dest, src1, src2);
2365 save_gpr(ctx, a->b, dest);
2366 }
98a9cb79 2367 cond_free(&ctx->null_cond);
31234768 2368 return true;
98a9cb79
RH
2369}
2370
deee69a1 2371static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2372{
86f8d05f 2373 TCGv_reg dest, ofs;
eed14219 2374 TCGv_i32 level, want;
86f8d05f 2375 TCGv_tl addr;
98a9cb79
RH
2376
2377 nullify_over(ctx);
2378
deee69a1
RH
2379 dest = dest_gpr(ctx, a->t);
2380 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2381
deee69a1 2382 if (a->imm) {
29dd6f64 2383 level = tcg_constant_i32(a->ri);
98a9cb79 2384 } else {
eed14219 2385 level = tcg_temp_new_i32();
deee69a1 2386 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2387 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2388 }
29dd6f64 2389 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
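    /* The PROBE result is 1 if the access would be permitted at the
       requested privilege level, 0 otherwise. */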
eed14219
RH
2390
2391 gen_helper_probe(dest, cpu_env, addr, level, want);
2392
deee69a1 2393 save_gpr(ctx, a->t, dest);
31234768 2394 return nullify_end(ctx);
98a9cb79
RH
2395}
2396
deee69a1 2397static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2398{
deee69a1
RH
2399 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2400#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2401 TCGv_tl addr;
2402 TCGv_reg ofs, reg;
2403
8d6ae7fb
RH
2404 nullify_over(ctx);
2405
deee69a1
RH
2406 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2407 reg = load_gpr(ctx, a->r);
2408 if (a->addr) {
8d6ae7fb
RH
2409 gen_helper_itlba(cpu_env, addr, reg);
2410 } else {
2411 gen_helper_itlbp(cpu_env, addr, reg);
2412 }
2413
32dc7569
SS
2414 /* Exit TB for TLB change if mmu is enabled. */
2415 if (ctx->tb_flags & PSW_C) {
31234768
RH
2416 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2417 }
2418 return nullify_end(ctx);
deee69a1 2419#endif
8d6ae7fb 2420}
63300a00 2421
deee69a1 2422static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2423{
deee69a1
RH
2424 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2425#ifndef CONFIG_USER_ONLY
63300a00
RH
2426 TCGv_tl addr;
2427 TCGv_reg ofs;
2428
63300a00
RH
2429 nullify_over(ctx);
2430
deee69a1
RH
2431 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2432 if (a->m) {
2433 save_gpr(ctx, a->b, ofs);
63300a00 2434 }
deee69a1 2435 if (a->local) {
63300a00
RH
2436 gen_helper_ptlbe(cpu_env);
2437 } else {
2438 gen_helper_ptlb(cpu_env, addr);
2439 }
2440
2441 /* Exit TB for TLB change if mmu is enabled. */
6797c315
NH
2442 if (ctx->tb_flags & PSW_C) {
2443 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2444 }
2445 return nullify_end(ctx);
2446#endif
2447}
2448
2449/*
2450 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2451 * See
2452 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2453 * page 13-9 (195/206)
2454 */
2455static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2456{
2457 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2458#ifndef CONFIG_USER_ONLY
2459 TCGv_tl addr, atl, stl;
2460 TCGv_reg reg;
2461
2462 nullify_over(ctx);
2463
2464 /*
2465 * FIXME:
2466 * if (not (pcxl or pcxl2))
2467 * return gen_illegal(ctx);
2468 *
2469 * Note for future: these are 32-bit systems; no hppa64.
2470 */
2471
2472 atl = tcg_temp_new_tl();
2473 stl = tcg_temp_new_tl();
2474 addr = tcg_temp_new_tl();
2475
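    /* Reassemble the 64-bit virtual address from the space half (ISR or
       IIASQ) and the offset half (IOR or IIAOQ). */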
2476 tcg_gen_ld32u_i64(stl, cpu_env,
2477 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2478 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2479 tcg_gen_ld32u_i64(atl, cpu_env,
2480 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2481 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2482 tcg_gen_shli_i64(stl, stl, 32);
2483 tcg_gen_or_tl(addr, atl, stl);
6797c315
NH
2484
2485 reg = load_gpr(ctx, a->r);
2486 if (a->addr) {
2487 gen_helper_itlba(cpu_env, addr, reg);
2488 } else {
2489 gen_helper_itlbp(cpu_env, addr, reg);
2490 }
6797c315
NH
2491
2492 /* Exit TB for TLB change if mmu is enabled. */
32dc7569 2493 if (ctx->tb_flags & PSW_C) {
31234768
RH
2494 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2495 }
2496 return nullify_end(ctx);
deee69a1 2497#endif
63300a00 2498}
2dfcca9f 2499
deee69a1 2500static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2501{
deee69a1
RH
2502 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2503#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2504 TCGv_tl vaddr;
2505 TCGv_reg ofs, paddr;
2506
2dfcca9f
RH
2507 nullify_over(ctx);
2508
deee69a1 2509 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2510
2511 paddr = tcg_temp_new();
2512 gen_helper_lpa(paddr, cpu_env, vaddr);
2513
2514 /* Note that physical address result overrides base modification. */
deee69a1
RH
2515 if (a->m) {
2516 save_gpr(ctx, a->b, ofs);
2dfcca9f 2517 }
deee69a1 2518 save_gpr(ctx, a->t, paddr);
2dfcca9f 2519
31234768 2520 return nullify_end(ctx);
deee69a1 2521#endif
2dfcca9f 2522}
43a97b81 2523
deee69a1 2524static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2525{
43a97b81
RH
2526 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2527
2528 /* The Coherence Index is an implementation-defined function of the
2529 physical address. Two addresses with the same CI have a coherent
2530 view of the cache. Our implementation is to return 0 for all addresses,
2531 since the entire address space is coherent. */
29dd6f64 2532 save_gpr(ctx, a->t, tcg_constant_reg(0));
43a97b81 2533
31234768
RH
2534 cond_free(&ctx->null_cond);
2535 return true;
43a97b81 2536}
98a9cb79 2537
0c982a28 2538static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2539{
0c982a28
RH
2540 return do_add_reg(ctx, a, false, false, false, false);
2541}
b2167459 2542
0c982a28
RH
2543static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2544{
2545 return do_add_reg(ctx, a, true, false, false, false);
2546}
b2167459 2547
0c982a28
RH
2548static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2549{
2550 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2551}
2552
0c982a28 2553static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2554{
0c982a28
RH
2555 return do_add_reg(ctx, a, false, false, false, true);
2556}
b2167459 2557
0c982a28
RH
2558static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2559{
2560 return do_add_reg(ctx, a, false, true, false, true);
2561}
b2167459 2562
0c982a28
RH
2563static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2564{
2565 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2566}
2567
0c982a28 2568static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2569{
0c982a28
RH
2570 return do_sub_reg(ctx, a, true, false, false);
2571}
b2167459 2572
0c982a28
RH
2573static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2574{
2575 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2576}
2577
0c982a28 2578static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2579{
0c982a28
RH
2580 return do_sub_reg(ctx, a, true, false, true);
2581}
2582
2583static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2584{
2585 return do_sub_reg(ctx, a, false, true, false);
2586}
2587
2588static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2589{
2590 return do_sub_reg(ctx, a, true, true, false);
2591}
2592
2593static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2594{
2595 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2596}
2597
2598static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2599{
2600 return do_log_reg(ctx, a, tcg_gen_and_reg);
2601}
2602
2603static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2604{
2605 if (a->cf == 0) {
2606 unsigned r2 = a->r2;
2607 unsigned r1 = a->r1;
2608 unsigned rt = a->t;
b2167459 2609
7aee8189
RH
2610 if (rt == 0) { /* NOP */
2611 cond_free(&ctx->null_cond);
2612 return true;
2613 }
2614 if (r2 == 0) { /* COPY */
2615 if (r1 == 0) {
2616 TCGv_reg dest = dest_gpr(ctx, rt);
2617 tcg_gen_movi_reg(dest, 0);
2618 save_gpr(ctx, rt, dest);
2619 } else {
2620 save_gpr(ctx, rt, cpu_gr[r1]);
2621 }
2622 cond_free(&ctx->null_cond);
2623 return true;
2624 }
2625#ifndef CONFIG_USER_ONLY
2626 /* These are QEMU extensions and are nops in the real architecture:
2627 *
2628 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2629 * or %r31,%r31,%r31 -- death loop; offline cpu
2630 * currently implemented as idle.
2631 */
2632 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
7aee8189
RH
2633 /* No need to check for supervisor, as userland can only pause
2634 until the next timer interrupt. */
2635 nullify_over(ctx);
2636
2637 /* Advance the instruction queue. */
2638 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2639 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2640 nullify_set(ctx, 0);
2641
2642 /* Tell the qemu main loop to halt until this cpu has work. */
29dd6f64
RH
2643 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2644 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
7aee8189
RH
2645 gen_excp_1(EXCP_HALTED);
2646 ctx->base.is_jmp = DISAS_NORETURN;
2647
2648 return nullify_end(ctx);
2649 }
2650#endif
b2167459 2651 }
0c982a28
RH
2652 return do_log_reg(ctx, a, tcg_gen_or_reg);
2653}
7aee8189 2654
0c982a28
RH
2655static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2656{
2657 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2658}
2659
0c982a28 2660static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2661{
eaa3783b 2662 TCGv_reg tcg_r1, tcg_r2;
b2167459 2663
0c982a28 2664 if (a->cf) {
b2167459
RH
2665 nullify_over(ctx);
2666 }
0c982a28
RH
2667 tcg_r1 = load_gpr(ctx, a->r1);
2668 tcg_r2 = load_gpr(ctx, a->r2);
2669 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2670 return nullify_end(ctx);
b2167459
RH
2671}
2672
0c982a28 2673static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2674{
eaa3783b 2675 TCGv_reg tcg_r1, tcg_r2;
b2167459 2676
0c982a28 2677 if (a->cf) {
b2167459
RH
2678 nullify_over(ctx);
2679 }
0c982a28
RH
2680 tcg_r1 = load_gpr(ctx, a->r1);
2681 tcg_r2 = load_gpr(ctx, a->r2);
2682 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2683 return nullify_end(ctx);
b2167459
RH
2684}
2685
0c982a28 2686static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2687{
eaa3783b 2688 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2689
0c982a28 2690 if (a->cf) {
b2167459
RH
2691 nullify_over(ctx);
2692 }
0c982a28
RH
2693 tcg_r1 = load_gpr(ctx, a->r1);
2694 tcg_r2 = load_gpr(ctx, a->r2);
b2167459 2695 tmp = get_temp(ctx);
eaa3783b 2696 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2697 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2698 return nullify_end(ctx);
b2167459
RH
2699}
2700
0c982a28
RH
2701static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2702{
2703 return do_uaddcm(ctx, a, false);
2704}
2705
2706static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2707{
2708 return do_uaddcm(ctx, a, true);
2709}
2710
2711static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2712{
eaa3783b 2713 TCGv_reg tmp;
b2167459
RH
2714
2715 nullify_over(ctx);
2716
2717 tmp = get_temp(ctx);
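    /* Build a correction of 6 for each BCD digit selected by the
       (optionally inverted) per-nibble carries in PSW[CB]. */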
eaa3783b 2718 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2719 if (!is_i) {
eaa3783b 2720 tcg_gen_not_reg(tmp, tmp);
b2167459 2721 }
eaa3783b
RH
2722 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2723 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2724 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2725 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2726 return nullify_end(ctx);
b2167459
RH
2727}
2728
0c982a28
RH
2729static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2730{
2731 return do_dcor(ctx, a, false);
2732}
2733
2734static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2735{
2736 return do_dcor(ctx, a, true);
2737}
2738
2739static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2740{
eaa3783b 2741 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
b2167459
RH
2742
2743 nullify_over(ctx);
2744
0c982a28
RH
2745 in1 = load_gpr(ctx, a->r1);
2746 in2 = load_gpr(ctx, a->r2);
b2167459
RH
2747
2748 add1 = tcg_temp_new();
2749 add2 = tcg_temp_new();
2750 addc = tcg_temp_new();
2751 dest = tcg_temp_new();
29dd6f64 2752 zero = tcg_constant_reg(0);
b2167459
RH
2753
2754 /* Form R1 << 1 | PSW[CB]{8}. */
eaa3783b
RH
2755 tcg_gen_add_reg(add1, in1, in1);
2756 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
b2167459
RH
2757
2758 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2759 carry{8} requires that we subtract via + ~R2 + 1, as described in
2760 the manual. By extracting and masking V, we can produce the
2761 proper inputs to the addition without movcond. */
eaa3783b
RH
2762 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2763 tcg_gen_xor_reg(add2, in2, addc);
2764 tcg_gen_andi_reg(addc, addc, 1);
b2167459
RH
2765 /* ??? This is only correct for 32-bit. */
2766 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2767 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2768
b2167459 2769 /* Write back the result register. */
0c982a28 2770 save_gpr(ctx, a->t, dest);
b2167459
RH
2771
2772 /* Write back PSW[CB]. */
eaa3783b
RH
2773 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2774 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
b2167459
RH
2775
2776 /* Write back PSW[V] for the division step. */
eaa3783b
RH
2777 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2778 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
b2167459
RH
2779
2780 /* Install the new nullification. */
0c982a28 2781 if (a->cf) {
eaa3783b 2782 TCGv_reg sv = NULL;
b47a4a02 2783 if (cond_need_sv(a->cf >> 1)) {
b2167459
RH
2784 /* ??? The lshift is supposed to contribute to overflow. */
2785 sv = do_add_sv(ctx, dest, add1, add2);
2786 }
0c982a28 2787 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
b2167459
RH
2788 }
2789
31234768 2790 return nullify_end(ctx);
b2167459
RH
2791}
2792
0588e061 2793static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2794{
0588e061
RH
2795 return do_add_imm(ctx, a, false, false);
2796}
b2167459 2797
0588e061
RH
2798static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2799{
2800 return do_add_imm(ctx, a, true, false);
b2167459
RH
2801}
2802
0588e061 2803static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2804{
0588e061
RH
2805 return do_add_imm(ctx, a, false, true);
2806}
b2167459 2807
0588e061
RH
2808static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2809{
2810 return do_add_imm(ctx, a, true, true);
2811}
b2167459 2812
0588e061
RH
2813static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2814{
2815 return do_sub_imm(ctx, a, false);
2816}
b2167459 2817
0588e061
RH
2818static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2819{
2820 return do_sub_imm(ctx, a, true);
b2167459
RH
2821}
2822
0588e061 2823static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2824{
eaa3783b 2825 TCGv_reg tcg_im, tcg_r2;
b2167459 2826
0588e061 2827 if (a->cf) {
b2167459
RH
2828 nullify_over(ctx);
2829 }
2830
0588e061
RH
2831 tcg_im = load_const(ctx, a->i);
2832 tcg_r2 = load_gpr(ctx, a->r);
2833 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2834
31234768 2835 return nullify_end(ctx);
b2167459
RH
2836}
2837
1cd012a5 2838static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2839{
0786a3b6
HD
2840 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2841 return gen_illegal(ctx);
2842 } else {
2843 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
1cd012a5 2844 a->disp, a->sp, a->m, a->size | MO_TE);
0786a3b6 2845 }
96d6407f
RH
2846}
2847
1cd012a5 2848static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2849{
1cd012a5 2850 assert(a->x == 0 && a->scale == 0);
0786a3b6
HD
2851 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2852 return gen_illegal(ctx);
2853 } else {
2854 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2855 }
96d6407f
RH
2856}
2857
1cd012a5 2858static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
96d6407f 2859{
b1af755c 2860 MemOp mop = MO_TE | MO_ALIGN | a->size;
86f8d05f
RH
2861 TCGv_reg zero, dest, ofs;
2862 TCGv_tl addr;
96d6407f
RH
2863
2864 nullify_over(ctx);
2865
1cd012a5 2866 if (a->m) {
86f8d05f
RH
2867 /* Base register modification. Make sure that if RT == RB,
2868 we still see the result of the load. */
96d6407f
RH
2869 dest = get_temp(ctx);
2870 } else {
1cd012a5 2871 dest = dest_gpr(ctx, a->t);
96d6407f
RH
2872 }
2873
1cd012a5
RH
2874 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2875 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
b1af755c
RH
2876
2877 /*
2878 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2879 * However actual hardware succeeds with aligned mod 4.
2880 * Detect this case and log a GUEST_ERROR.
2881 *
2882 * TODO: HPPA64 relaxes the over-alignment requirement
2883 * with the ,co completer.
2884 */
2885 gen_helper_ldc_check(addr);
2886
29dd6f64 2887 zero = tcg_constant_reg(0);
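    /* Load-and-clear semantics: atomically fetch the old word and store zero. */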
86f8d05f 2888 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
b1af755c 2889
1cd012a5
RH
2890 if (a->m) {
2891 save_gpr(ctx, a->b, ofs);
96d6407f 2892 }
1cd012a5 2893 save_gpr(ctx, a->t, dest);
96d6407f 2894
31234768 2895 return nullify_end(ctx);
96d6407f
RH
2896}
2897
1cd012a5 2898static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2899{
86f8d05f
RH
2900 TCGv_reg ofs, val;
2901 TCGv_tl addr;
96d6407f
RH
2902
2903 nullify_over(ctx);
2904
1cd012a5 2905 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2906 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2907 val = load_gpr(ctx, a->r);
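    /* Select the begin/end byte-store helper from the 'a' completer; the
       _parallel variants are used when this TB may run concurrently. */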
2908 if (a->a) {
f9f46db4
EC
2909 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2910 gen_helper_stby_e_parallel(cpu_env, addr, val);
2911 } else {
2912 gen_helper_stby_e(cpu_env, addr, val);
2913 }
96d6407f 2914 } else {
f9f46db4
EC
2915 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2916 gen_helper_stby_b_parallel(cpu_env, addr, val);
2917 } else {
2918 gen_helper_stby_b(cpu_env, addr, val);
2919 }
96d6407f 2920 }
1cd012a5 2921 if (a->m) {
86f8d05f 2922 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2923 save_gpr(ctx, a->b, ofs);
96d6407f 2924 }
96d6407f 2925
31234768 2926 return nullify_end(ctx);
96d6407f
RH
2927}
2928
1cd012a5 2929static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2930{
2931 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2932
2933 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
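    /* Perform the load with address translation disabled. */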
d0a851cc 2934 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2935 trans_ld(ctx, a);
d0a851cc 2936 ctx->mmu_idx = hold_mmu_idx;
31234768 2937 return true;
d0a851cc
RH
2938}
2939
1cd012a5 2940static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2941{
2942 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2943
2944 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2945 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2946 trans_st(ctx, a);
d0a851cc 2947 ctx->mmu_idx = hold_mmu_idx;
31234768 2948 return true;
d0a851cc 2949}
95412a61 2950
0588e061 2951static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2952{
0588e061 2953 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2954
0588e061
RH
2955 tcg_gen_movi_reg(tcg_rt, a->i);
2956 save_gpr(ctx, a->t, tcg_rt);
b2167459 2957 cond_free(&ctx->null_cond);
31234768 2958 return true;
b2167459
RH
2959}
2960
0588e061 2961static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 2962{
0588e061 2963 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 2964 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 2965
0588e061 2966 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
2967 save_gpr(ctx, 1, tcg_r1);
2968 cond_free(&ctx->null_cond);
31234768 2969 return true;
b2167459
RH
2970}
2971
0588e061 2972static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 2973{
0588e061 2974 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
2975
2976 /* Special case rb == 0, for the LDI pseudo-op.
2977 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
2978 if (a->b == 0) {
2979 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 2980 } else {
0588e061 2981 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 2982 }
0588e061 2983 save_gpr(ctx, a->t, tcg_rt);
b2167459 2984 cond_free(&ctx->null_cond);
31234768 2985 return true;
b2167459
RH
2986}
2987
01afb7be
RH
2988static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2989 unsigned c, unsigned f, unsigned n, int disp)
98cd9ca7 2990{
01afb7be 2991 TCGv_reg dest, in2, sv;
98cd9ca7
RH
2992 DisasCond cond;
2993
98cd9ca7
RH
2994 in2 = load_gpr(ctx, r);
2995 dest = get_temp(ctx);
2996
eaa3783b 2997 tcg_gen_sub_reg(dest, in1, in2);
98cd9ca7 2998
f764718d 2999 sv = NULL;
b47a4a02 3000 if (cond_need_sv(c)) {
98cd9ca7
RH
3001 sv = do_sub_sv(ctx, dest, in1, in2);
3002 }
3003
01afb7be
RH
3004 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3005 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3006}
3007
01afb7be 3008static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
98cd9ca7 3009{
01afb7be
RH
3010 nullify_over(ctx);
3011 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3012}
98cd9ca7 3013
01afb7be
RH
3014static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3015{
98cd9ca7 3016 nullify_over(ctx);
01afb7be
RH
3017 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3018}
3019
3020static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3021 unsigned c, unsigned f, unsigned n, int disp)
3022{
3023 TCGv_reg dest, in2, sv, cb_msb;
3024 DisasCond cond;
98cd9ca7 3025
98cd9ca7 3026 in2 = load_gpr(ctx, r);
43675d20 3027 dest = tcg_temp_new();
f764718d
RH
3028 sv = NULL;
3029 cb_msb = NULL;
98cd9ca7 3030
b47a4a02 3031 if (cond_need_cb(c)) {
98cd9ca7 3032 cb_msb = get_temp(ctx);
eaa3783b
RH
3033 tcg_gen_movi_reg(cb_msb, 0);
3034 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
b47a4a02 3035 } else {
eaa3783b 3036 tcg_gen_add_reg(dest, in1, in2);
b47a4a02
SS
3037 }
3038 if (cond_need_sv(c)) {
98cd9ca7 3039 sv = do_add_sv(ctx, dest, in1, in2);
98cd9ca7
RH
3040 }
3041
01afb7be 3042 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
43675d20 3043 save_gpr(ctx, r, dest);
01afb7be 3044 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3045}
3046
01afb7be
RH
3047static bool trans_addb(DisasContext *ctx, arg_addb *a)
3048{
3049 nullify_over(ctx);
3050 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3051}
3052
3053static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3054{
3055 nullify_over(ctx);
3056 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3057}
3058
3059static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
98cd9ca7 3060{
eaa3783b 3061 TCGv_reg tmp, tcg_r;
98cd9ca7
RH
3062 DisasCond cond;
3063
3064 nullify_over(ctx);
3065
3066 tmp = tcg_temp_new();
01afb7be
RH
3067 tcg_r = load_gpr(ctx, a->r);
3068 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
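    /* Shift the selected bit (big-endian numbering via SAR) into the sign
       position and test it there. */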
98cd9ca7 3069
01afb7be 3070 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be 3071 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3072}
3073
01afb7be
RH
3074static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3075{
3076 TCGv_reg tmp, tcg_r;
3077 DisasCond cond;
3078
3079 nullify_over(ctx);
3080
3081 tmp = tcg_temp_new();
3082 tcg_r = load_gpr(ctx, a->r);
3083 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3084
3085 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be
RH
3086 return do_cbranch(ctx, a->disp, a->n, &cond);
3087}
3088
3089static bool trans_movb(DisasContext *ctx, arg_movb *a)
98cd9ca7 3090{
eaa3783b 3091 TCGv_reg dest;
98cd9ca7
RH
3092 DisasCond cond;
3093
3094 nullify_over(ctx);
3095
01afb7be
RH
3096 dest = dest_gpr(ctx, a->r2);
3097 if (a->r1 == 0) {
eaa3783b 3098 tcg_gen_movi_reg(dest, 0);
98cd9ca7 3099 } else {
01afb7be 3100 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
98cd9ca7
RH
3101 }
3102
01afb7be
RH
3103 cond = do_sed_cond(a->c, dest);
3104 return do_cbranch(ctx, a->disp, a->n, &cond);
3105}
3106
3107static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3108{
3109 TCGv_reg dest;
3110 DisasCond cond;
3111
3112 nullify_over(ctx);
3113
3114 dest = dest_gpr(ctx, a->r);
3115 tcg_gen_movi_reg(dest, a->i);
3116
3117 cond = do_sed_cond(a->c, dest);
3118 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3119}
3120
30878590 3121static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
0b1347d2 3122{
eaa3783b 3123 TCGv_reg dest;
0b1347d2 3124
30878590 3125 if (a->c) {
0b1347d2
RH
3126 nullify_over(ctx);
3127 }
3128
30878590
RH
3129 dest = dest_gpr(ctx, a->t);
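    /* Three cases: plain right shift (r1 == 0), a 32-bit rotate (r1 == r2),
       and the general 64-bit double-word shift. */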
3130 if (a->r1 == 0) {
3131 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
eaa3783b 3132 tcg_gen_shr_reg(dest, dest, cpu_sar);
30878590 3133 } else if (a->r1 == a->r2) {
0b1347d2 3134 TCGv_i32 t32 = tcg_temp_new_i32();
30878590 3135 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
0b1347d2 3136 tcg_gen_rotr_i32(t32, t32, cpu_sar);
eaa3783b 3137 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2
RH
3138 } else {
3139 TCGv_i64 t = tcg_temp_new_i64();
3140 TCGv_i64 s = tcg_temp_new_i64();
3141
30878590 3142 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
eaa3783b 3143 tcg_gen_extu_reg_i64(s, cpu_sar);
0b1347d2 3144 tcg_gen_shr_i64(t, t, s);
eaa3783b 3145 tcg_gen_trunc_i64_reg(dest, t);
0b1347d2 3146 }
30878590 3147 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3148
3149 /* Install the new nullification. */
3150 cond_free(&ctx->null_cond);
30878590
RH
3151 if (a->c) {
3152 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3153 }
31234768 3154 return nullify_end(ctx);
0b1347d2
RH
3155}
3156
30878590 3157static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
0b1347d2 3158{
30878590 3159 unsigned sa = 31 - a->cpos;
eaa3783b 3160 TCGv_reg dest, t2;
0b1347d2 3161
30878590 3162 if (a->c) {
0b1347d2
RH
3163 nullify_over(ctx);
3164 }
3165
30878590
RH
3166 dest = dest_gpr(ctx, a->t);
3167 t2 = load_gpr(ctx, a->r2);
05bfd4db
RH
3168 if (a->r1 == 0) {
3169 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3170 } else if (TARGET_REGISTER_BITS == 32) {
3171 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3172 } else if (a->r1 == a->r2) {
0b1347d2 3173 TCGv_i32 t32 = tcg_temp_new_i32();
eaa3783b 3174 tcg_gen_trunc_reg_i32(t32, t2);
0b1347d2 3175 tcg_gen_rotri_i32(t32, t32, sa);
eaa3783b 3176 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2 3177 } else {
05bfd4db
RH
3178 TCGv_i64 t64 = tcg_temp_new_i64();
3179 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3180 tcg_gen_shri_i64(t64, t64, sa);
3181 tcg_gen_trunc_i64_reg(dest, t64);
0b1347d2 3182 }
30878590 3183 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3184
3185 /* Install the new nullification. */
3186 cond_free(&ctx->null_cond);
30878590
RH
3187 if (a->c) {
3188 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3189 }
31234768 3190 return nullify_end(ctx);
0b1347d2
RH
3191}
3192
30878590 3193static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
0b1347d2 3194{
30878590 3195 unsigned len = 32 - a->clen;
eaa3783b 3196 TCGv_reg dest, src, tmp;
0b1347d2 3197
30878590 3198 if (a->c) {
0b1347d2
RH
3199 nullify_over(ctx);
3200 }
3201
30878590
RH
3202 dest = dest_gpr(ctx, a->t);
3203 src = load_gpr(ctx, a->r);
0b1347d2
RH
3204 tmp = tcg_temp_new();
3205
3206 /* Recall that SAR is using big-endian bit numbering. */
eaa3783b 3207 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
30878590 3208 if (a->se) {
eaa3783b
RH
3209 tcg_gen_sar_reg(dest, src, tmp);
3210 tcg_gen_sextract_reg(dest, dest, 0, len);
0b1347d2 3211 } else {
eaa3783b
RH
3212 tcg_gen_shr_reg(dest, src, tmp);
3213 tcg_gen_extract_reg(dest, dest, 0, len);
0b1347d2 3214 }
30878590 3215 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3216
3217 /* Install the new nullification. */
3218 cond_free(&ctx->null_cond);
30878590
RH
3219 if (a->c) {
3220 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3221 }
31234768 3222 return nullify_end(ctx);
0b1347d2
RH
3223}
3224
30878590 3225static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
0b1347d2 3226{
30878590
RH
3227 unsigned len = 32 - a->clen;
3228 unsigned cpos = 31 - a->pos;
eaa3783b 3229 TCGv_reg dest, src;
0b1347d2 3230
30878590 3231 if (a->c) {
0b1347d2
RH
3232 nullify_over(ctx);
3233 }
3234
30878590
RH
3235 dest = dest_gpr(ctx, a->t);
3236 src = load_gpr(ctx, a->r);
3237 if (a->se) {
eaa3783b 3238 tcg_gen_sextract_reg(dest, src, cpos, len);
0b1347d2 3239 } else {
eaa3783b 3240 tcg_gen_extract_reg(dest, src, cpos, len);
0b1347d2 3241 }
30878590 3242 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3243
3244 /* Install the new nullification. */
3245 cond_free(&ctx->null_cond);
30878590
RH
3246 if (a->c) {
3247 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3248 }
31234768 3249 return nullify_end(ctx);
0b1347d2
RH
3250}
3251
30878590 3252static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
0b1347d2 3253{
30878590 3254 unsigned len = 32 - a->clen;
eaa3783b
RH
3255 target_sreg mask0, mask1;
3256 TCGv_reg dest;
0b1347d2 3257
30878590 3258 if (a->c) {
0b1347d2
RH
3259 nullify_over(ctx);
3260 }
30878590
RH
3261 if (a->cpos + len > 32) {
3262 len = 32 - a->cpos;
0b1347d2
RH
3263 }
3264
30878590
RH
3265 dest = dest_gpr(ctx, a->t);
3266 mask0 = deposit64(0, a->cpos, len, a->i);
3267 mask1 = deposit64(-1, a->cpos, len, a->i);
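    /* AND with mask1 clears the field's zero bits; OR with mask0 sets its
       one bits. */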
0b1347d2 3268
30878590
RH
3269 if (a->nz) {
3270 TCGv_reg src = load_gpr(ctx, a->t);
0b1347d2 3271 if (mask1 != -1) {
eaa3783b 3272 tcg_gen_andi_reg(dest, src, mask1);
0b1347d2
RH
3273 src = dest;
3274 }
eaa3783b 3275 tcg_gen_ori_reg(dest, src, mask0);
0b1347d2 3276 } else {
eaa3783b 3277 tcg_gen_movi_reg(dest, mask0);
0b1347d2 3278 }
30878590 3279 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3280
3281 /* Install the new nullification. */
3282 cond_free(&ctx->null_cond);
30878590
RH
3283 if (a->c) {
3284 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3285 }
31234768 3286 return nullify_end(ctx);
0b1347d2
RH
3287}
3288
30878590 3289static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
0b1347d2 3290{
30878590
RH
3291 unsigned rs = a->nz ? a->t : 0;
3292 unsigned len = 32 - a->clen;
eaa3783b 3293 TCGv_reg dest, val;
0b1347d2 3294
30878590 3295 if (a->c) {
0b1347d2
RH
3296 nullify_over(ctx);
3297 }
30878590
RH
3298 if (a->cpos + len > 32) {
3299 len = 32 - a->cpos;
0b1347d2
RH
3300 }
3301
30878590
RH
3302 dest = dest_gpr(ctx, a->t);
3303 val = load_gpr(ctx, a->r);
0b1347d2 3304 if (rs == 0) {
30878590 3305 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
0b1347d2 3306 } else {
30878590 3307 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
0b1347d2 3308 }
30878590 3309 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3310
3311 /* Install the new nullification. */
3312 cond_free(&ctx->null_cond);
30878590
RH
3313 if (a->c) {
3314 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3315 }
31234768 3316 return nullify_end(ctx);
0b1347d2
RH
3317}
3318
30878590
RH
3319static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3320 unsigned nz, unsigned clen, TCGv_reg val)
0b1347d2 3321{
0b1347d2
RH
3322 unsigned rs = nz ? rt : 0;
3323 unsigned len = 32 - clen;
30878590 3324 TCGv_reg mask, tmp, shift, dest;
0b1347d2
RH
3325 unsigned msb = 1U << (len - 1);
3326
0b1347d2
RH
3327 dest = dest_gpr(ctx, rt);
3328 shift = tcg_temp_new();
3329 tmp = tcg_temp_new();
3330
3331 /* Convert big-endian bit numbering in SAR to left-shift. */
eaa3783b 3332 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
0b1347d2 3333
0992a930
RH
3334 mask = tcg_temp_new();
3335 tcg_gen_movi_reg(mask, msb + (msb - 1));
eaa3783b 3336 tcg_gen_and_reg(tmp, val, mask);
0b1347d2 3337 if (rs) {
eaa3783b
RH
3338 tcg_gen_shl_reg(mask, mask, shift);
3339 tcg_gen_shl_reg(tmp, tmp, shift);
3340 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3341 tcg_gen_or_reg(dest, dest, tmp);
0b1347d2 3342 } else {
eaa3783b 3343 tcg_gen_shl_reg(dest, tmp, shift);
0b1347d2 3344 }
0b1347d2
RH
3345 save_gpr(ctx, rt, dest);
3346
3347 /* Install the new nullification. */
3348 cond_free(&ctx->null_cond);
3349 if (c) {
3350 ctx->null_cond = do_sed_cond(c, dest);
3351 }
31234768 3352 return nullify_end(ctx);
0b1347d2
RH
3353}
3354
30878590
RH
3355static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3356{
a6deecce
SS
3357 if (a->c) {
3358 nullify_over(ctx);
3359 }
30878590
RH
3360 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3361}
3362
3363static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3364{
a6deecce
SS
3365 if (a->c) {
3366 nullify_over(ctx);
3367 }
30878590
RH
3368 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3369}
0b1347d2 3370
8340f534 3371static bool trans_be(DisasContext *ctx, arg_be *a)
98cd9ca7 3372{
660eefe1 3373 TCGv_reg tmp;
98cd9ca7 3374
c301f34e 3375#ifdef CONFIG_USER_ONLY
98cd9ca7
RH
3376 /* ??? It seems like there should be a good way of using
3377 "be disp(sr2, r0)", the canonical gateway entry mechanism
3378 to our advantage. But that appears to be inconvenient to
3379 manage along side branch delay slots. Therefore we handle
3380 entry into the gateway page via absolute address. */
98cd9ca7
RH
3381 /* Since we don't implement spaces, just branch. Do notice the special
3382 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3383 goto_tb to the TB containing the syscall. */
8340f534
RH
3384 if (a->b == 0) {
3385 return do_dbranch(ctx, a->disp, a->l, a->n);
98cd9ca7 3386 }
c301f34e 3387#else
c301f34e 3388 nullify_over(ctx);
660eefe1
RH
3389#endif
3390
3391 tmp = get_temp(ctx);
8340f534 3392 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
660eefe1 3393 tmp = do_ibranch_priv(ctx, tmp);
c301f34e
RH
3394
3395#ifdef CONFIG_USER_ONLY
8340f534 3396 return do_ibranch(ctx, tmp, a->l, a->n);
c301f34e
RH
3397#else
3398 TCGv_i64 new_spc = tcg_temp_new_i64();
3399
8340f534
RH
3400 load_spr(ctx, new_spc, a->sp);
3401 if (a->l) {
c301f34e
RH
3402 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3403 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3404 }
8340f534 3405 if (a->n && use_nullify_skip(ctx)) {
c301f34e
RH
3406 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3407 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3408 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3409 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3410 } else {
3411 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3412 if (ctx->iaoq_b == -1) {
3413 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3414 }
3415 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3416 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
8340f534 3417 nullify_set(ctx, a->n);
c301f34e 3418 }
c301f34e 3419 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3420 ctx->base.is_jmp = DISAS_NORETURN;
3421 return nullify_end(ctx);
c301f34e 3422#endif
98cd9ca7
RH
3423}
3424
8340f534 3425static bool trans_bl(DisasContext *ctx, arg_bl *a)
98cd9ca7 3426{
8340f534 3427 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
98cd9ca7
RH
3428}
3429
8340f534 3430static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
43e05652 3431{
8340f534 3432 target_ureg dest = iaoq_dest(ctx, a->disp);
43e05652 3433
6e5f5300
SS
3434 nullify_over(ctx);
3435
43e05652
RH
3436 /* Make sure the caller hasn't done something weird with the queue.
3437 * ??? This is not quite the same as the PSW[B] bit, which would be
3438 * expensive to track. Real hardware will trap for
3439 * b gateway
3440 * b gateway+4 (in delay slot of first branch)
3441 * However, checking for a non-sequential instruction queue *will*
3442 * diagnose the security hole
3443 * b gateway
3444 * b evil
3445 * in which instructions at evil would run with increased privs.
3446 */
3447 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3448 return gen_illegal(ctx);
3449 }
3450
3451#ifndef CONFIG_USER_ONLY
3452 if (ctx->tb_flags & PSW_C) {
3453 CPUHPPAState *env = ctx->cs->env_ptr;
3454 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3455 /* If we could not find a TLB entry, then we need to generate an
3456 ITLB miss exception so the kernel will provide it.
3457 The resulting TLB fill operation will invalidate this TB and
3458 we will re-translate, at which point we *will* be able to find
3459 the TLB entry and determine if this is in fact a gateway page. */
3460 if (type < 0) {
31234768
RH
3461 gen_excp(ctx, EXCP_ITLB_MISS);
3462 return true;
43e05652
RH
3463 }
3464 /* No change for non-gateway pages or for priv decrease. */
3465 if (type >= 4 && type - 4 < ctx->privilege) {
3466 dest = deposit32(dest, 0, 2, type - 4);
3467 }
3468 } else {
3469 dest &= -4; /* priv = 0 */
3470 }
3471#endif
3472
6e5f5300
SS
3473 if (a->l) {
3474 TCGv_reg tmp = dest_gpr(ctx, a->l);
3475 if (ctx->privilege < 3) {
3476 tcg_gen_andi_reg(tmp, tmp, -4);
3477 }
3478 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3479 save_gpr(ctx, a->l, tmp);
3480 }
3481
3482 return do_dbranch(ctx, dest, 0, a->n);
43e05652
RH
3483}
3484
8340f534 3485static bool trans_blr(DisasContext *ctx, arg_blr *a)
98cd9ca7 3486{
b35aec85
RH
3487 if (a->x) {
3488 TCGv_reg tmp = get_temp(ctx);
3489 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3490 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3491 /* The computation here never changes privilege level. */
3492 return do_ibranch(ctx, tmp, a->l, a->n);
3493 } else {
3494 /* BLR R0,RX is a good way to load PC+8 into RX. */
3495 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3496 }
98cd9ca7
RH
3497}
3498
8340f534 3499static bool trans_bv(DisasContext *ctx, arg_bv *a)
98cd9ca7 3500{
eaa3783b 3501 TCGv_reg dest;
98cd9ca7 3502
8340f534
RH
3503 if (a->x == 0) {
3504 dest = load_gpr(ctx, a->b);
98cd9ca7
RH
3505 } else {
3506 dest = get_temp(ctx);
8340f534
RH
3507 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3508 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
98cd9ca7 3509 }
660eefe1 3510 dest = do_ibranch_priv(ctx, dest);
8340f534 3511 return do_ibranch(ctx, dest, 0, a->n);
98cd9ca7
RH
3512}
3513
8340f534 3514static bool trans_bve(DisasContext *ctx, arg_bve *a)
98cd9ca7 3515{
660eefe1 3516 TCGv_reg dest;
98cd9ca7 3517
c301f34e 3518#ifdef CONFIG_USER_ONLY
8340f534
RH
3519 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3520 return do_ibranch(ctx, dest, a->l, a->n);
c301f34e
RH
3521#else
3522 nullify_over(ctx);
8340f534 3523 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
c301f34e
RH
3524
3525 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3526 if (ctx->iaoq_b == -1) {
3527 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3528 }
3529 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3530 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
8340f534
RH
3531 if (a->l) {
3532 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e 3533 }
8340f534 3534 nullify_set(ctx, a->n);
c301f34e 3535 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3536 ctx->base.is_jmp = DISAS_NORETURN;
3537 return nullify_end(ctx);
c301f34e 3538#endif
98cd9ca7
RH
3539}
3540
1ca74648
RH
3541/*
3542 * Float class 0
3543 */
ebe9383c 3544
1ca74648 3545static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3546{
1ca74648 3547 tcg_gen_mov_i32(dst, src);
ebe9383c
RH
3548}
3549
59f8c04b
HD
3550static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3551{
a300dad3
RH
3552 uint64_t ret;
3553
3554 if (TARGET_REGISTER_BITS == 64) {
3555 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3556 } else {
3557 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3558 }
3559
59f8c04b 3560 nullify_over(ctx);
a300dad3 3561 save_frd(0, tcg_constant_i64(ret));
59f8c04b
HD
3562 return nullify_end(ctx);
3563}
3564
1ca74648 3565static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3566{
1ca74648 3567 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
ebe9383c
RH
3568}
3569
1ca74648 3570static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3571{
1ca74648 3572 tcg_gen_mov_i64(dst, src);
ebe9383c
RH
3573}
3574
1ca74648 3575static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3576{
1ca74648 3577 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
ebe9383c
RH
3578}
3579
1ca74648 3580static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3581{
1ca74648 3582 tcg_gen_andi_i32(dst, src, INT32_MAX);
ebe9383c
RH
3583}
3584
1ca74648 3585static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3586{
1ca74648 3587 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
ebe9383c
RH
3588}
3589
1ca74648 3590static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3591{
1ca74648 3592 tcg_gen_andi_i64(dst, src, INT64_MAX);
ebe9383c
RH
3593}
3594
1ca74648 3595static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3596{
1ca74648 3597 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
ebe9383c
RH
3598}
3599
1ca74648 3600static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3601{
1ca74648 3602 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
ebe9383c
RH
3603}
3604
1ca74648 3605static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3606{
1ca74648 3607 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
ebe9383c
RH
3608}
3609
1ca74648 3610static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3611{
1ca74648 3612 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
ebe9383c
RH
3613}
3614
1ca74648 3615static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3616{
1ca74648 3617 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
ebe9383c
RH
3618}
3619
1ca74648 3620static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3621{
1ca74648 3622 tcg_gen_xori_i32(dst, src, INT32_MIN);
ebe9383c
RH
3623}
3624
1ca74648 3625static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3626{
1ca74648 3627 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
ebe9383c
RH
3628}
3629
3630static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3631{
3632 tcg_gen_xori_i64(dst, src, INT64_MIN);
3633}
3634
1ca74648
RH
3635static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3636{
3637 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3638}
3639
3640static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c
RH
3641{
3642 tcg_gen_ori_i32(dst, src, INT32_MIN);
3643}
3644
1ca74648
RH
3645static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3646{
3647 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3648}
3649
ebe9383c
RH
3650static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3651{
3652 tcg_gen_ori_i64(dst, src, INT64_MIN);
3653}
3654
1ca74648
RH
3655static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3656{
3657 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3658}
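
/*
 * Descriptive note on the class 0 sign manipulations above: with the
 * IEEE sign in the most significant bit, fabs clears it (AND with
 * INT32_MAX / INT64_MAX), fneg flips it (XOR with INT32_MIN /
 * INT64_MIN), fnegabs sets it (OR with INT32_MIN / INT64_MIN), and
 * fcpy is a plain move.  None of these touch the exponent or mantissa,
 * which is presumably why they are open-coded as integer bit operations
 * rather than routed through a softfloat helper.
 */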
3659
3660/*
3661 * Float class 1
3662 */
3663
3664static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3665{
3666 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3667}
3668
3669static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3670{
3671 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3672}
3673
3674static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3675{
3676 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3677}
3678
3679static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3680{
3681 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3682}
3683
3684static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3685{
3686 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3687}
3688
3689static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3690{
3691 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3692}
3693
3694static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3695{
3696 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3697}
3698
3699static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3700{
3701 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3702}
3703
3704static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3705{
3706 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3707}
3708
3709static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3710{
3711 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3712}
3713
3714static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3715{
3716 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3717}
3718
3719static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3720{
3721 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3722}
3723
3724static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3725{
3726 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3727}
3728
3729static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3730{
3731 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3732}
3733
3734static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3735{
3736 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3737}
3738
3739static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3740{
3741 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3742}
3743
3744static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3745{
3746 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3747}
3748
3749static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3750{
3751 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3752}
3753
3754static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3755{
3756 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3757}
3758
3759static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3760{
3761 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3762}
3763
3764static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3765{
3766 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3767}
3768
3769static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3770{
3771 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3772}
3773
3774static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3775{
3776 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3777}
3778
3779static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3780{
3781 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3782}
3783
3784static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3785{
3786 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3787}
3788
3789static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3790{
3791 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3792}
3793
3794/*
3795 * Float class 2
3796 */
3797
3798static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
ebe9383c
RH
3799{
3800 TCGv_i32 ta, tb, tc, ty;
3801
3802 nullify_over(ctx);
3803
1ca74648
RH
3804 ta = load_frw0_i32(a->r1);
3805 tb = load_frw0_i32(a->r2);
29dd6f64
RH
3806 ty = tcg_constant_i32(a->y);
3807 tc = tcg_constant_i32(a->c);
ebe9383c
RH
3808
3809 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3810
1ca74648 3811 return nullify_end(ctx);
ebe9383c
RH
3812}
3813
1ca74648 3814static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
ebe9383c 3815{
ebe9383c
RH
3816 TCGv_i64 ta, tb;
3817 TCGv_i32 tc, ty;
3818
3819 nullify_over(ctx);
3820
1ca74648
RH
3821 ta = load_frd0(a->r1);
3822 tb = load_frd0(a->r2);
29dd6f64
RH
3823 ty = tcg_constant_i32(a->y);
3824 tc = tcg_constant_i32(a->c);
ebe9383c
RH
3825
3826 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3827
31234768 3828 return nullify_end(ctx);
ebe9383c
RH
3829}
3830
1ca74648 3831static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
ebe9383c 3832{
eaa3783b 3833 TCGv_reg t;
ebe9383c
RH
3834
3835 nullify_over(ctx);
3836
1ca74648 3837 t = get_temp(ctx);
eaa3783b 3838 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
ebe9383c 3839
1ca74648
RH
3840 if (a->y == 1) {
3841 int mask;
3842 bool inv = false;
3843
3844 switch (a->c) {
3845 case 0: /* simple */
3846 tcg_gen_andi_reg(t, t, 0x4000000);
3847 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3848 goto done;
3849 case 2: /* rej */
3850 inv = true;
3851 /* fallthru */
3852 case 1: /* acc */
3853 mask = 0x43ff800;
3854 break;
3855 case 6: /* rej8 */
3856 inv = true;
3857 /* fallthru */
3858 case 5: /* acc8 */
3859 mask = 0x43f8000;
3860 break;
3861 case 9: /* acc6 */
3862 mask = 0x43e0000;
3863 break;
3864 case 13: /* acc4 */
3865 mask = 0x4380000;
3866 break;
3867 case 17: /* acc2 */
3868 mask = 0x4200000;
3869 break;
3870 default:
3871 gen_illegal(ctx);
3872 return true;
3873 }
3874 if (inv) {
3875 TCGv_reg c = load_const(ctx, mask);
3876 tcg_gen_or_reg(t, t, c);
3877 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3878 } else {
3879 tcg_gen_andi_reg(t, t, mask);
3880 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3881 }
3882 } else {
3883 unsigned cbit = (a->y ^ 1) - 1;
3884
3885 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3886 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
1ca74648
RH
3887 }
3888
3889 done:
31234768 3890 return nullify_end(ctx);
ebe9383c
RH
3891}
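
/*
 * Note on the ftest masks above (interpretation hedged): every mask
 * contains bit 0x4000000 plus a subset of fr0_shadow flag bits, and the
 * masks nest strictly:
 *   acc 0x43ff800 > acc8 0x43f8000 > acc6 0x43e0000
 *       > acc4 0x4380000 > acc2 0x4200000 > simple 0x4000000
 * so each accN variant tests progressively fewer bits, and the rej
 * forms reuse the same mask with the sense of the test inverted via
 * the OR/compare path.
 */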
3892
1ca74648
RH
3893/*
3894 * Float class 3
3895 */
3896
3897static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3898{
1ca74648
RH
3899 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3900}
ebe9383c 3901
1ca74648
RH
3902static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3903{
3904 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3905}
ebe9383c 3906
1ca74648
RH
3907static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3908{
3909 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3910}
ebe9383c 3911
1ca74648
RH
3912static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3913{
3914 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
ebe9383c
RH
3915}
3916
1ca74648 3917static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3918{
1ca74648
RH
3919 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3920}
3921
3922static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3923{
3924 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3925}
3926
3927static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3928{
3929 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3930}
3931
3932static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3933{
3934 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3935}
3936
3937static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3938{
3939 TCGv_i64 x, y;
ebe9383c
RH
3940
3941 nullify_over(ctx);
3942
1ca74648
RH
3943 x = load_frw0_i64(a->r1);
3944 y = load_frw0_i64(a->r2);
3945 tcg_gen_mul_i64(x, x, y);
3946 save_frd(a->t, x);
ebe9383c 3947
31234768 3948 return nullify_end(ctx);
ebe9383c
RH
3949}
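
/*
 * XMPYU sketch: load_frw0_i64() loads the two 32-bit sources
 * zero-extended into 64-bit temporaries, so the tcg_gen_mul_i64() above
 * is an unsigned 32x32->64 multiply whose full product is written to
 * the 64-bit destination, e.g. 0xffffffff * 0xffffffff =
 * 0xfffffffe00000001.
 */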
3950
ebe9383c
RH
3951/* Convert the fmpyadd single-precision register encodings to standard. */
3952static inline int fmpyadd_s_reg(unsigned r)
3953{
3954 return (r & 16) * 2 + 16 + (r & 15);
3955}
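
/*
 * Worked mapping for fmpyadd_s_reg() (sketch): encodings 0..15 map to
 * 16..31 and encodings 16..31 map to 48..63, e.g.
 *   r = 0  -> 16,  r = 15 -> 31,  r = 16 -> 48,  r = 31 -> 63.
 * In the indexing used by the load_frw and save_frw helpers, bit 5 of
 * the result appears to select the right half of fr16..fr31, so these
 * encodings address the single-precision halves of the upper sixteen
 * floating-point registers.
 */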
3956
b1e2af57 3957static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
ebe9383c 3958{
b1e2af57
RH
3959 int tm = fmpyadd_s_reg(a->tm);
3960 int ra = fmpyadd_s_reg(a->ra);
3961 int ta = fmpyadd_s_reg(a->ta);
3962 int rm2 = fmpyadd_s_reg(a->rm2);
3963 int rm1 = fmpyadd_s_reg(a->rm1);
ebe9383c
RH
3964
3965 nullify_over(ctx);
3966
b1e2af57
RH
3967 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3968 do_fop_weww(ctx, ta, ta, ra,
3969 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
ebe9383c 3970
31234768 3971 return nullify_end(ctx);
ebe9383c
RH
3972}
3973
b1e2af57
RH
3974static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3975{
3976 return do_fmpyadd_s(ctx, a, false);
3977}
3978
3979static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3980{
3981 return do_fmpyadd_s(ctx, a, true);
3982}
3983
3984static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3985{
3986 nullify_over(ctx);
3987
3988 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3989 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3990 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3991
3992 return nullify_end(ctx);
3993}
3994
3995static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3996{
3997 return do_fmpyadd_d(ctx, a, false);
3998}
3999
4000static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4001{
4002 return do_fmpyadd_d(ctx, a, true);
4003}
4004
c3bad4f8 4005static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
ebe9383c 4006{
c3bad4f8 4007 TCGv_i32 x, y, z;
ebe9383c
RH
4008
4009 nullify_over(ctx);
c3bad4f8
RH
4010 x = load_frw0_i32(a->rm1);
4011 y = load_frw0_i32(a->rm2);
4012 z = load_frw0_i32(a->ra3);
ebe9383c 4013
c3bad4f8
RH
4014 if (a->neg) {
4015 gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
ebe9383c 4016 } else {
c3bad4f8 4017 gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
ebe9383c
RH
4018 }
4019
c3bad4f8 4020 save_frw_i32(a->t, x);
31234768 4021 return nullify_end(ctx);
ebe9383c
RH
4022}
4023
c3bad4f8 4024static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
ebe9383c 4025{
c3bad4f8 4026 TCGv_i64 x, y, z;
ebe9383c
RH
4027
4028 nullify_over(ctx);
c3bad4f8
RH
4029 x = load_frd0(a->rm1);
4030 y = load_frd0(a->rm2);
4031 z = load_frd0(a->ra3);
ebe9383c 4032
c3bad4f8
RH
4033 if (a->neg) {
4034 gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
ebe9383c 4035 } else {
c3bad4f8 4036 gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
ebe9383c
RH
4037 }
4038
c3bad4f8 4039 save_frd(a->t, x);
31234768 4040 return nullify_end(ctx);
ebe9383c
RH
4041}
4042
15da177b
SS
4043static bool trans_diag(DisasContext *ctx, arg_diag *a)
4044{
4045 qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4046 cond_free(&ctx->null_cond);
4047 return true;
4048}
4049
b542683d 4050static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
61766fe9 4051{
51b061fb 4052 DisasContext *ctx = container_of(dcbase, DisasContext, base);
f764718d 4053 int bound;
61766fe9 4054
51b061fb 4055 ctx->cs = cs;
494737b7 4056 ctx->tb_flags = ctx->base.tb->flags;
3d68ee7b
RH
4057
4058#ifdef CONFIG_USER_ONLY
c01e5dfb 4059 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
3d68ee7b 4060 ctx->mmu_idx = MMU_USER_IDX;
c01e5dfb
HD
4061 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4062 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
217d1a5e 4063 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
3d68ee7b 4064#else
494737b7 4065 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
c01e5dfb
HD
4066 ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
4067 PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);
3d68ee7b 4068
c301f34e
RH
4069 /* Recover the IAOQ values from the GVA + PRIV. */
4070 uint64_t cs_base = ctx->base.tb->cs_base;
4071 uint64_t iasq_f = cs_base & ~0xffffffffull;
4072 int32_t diff = cs_base;
4073
4074 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4075 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4076#endif
51b061fb 4077 ctx->iaoq_n = -1;
f764718d 4078 ctx->iaoq_n_var = NULL;
61766fe9 4079
3d68ee7b
RH
4080 /* Bound the number of instructions by those left on the page. */
4081 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 4082 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
3d68ee7b 4083
86f8d05f
RH
4084 ctx->ntempr = 0;
4085 ctx->ntempl = 0;
4086 memset(ctx->tempr, 0, sizeof(ctx->tempr));
4087 memset(ctx->templ, 0, sizeof(ctx->templ));
51b061fb 4088}
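
/*
 * Worked example for the system-mode IAOQ recovery above (sketch,
 * values assumed): cs_base keeps the front space in the high bits and
 * the signed IAOQ_Back - IAOQ_Front difference in the low 32 bits.
 * With pc_first = 0x400001000, privilege 3 and cs_base =
 * 0x0000000400000004: iasq_f = 0x400000000 and diff = 4, so
 * iaoq_f = 0x1000 + 3 = 0x1003 and iaoq_b = 0x1007; a zero diff would
 * instead leave iaoq_b unknown (-1).  The instruction bound is plain
 * page arithmetic: with 4 KiB pages and pc_first at page offset 0xff8,
 * -(pc_first | TARGET_PAGE_MASK) == 8, so at most two 4-byte insns fit
 * before the page boundary.
 */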
61766fe9 4089
51b061fb
RH
4090static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4091{
4092 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4093
3d68ee7b 4094 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
51b061fb
RH
4095 ctx->null_cond = cond_make_f();
4096 ctx->psw_n_nonzero = false;
494737b7 4097 if (ctx->tb_flags & PSW_N) {
51b061fb
RH
4098 ctx->null_cond.c = TCG_COND_ALWAYS;
4099 ctx->psw_n_nonzero = true;
129e9cc3 4100 }
51b061fb
RH
4101 ctx->null_lab = NULL;
4102}
129e9cc3 4103
51b061fb
RH
4104static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4105{
4106 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4107
51b061fb
RH
4108 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4109}
4110
51b061fb
RH
4111static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4112{
4113 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4114 CPUHPPAState *env = cs->env_ptr;
4115 DisasJumpType ret;
4116 int i, n;
4117
4118 /* Execute one insn. */
ba1d0b44 4119#ifdef CONFIG_USER_ONLY
c301f34e 4120 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
31234768
RH
4121 do_page_zero(ctx);
4122 ret = ctx->base.is_jmp;
51b061fb 4123 assert(ret != DISAS_NEXT);
ba1d0b44
RH
4124 } else
4125#endif
4126 {
51b061fb
RH
4127 /* Always fetch the insn, even if nullified, so that we check
4128 the page permissions for execute. */
4e116893 4129 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
51b061fb
RH
4130
4131 /* Set up the IA queue for the next insn.
4132 This will be overwritten by a branch. */
4133 if (ctx->iaoq_b == -1) {
4134 ctx->iaoq_n = -1;
4135 ctx->iaoq_n_var = get_temp(ctx);
eaa3783b 4136 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
7ad439df 4137 } else {
51b061fb 4138 ctx->iaoq_n = ctx->iaoq_b + 4;
f764718d 4139 ctx->iaoq_n_var = NULL;
61766fe9
RH
4140 }
4141
51b061fb
RH
4142 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4143 ctx->null_cond.c = TCG_COND_NEVER;
4144 ret = DISAS_NEXT;
4145 } else {
1a19da0d 4146 ctx->insn = insn;
31274b46
RH
4147 if (!decode(ctx, insn)) {
4148 gen_illegal(ctx);
4149 }
31234768 4150 ret = ctx->base.is_jmp;
51b061fb 4151 assert(ctx->null_lab == NULL);
61766fe9 4152 }
51b061fb 4153 }
61766fe9 4154
af187238 4155 /* Forget any temporaries allocated. */
86f8d05f 4156 for (i = 0, n = ctx->ntempr; i < n; ++i) {
86f8d05f
RH
4157 ctx->tempr[i] = NULL;
4158 }
4159 for (i = 0, n = ctx->ntempl; i < n; ++i) {
86f8d05f 4160 ctx->templ[i] = NULL;
51b061fb 4161 }
86f8d05f
RH
4162 ctx->ntempr = 0;
4163 ctx->ntempl = 0;
61766fe9 4164
3d68ee7b
RH
4165 /* Advance the insn queue. Note that this check also detects
4166 a privilege change within the instruction queue.  */
51b061fb 4167 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
c301f34e
RH
4168 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4169 && use_goto_tb(ctx, ctx->iaoq_b)
4170 && (ctx->null_cond.c == TCG_COND_NEVER
4171 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
51b061fb
RH
4172 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4173 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
31234768 4174 ctx->base.is_jmp = ret = DISAS_NORETURN;
51b061fb 4175 } else {
31234768 4176 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
c301f34e 4177 }
61766fe9 4178 }
51b061fb
RH
4179 ctx->iaoq_f = ctx->iaoq_b;
4180 ctx->iaoq_b = ctx->iaoq_n;
c301f34e 4181 ctx->base.pc_next += 4;
51b061fb 4182
c5d0aec2
RH
4183 switch (ret) {
4184 case DISAS_NORETURN:
4185 case DISAS_IAQ_N_UPDATED:
4186 break;
4187
4188 case DISAS_NEXT:
4189 case DISAS_IAQ_N_STALE:
4190 case DISAS_IAQ_N_STALE_EXIT:
4191 if (ctx->iaoq_f == -1) {
4192 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4193 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e 4194#ifndef CONFIG_USER_ONLY
c5d0aec2 4195 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
c301f34e 4196#endif
c5d0aec2
RH
4197 nullify_save(ctx);
4198 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4199 ? DISAS_EXIT
4200 : DISAS_IAQ_N_UPDATED);
4201 } else if (ctx->iaoq_b == -1) {
4202 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4203 }
4204 break;
4205
4206 default:
4207 g_assert_not_reached();
51b061fb
RH
4208 }
4209}
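
/*
 * Queue-advance sketch for the straight-line case above: with no branch
 * taken, translating the insn at iaoq_f = 0x100 leaves iaoq_b = 0x104
 * and iaoq_n = 0x108; the final shuffle then sets iaoq_f = 0x104,
 * iaoq_b = 0x108 and bumps pc_next by 4, so the next iteration again
 * sees a sequential queue (iaoq_b == iaoq_f + 4) and the
 * DISAS_IAQ_N_STALE path is skipped.
 */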
4210
4211static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4212{
4213 DisasContext *ctx = container_of(dcbase, DisasContext, base);
e1b5a5ed 4214 DisasJumpType is_jmp = ctx->base.is_jmp;
61766fe9 4215
e1b5a5ed 4216 switch (is_jmp) {
869051ea 4217 case DISAS_NORETURN:
61766fe9 4218 break;
51b061fb 4219 case DISAS_TOO_MANY:
869051ea 4220 case DISAS_IAQ_N_STALE:
e1b5a5ed 4221 case DISAS_IAQ_N_STALE_EXIT:
51b061fb
RH
4222 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4223 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4224 nullify_save(ctx);
61766fe9 4225 /* FALLTHRU */
869051ea 4226 case DISAS_IAQ_N_UPDATED:
8532a14e 4227 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
7f11636d 4228 tcg_gen_lookup_and_goto_ptr();
8532a14e 4229 break;
61766fe9 4230 }
c5d0aec2
RH
4231 /* FALLTHRU */
4232 case DISAS_EXIT:
4233 tcg_gen_exit_tb(NULL, 0);
61766fe9
RH
4234 break;
4235 default:
51b061fb 4236 g_assert_not_reached();
61766fe9 4237 }
51b061fb 4238}
61766fe9 4239
8eb806a7
RH
4240static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4241 CPUState *cs, FILE *logfile)
51b061fb 4242{
c301f34e 4243 target_ulong pc = dcbase->pc_first;
61766fe9 4244
ba1d0b44
RH
4245#ifdef CONFIG_USER_ONLY
4246 switch (pc) {
51b061fb 4247 case 0x00:
8eb806a7 4248 fprintf(logfile, "IN:\n0x00000000: (null)\n");
ba1d0b44 4249 return;
51b061fb 4250 case 0xb0:
8eb806a7 4251 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
ba1d0b44 4252 return;
51b061fb 4253 case 0xe0:
8eb806a7 4254 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
ba1d0b44 4255 return;
51b061fb 4256 case 0x100:
8eb806a7 4257 fprintf(logfile, "IN:\n0x00000100: syscall\n");
ba1d0b44 4258 return;
61766fe9 4259 }
ba1d0b44
RH
4260#endif
4261
8eb806a7
RH
4262 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4263 target_disas(logfile, cs, pc, dcbase->tb->size);
51b061fb
RH
4264}
4265
4266static const TranslatorOps hppa_tr_ops = {
4267 .init_disas_context = hppa_tr_init_disas_context,
4268 .tb_start = hppa_tr_tb_start,
4269 .insn_start = hppa_tr_insn_start,
51b061fb
RH
4270 .translate_insn = hppa_tr_translate_insn,
4271 .tb_stop = hppa_tr_tb_stop,
4272 .disas_log = hppa_tr_disas_log,
4273};
4274
597f9b2d 4275void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
306c8721 4276 target_ulong pc, void *host_pc)
51b061fb
RH
4277{
4278 DisasContext ctx;
306c8721 4279 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
61766fe9 4280}