/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;

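/*
 * Note on the DisasCond helper flags used below: a0_is_n means a0
 * aliases the global cpu_psw_n rather than owning a temporary, and
 * a1_is_0 means a1 is an implied constant zero that cond_prep()
 * materializes on demand; cond_free() consults both flags to decide
 * which operands it may release.
 */
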
258typedef struct DisasContext {
d01a3625 259 DisasContextBase base;
61766fe9
RH
260 CPUState *cs;
261
eaa3783b
RH
262 target_ureg iaoq_f;
263 target_ureg iaoq_b;
264 target_ureg iaoq_n;
265 TCGv_reg iaoq_n_var;
61766fe9 266
86f8d05f 267 int ntempr, ntempl;
5eecd37a 268 TCGv_reg tempr[8];
86f8d05f 269 TCGv_tl templ[4];
61766fe9
RH
270
271 DisasCond null_cond;
272 TCGLabel *null_lab;
273
1a19da0d 274 uint32_t insn;
494737b7 275 uint32_t tb_flags;
3d68ee7b
RH
276 int mmu_idx;
277 int privilege;
61766fe9
RH
278 bool psw_n_nonzero;
279} DisasContext;
280
e36f27ef
RH
281/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
282static int expand_sm_imm(int val)
283{
284 if (val & PSW_SM_E) {
285 val = (val & ~PSW_SM_E) | PSW_E;
286 }
287 if (val & PSW_SM_W) {
288 val = (val & ~PSW_SM_W) | PSW_W;
289 }
290 return val;
291}
292
deee69a1
RH
293/* Inverted space register indicates 0 means sr0 not inferred from base. */
294static int expand_sr3x(int val)
295{
296 return ~val;
297}
298
1cd012a5
RH
299/* Convert the M:A bits within a memory insn to the tri-state value
300 we use for the final M. */
301static int ma_to_m(int val)
302{
303 return val & 2 ? (val & 1 ? -1 : 1) : 0;
304}
305
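/*
 * Worked example of the tri-state M value, reading the M:A pair as
 * bit 1 = M, bit 0 = A: m=0 yields 0 (no base update), m=1/a=0 yields
 * +1 (post-modify) and m=1/a=1 yields -1 (pre-modify), matching the
 * <0 / >0 / ==0 convention documented at do_load_32() below.
 */
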
/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1_is_0 = true
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

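/*
 * Note that dest_gpr returns a scratch temporary instead of the real
 * GPR whenever the current insn may be nullified, so that save_gpr()
 * and save_or_nullify() below can commit or discard the result with a
 * single movcond.
 */
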
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS 0
# define LO_OFS 4
#else
# define HI_OFS 4
# define LO_OFS 0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

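/*
 * Typical usage of the nullify_over/nullify_end pair in a translate
 * function (this is the pattern used by do_add_reg and friends below):
 *
 *     if (a->cf) {
 *         nullify_over(ctx);        -- branch over the body if nullified
 *     }
 *     ...emit the operation...
 *     return nullify_end(ctx);      -- close the skip, always true
 */
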
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
        || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

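/*
 * Example reading of the cf encoding above: cf >> 1 selects the base
 * condition and cf & 1 negates it, so cf = 2 is "=" (nullify when the
 * result is zero) while cf = 3 is "<>" (nullify when it is non-zero).
 */
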
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

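/*
 * Because res == in1 - in2 here, comparing the inputs directly lets TCG
 * drop the subtraction entirely when nothing else consumes res; e.g.
 * cf = 2 ("=") reduces to a plain TCG_COND_EQ on in1 and in2.
 */
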
/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

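/*
 * Note on the cb computed above: ((in1 | in2) & ~res) | (in1 & in2)
 * reconstructs the carry-out of every bit of in1 + in2, so the masks
 * 0x8888..., 0x8080... and 0x8000... select the carries leaving each
 * 4-bit digit, byte and halfword for the SDC/SBC/SHC conditions.
 */
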
/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

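/*
 * I.e. sv = (res ^ in1) & ~(in1 ^ in2): the sign bit of sv is set
 * exactly when the inputs have the same sign and the result's sign
 * differs, which is the V condition for addition.
 */
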
/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

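/*
 * Likewise sv = (res ^ in1) & (in1 ^ in2) for in1 - in2: overflow
 * occurs only when the operands have opposite signs and the result's
 * sign differs from in1's.
 */
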
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

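/*
 * In the non-borrow path of do_sub above, seeding the high word with 1
 * and using sub2 computes (1:in1) - (0:in2), so dest = in1 - in2 while
 * cb_msb becomes the carry of in1 + ~in2 + 1 (the PSW C/B bit) without
 * a separate increment.
 */
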
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

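/*
 * Shape of the GVA formed above: ofs is base + (gr[rx] << scale) when
 * an index register is present, otherwise base + disp; in system mode
 * the offset is zero-extended, masked to 62 bits when PSW_W is set,
 * and then OR'd with the space selected by space_select() unless a
 * physical access was requested.
 */
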
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg do_load_64
#define do_store_reg do_store_64
#else
#define do_load_reg do_load_32
#define do_store_reg do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

98cd9ca7
RH
1808/* Emit an unconditional branch to a direct target, which may or may not
1809 have already had nullification handled. */
01afb7be 1810static bool do_dbranch(DisasContext *ctx, target_ureg dest,
31234768 1811 unsigned link, bool is_n)
98cd9ca7
RH
1812{
1813 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1814 if (link != 0) {
1815 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1816 }
1817 ctx->iaoq_n = dest;
1818 if (is_n) {
1819 ctx->null_cond.c = TCG_COND_ALWAYS;
1820 }
98cd9ca7
RH
1821 } else {
1822 nullify_over(ctx);
1823
1824 if (link != 0) {
1825 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1826 }
1827
1828 if (is_n && use_nullify_skip(ctx)) {
1829 nullify_set(ctx, 0);
1830 gen_goto_tb(ctx, 0, dest, dest + 4);
1831 } else {
1832 nullify_set(ctx, is_n);
1833 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1834 }
1835
31234768 1836 nullify_end(ctx);
98cd9ca7
RH
1837
1838 nullify_set(ctx, 0);
1839 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
31234768 1840 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1841 }
01afb7be 1842 return true;
98cd9ca7
RH
1843}
1844
1845/* Emit a conditional branch to a direct target. If the branch itself
1846 is nullified, we should have already used nullify_over. */
01afb7be 1847static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
31234768 1848 DisasCond *cond)
98cd9ca7 1849{
eaa3783b 1850 target_ureg dest = iaoq_dest(ctx, disp);
98cd9ca7
RH
1851 TCGLabel *taken = NULL;
1852 TCGCond c = cond->c;
98cd9ca7
RH
1853 bool n;
1854
1855 assert(ctx->null_cond.c == TCG_COND_NEVER);
1856
1857 /* Handle TRUE and NEVER as direct branches. */
1858 if (c == TCG_COND_ALWAYS) {
01afb7be 1859 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
98cd9ca7
RH
1860 }
1861 if (c == TCG_COND_NEVER) {
01afb7be 1862 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
98cd9ca7
RH
1863 }
1864
1865 taken = gen_new_label();
1866 cond_prep(cond);
eaa3783b 1867 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
98cd9ca7
RH
1868 cond_free(cond);
1869
1870 /* Not taken: Condition not satisfied; nullify on backward branches. */
1871 n = is_n && disp < 0;
1872 if (n && use_nullify_skip(ctx)) {
1873 nullify_set(ctx, 0);
a881c8e7 1874 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
98cd9ca7
RH
1875 } else {
1876 if (!n && ctx->null_lab) {
1877 gen_set_label(ctx->null_lab);
1878 ctx->null_lab = NULL;
1879 }
1880 nullify_set(ctx, n);
c301f34e
RH
1881 if (ctx->iaoq_n == -1) {
1882 /* The temporary iaoq_n_var died at the branch above.
1883 Regenerate it here instead of saving it. */
1884 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1885 }
a881c8e7 1886 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
98cd9ca7
RH
1887 }
1888
1889 gen_set_label(taken);
1890
1891 /* Taken: Condition satisfied; nullify on forward branches. */
1892 n = is_n && disp >= 0;
1893 if (n && use_nullify_skip(ctx)) {
1894 nullify_set(ctx, 0);
a881c8e7 1895 gen_goto_tb(ctx, 1, dest, dest + 4);
98cd9ca7
RH
1896 } else {
1897 nullify_set(ctx, n);
a881c8e7 1898 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
98cd9ca7
RH
1899 }
1900
1901 /* Not taken: the branch itself was nullified. */
1902 if (ctx->null_lab) {
1903 gen_set_label(ctx->null_lab);
1904 ctx->null_lab = NULL;
31234768 1905 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
98cd9ca7 1906 } else {
31234768 1907 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1908 }
01afb7be 1909 return true;
98cd9ca7
RH
1910}
1911
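/*
 * Illustrative sketch, not part of the original file: the nullification
 * rule implemented by do_cbranch above, reduced to plain C.  With the ,n
 * completer (is_n), the insn after the branch is nullified when a forward
 * branch (disp >= 0) is taken, or when a backward branch (disp < 0) falls
 * through.
 */
static int cbranch_nullifies_next(int is_n, int taken, int disp)
{
    if (!is_n) {
        return 0;
    }
    return taken ? disp >= 0 : disp < 0;
}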
1912/* Emit an unconditional branch to an indirect target. This handles
1913 nullification of the branch itself. */
01afb7be 1914static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
31234768 1915 unsigned link, bool is_n)
98cd9ca7 1916{
eaa3783b 1917 TCGv_reg a0, a1, next, tmp;
98cd9ca7
RH
1918 TCGCond c;
1919
1920 assert(ctx->null_lab == NULL);
1921
1922 if (ctx->null_cond.c == TCG_COND_NEVER) {
1923 if (link != 0) {
1924 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1925 }
1926 next = get_temp(ctx);
eaa3783b 1927 tcg_gen_mov_reg(next, dest);
98cd9ca7 1928 if (is_n) {
c301f34e
RH
1929 if (use_nullify_skip(ctx)) {
1930 tcg_gen_mov_reg(cpu_iaoq_f, next);
1931 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1932 nullify_set(ctx, 0);
31234768 1933 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
01afb7be 1934 return true;
c301f34e 1935 }
98cd9ca7
RH
1936 ctx->null_cond.c = TCG_COND_ALWAYS;
1937 }
c301f34e
RH
1938 ctx->iaoq_n = -1;
1939 ctx->iaoq_n_var = next;
98cd9ca7
RH
1940 } else if (is_n && use_nullify_skip(ctx)) {
1941 /* The (conditional) branch, B, nullifies the next insn, N,
1942 and we're allowed to skip execution of N (no single-step or
4137cb83 1943 tracepoint in effect). Since the goto_ptr that we must use
98cd9ca7
RH
1944 for the indirect branch consumes no special resources, we
1945 can (conditionally) skip B and continue execution. */
1946 /* The use_nullify_skip test implies we have a known control path. */
1947 tcg_debug_assert(ctx->iaoq_b != -1);
1948 tcg_debug_assert(ctx->iaoq_n != -1);
1949
1950 /* We do have to handle the non-local temporary, DEST, before
1951 branching. Since IAOQ_F is not really live at this point, we
1952 can simply store DEST optimistically. Similarly with IAOQ_B. */
eaa3783b
RH
1953 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1954 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
98cd9ca7
RH
1955
1956 nullify_over(ctx);
1957 if (link != 0) {
eaa3783b 1958 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
98cd9ca7 1959 }
7f11636d 1960 tcg_gen_lookup_and_goto_ptr();
01afb7be 1961 return nullify_end(ctx);
98cd9ca7
RH
1962 } else {
1963 cond_prep(&ctx->null_cond);
1964 c = ctx->null_cond.c;
1965 a0 = ctx->null_cond.a0;
1966 a1 = ctx->null_cond.a1;
1967
1968 tmp = tcg_temp_new();
1969 next = get_temp(ctx);
1970
1971 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
eaa3783b 1972 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
98cd9ca7
RH
1973 ctx->iaoq_n = -1;
1974 ctx->iaoq_n_var = next;
1975
1976 if (link != 0) {
eaa3783b 1977 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
98cd9ca7
RH
1978 }
1979
1980 if (is_n) {
1981 /* The branch nullifies the next insn, which means the state of N
1982 after the branch is the inverse of the state of N that applied
1983 to the branch. */
eaa3783b 1984 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
98cd9ca7
RH
1985 cond_free(&ctx->null_cond);
1986 ctx->null_cond = cond_make_n();
1987 ctx->psw_n_nonzero = true;
1988 } else {
1989 cond_free(&ctx->null_cond);
1990 }
1991 }
01afb7be 1992 return true;
98cd9ca7
RH
1993}
1994
660eefe1
RH
1995/* Implement
1996 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1997 * IAOQ_Next{30..31} ← GR[b]{30..31};
1998 * else
1999 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2000 * which keeps the privilege level from being increased.
2001 */
2002static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
2003{
660eefe1
RH
2004 TCGv_reg dest;
2005 switch (ctx->privilege) {
2006 case 0:
2007 /* Privilege 0 is maximum and is allowed to decrease. */
2008 return offset;
2009 case 3:
2010 /* Privilege 3 is minimum and is never allowed to increase. */
2011 dest = get_temp(ctx);
2012 tcg_gen_ori_reg(dest, offset, 3);
2013 break;
2014 default:
2015 dest = get_temp(ctx);
2016 tcg_gen_andi_reg(dest, offset, -4);
2017 tcg_gen_ori_reg(dest, dest, ctx->privilege);
2018 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
2020 break;
2021 }
2022 return dest;
660eefe1
RH
2023}
2024
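/*
 * Illustrative sketch, not part of the original file: the clamp computed
 * by do_ibranch_priv above, on plain integers.  The low two bits of a
 * branch target encode the privilege number (0 = most privileged); the
 * result never holds a smaller number than the current privilege, so an
 * indirect branch can lower privilege but never raise it.
 */
#include <stdint.h>

static uint32_t ibranch_priv_clamp(uint32_t offset, unsigned cur_priv)
{
    uint32_t t = (offset & ~3u) | cur_priv;
    return t > offset ? t : offset;   /* low bits become max(offset & 3, cur_priv) */
}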
ba1d0b44 2025#ifdef CONFIG_USER_ONLY
7ad439df
RH
2026/* On Linux, page zero is normally marked execute only + gateway.
2027 Therefore normal read or write is supposed to fail, but specific
2028 offsets have kernel code mapped to raise permissions to implement
2029 system calls. Handling this via an explicit check here, rather
2030 than in the "be disp(sr2,r0)" instruction that probably sent us
2031 here, is the easiest way to handle the branch delay slot on the
2032 aforementioned BE. */
31234768 2033static void do_page_zero(DisasContext *ctx)
7ad439df
RH
2034{
2035 /* If by some means we get here with PSW[N]=1, that implies that
2036 the B,GATE instruction would be skipped, and we'd fault on the
2037 next insn within the privileged page. */
2038 switch (ctx->null_cond.c) {
2039 case TCG_COND_NEVER:
2040 break;
2041 case TCG_COND_ALWAYS:
eaa3783b 2042 tcg_gen_movi_reg(cpu_psw_n, 0);
7ad439df
RH
2043 goto do_sigill;
2044 default:
2045 /* Since this is always the first (and only) insn within the
2046 TB, we should know the state of PSW[N] from TB->FLAGS. */
2047 g_assert_not_reached();
2048 }
2049
2050 /* Check that we didn't arrive here via some means that allowed
2051 non-sequential instruction execution. Normally the PSW[B] bit
2052 detects this by preventing the B,GATE instruction from executing
2053 under such conditions. */
2054 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2055 goto do_sigill;
2056 }
2057
ebd0e151 2058 switch (ctx->iaoq_f & -4) {
7ad439df 2059 case 0x00: /* Null pointer call */
2986721d 2060 gen_excp_1(EXCP_IMP);
31234768
RH
2061 ctx->base.is_jmp = DISAS_NORETURN;
2062 break;
7ad439df
RH
2063
2064 case 0xb0: /* LWS */
2065 gen_excp_1(EXCP_SYSCALL_LWS);
31234768
RH
2066 ctx->base.is_jmp = DISAS_NORETURN;
2067 break;
7ad439df
RH
2068
2069 case 0xe0: /* SET_THREAD_POINTER */
35136a77 2070 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
ebd0e151 2071 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
eaa3783b 2072 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
31234768
RH
2073 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2074 break;
7ad439df
RH
2075
2076 case 0x100: /* SYSCALL */
2077 gen_excp_1(EXCP_SYSCALL);
31234768
RH
2078 ctx->base.is_jmp = DISAS_NORETURN;
2079 break;
7ad439df
RH
2080
2081 default:
2082 do_sigill:
2986721d 2083 gen_excp_1(EXCP_ILL);
31234768
RH
2084 ctx->base.is_jmp = DISAS_NORETURN;
2085 break;
7ad439df
RH
2086 }
2087}
ba1d0b44 2088#endif
7ad439df 2089
deee69a1 2090static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2091{
2092 cond_free(&ctx->null_cond);
31234768 2093 return true;
b2167459
RH
2094}
2095
40f9f908 2096static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2097{
31234768 2098 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2099}
2100
e36f27ef 2101static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2102{
2103 /* No point in nullifying the memory barrier. */
2104 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2105
2106 cond_free(&ctx->null_cond);
31234768 2107 return true;
98a9cb79
RH
2108}
2109
c603e14a 2110static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2111{
c603e14a 2112 unsigned rt = a->t;
eaa3783b
RH
2113 TCGv_reg tmp = dest_gpr(ctx, rt);
2114 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2115 save_gpr(ctx, rt, tmp);
2116
2117 cond_free(&ctx->null_cond);
31234768 2118 return true;
98a9cb79
RH
2119}
2120
c603e14a 2121static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2122{
c603e14a
RH
2123 unsigned rt = a->t;
2124 unsigned rs = a->sp;
33423472
RH
2125 TCGv_i64 t0 = tcg_temp_new_i64();
2126 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2127
33423472
RH
2128 load_spr(ctx, t0, rs);
2129 tcg_gen_shri_i64(t0, t0, 32);
2130 tcg_gen_trunc_i64_reg(t1, t0);
2131
2132 save_gpr(ctx, rt, t1);
2133 tcg_temp_free(t1);
2134 tcg_temp_free_i64(t0);
98a9cb79
RH
2135
2136 cond_free(&ctx->null_cond);
31234768 2137 return true;
98a9cb79
RH
2138}
2139
c603e14a 2140static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2141{
c603e14a
RH
2142 unsigned rt = a->t;
2143 unsigned ctl = a->r;
eaa3783b 2144 TCGv_reg tmp;
98a9cb79
RH
2145
2146 switch (ctl) {
35136a77 2147 case CR_SAR:
98a9cb79 2148#ifdef TARGET_HPPA64
c603e14a 2149 if (a->e == 0) {
98a9cb79
RH
2150 /* MFSAR without ,W masks low 5 bits. */
2151 tmp = dest_gpr(ctx, rt);
eaa3783b 2152 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2153 save_gpr(ctx, rt, tmp);
35136a77 2154 goto done;
98a9cb79
RH
2155 }
2156#endif
2157 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2158 goto done;
2159 case CR_IT: /* Interval Timer */
2160 /* FIXME: Respect PSW_S bit. */
2161 nullify_over(ctx);
98a9cb79 2162 tmp = dest_gpr(ctx, rt);
84b41e65 2163 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
49c29d6c
RH
2164 gen_io_start();
2165 gen_helper_read_interval_timer(tmp);
2166 gen_io_end();
31234768 2167 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2168 } else {
2169 gen_helper_read_interval_timer(tmp);
49c29d6c 2170 }
98a9cb79 2171 save_gpr(ctx, rt, tmp);
31234768 2172 return nullify_end(ctx);
98a9cb79 2173 case 26:
98a9cb79 2174 case 27:
98a9cb79
RH
2175 break;
2176 default:
2177 /* All other control registers are privileged. */
35136a77
RH
2178 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2179 break;
98a9cb79
RH
2180 }
2181
35136a77
RH
2182 tmp = get_temp(ctx);
2183 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2184 save_gpr(ctx, rt, tmp);
2185
2186 done:
98a9cb79 2187 cond_free(&ctx->null_cond);
31234768 2188 return true;
98a9cb79
RH
2189}
2190
c603e14a 2191static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2192{
c603e14a
RH
2193 unsigned rr = a->r;
2194 unsigned rs = a->sp;
33423472
RH
2195 TCGv_i64 t64;
2196
2197 if (rs >= 5) {
2198 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2199 }
2200 nullify_over(ctx);
2201
2202 t64 = tcg_temp_new_i64();
2203 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2204 tcg_gen_shli_i64(t64, t64, 32);
2205
2206 if (rs >= 4) {
2207 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2208 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2209 } else {
2210 tcg_gen_mov_i64(cpu_sr[rs], t64);
2211 }
2212 tcg_temp_free_i64(t64);
2213
31234768 2214 return nullify_end(ctx);
33423472
RH
2215}
2216
c603e14a 2217static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2218{
c603e14a
RH
2219 unsigned ctl = a->t;
2220 TCGv_reg reg = load_gpr(ctx, a->r);
eaa3783b 2221 TCGv_reg tmp;
98a9cb79 2222
35136a77 2223 if (ctl == CR_SAR) {
98a9cb79 2224 tmp = tcg_temp_new();
35136a77 2225 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
98a9cb79
RH
2226 save_or_nullify(ctx, cpu_sar, tmp);
2227 tcg_temp_free(tmp);
35136a77
RH
2228
2229 cond_free(&ctx->null_cond);
31234768 2230 return true;
98a9cb79
RH
2231 }
2232
35136a77
RH
2233 /* All other control registers are privileged or read-only. */
2234 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2235
c603e14a 2236#ifndef CONFIG_USER_ONLY
35136a77
RH
2237 nullify_over(ctx);
2238 switch (ctl) {
2239 case CR_IT:
49c29d6c 2240 gen_helper_write_interval_timer(cpu_env, reg);
35136a77 2241 break;
4f5f2548
RH
2242 case CR_EIRR:
2243 gen_helper_write_eirr(cpu_env, reg);
2244 break;
2245 case CR_EIEM:
2246 gen_helper_write_eiem(cpu_env, reg);
31234768 2247 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2248 break;
2249
35136a77
RH
2250 case CR_IIASQ:
2251 case CR_IIAOQ:
2252 /* FIXME: Respect PSW_Q bit */
2253 /* The write advances the queue and stores to the back element. */
2254 tmp = get_temp(ctx);
2255 tcg_gen_ld_reg(tmp, cpu_env,
2256 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2257 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2258 tcg_gen_st_reg(reg, cpu_env,
2259 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2260 break;
2261
2262 default:
2263 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2264 break;
2265 }
31234768 2266 return nullify_end(ctx);
4f5f2548 2267#endif
98a9cb79
RH
2268}
2269
c603e14a 2270static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2271{
eaa3783b 2272 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2273
c603e14a 2274 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
eaa3783b 2275 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
98a9cb79
RH
2276 save_or_nullify(ctx, cpu_sar, tmp);
2277 tcg_temp_free(tmp);
2278
2279 cond_free(&ctx->null_cond);
31234768 2280 return true;
98a9cb79
RH
2281}
2282
e36f27ef 2283static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2284{
e36f27ef 2285 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2286
2330504c
HD
2287#ifdef CONFIG_USER_ONLY
2288 /* We don't implement space registers in user mode. */
eaa3783b 2289 tcg_gen_movi_reg(dest, 0);
2330504c 2290#else
2330504c
HD
2291 TCGv_i64 t0 = tcg_temp_new_i64();
2292
e36f27ef 2293 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2294 tcg_gen_shri_i64(t0, t0, 32);
2295 tcg_gen_trunc_i64_reg(dest, t0);
2296
2297 tcg_temp_free_i64(t0);
2298#endif
e36f27ef 2299 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2300
2301 cond_free(&ctx->null_cond);
31234768 2302 return true;
98a9cb79
RH
2303}
2304
e36f27ef 2305static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2306{
e36f27ef
RH
2307 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2308#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2309 TCGv_reg tmp;
2310
e1b5a5ed
RH
2311 nullify_over(ctx);
2312
2313 tmp = get_temp(ctx);
2314 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2315 tcg_gen_andi_reg(tmp, tmp, ~a->i);
e1b5a5ed 2316 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2317 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2318
2319 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2320 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2321 return nullify_end(ctx);
e36f27ef 2322#endif
e1b5a5ed
RH
2323}
2324
e36f27ef 2325static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2326{
e36f27ef
RH
2327 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2328#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2329 TCGv_reg tmp;
2330
e1b5a5ed
RH
2331 nullify_over(ctx);
2332
2333 tmp = get_temp(ctx);
2334 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2335 tcg_gen_ori_reg(tmp, tmp, a->i);
e1b5a5ed 2336 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2337 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2338
2339 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2340 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2341 return nullify_end(ctx);
e36f27ef 2342#endif
e1b5a5ed
RH
2343}
2344
c603e14a 2345static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2346{
e1b5a5ed 2347 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2348#ifndef CONFIG_USER_ONLY
2349 TCGv_reg tmp, reg;
e1b5a5ed
RH
2350 nullify_over(ctx);
2351
c603e14a 2352 reg = load_gpr(ctx, a->r);
e1b5a5ed
RH
2353 tmp = get_temp(ctx);
2354 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2355
2356 /* Exit the TB to recognize new interrupts. */
31234768
RH
2357 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2358 return nullify_end(ctx);
c603e14a 2359#endif
e1b5a5ed 2360}
f49b3537 2361
e36f27ef 2362static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2363{
f49b3537 2364 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2365#ifndef CONFIG_USER_ONLY
f49b3537
RH
2366 nullify_over(ctx);
2367
e36f27ef 2368 if (rfi_r) {
f49b3537
RH
2369 gen_helper_rfi_r(cpu_env);
2370 } else {
2371 gen_helper_rfi(cpu_env);
2372 }
31234768 2373 /* Exit the TB to recognize new interrupts. */
f49b3537
RH
2374 if (ctx->base.singlestep_enabled) {
2375 gen_excp_1(EXCP_DEBUG);
2376 } else {
07ea28b4 2377 tcg_gen_exit_tb(NULL, 0);
f49b3537 2378 }
31234768 2379 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2380
31234768 2381 return nullify_end(ctx);
e36f27ef
RH
2382#endif
2383}
2384
2385static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2386{
2387 return do_rfi(ctx, false);
2388}
2389
2390static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2391{
2392 return do_rfi(ctx, true);
f49b3537 2393}
6210db05 2394
96927adb
RH
2395static bool trans_halt(DisasContext *ctx, arg_halt *a)
2396{
2397 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2398#ifndef CONFIG_USER_ONLY
96927adb
RH
2399 nullify_over(ctx);
2400 gen_helper_halt(cpu_env);
2401 ctx->base.is_jmp = DISAS_NORETURN;
2402 return nullify_end(ctx);
2403#endif
2404}
2405
2406static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2407{
2408 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2409#ifndef CONFIG_USER_ONLY
6210db05 2410 nullify_over(ctx);
96927adb 2411 gen_helper_reset(cpu_env);
31234768
RH
2412 ctx->base.is_jmp = DISAS_NORETURN;
2413 return nullify_end(ctx);
96927adb 2414#endif
6210db05 2415}
e1b5a5ed 2416
deee69a1 2417static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2418{
deee69a1
RH
2419 if (a->m) {
2420 TCGv_reg dest = dest_gpr(ctx, a->b);
2421 TCGv_reg src1 = load_gpr(ctx, a->b);
2422 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2423
deee69a1
RH
2424 /* The only thing we need to do is the base register modification. */
2425 tcg_gen_add_reg(dest, src1, src2);
2426 save_gpr(ctx, a->b, dest);
2427 }
98a9cb79 2428 cond_free(&ctx->null_cond);
31234768 2429 return true;
98a9cb79
RH
2430}
2431
deee69a1 2432static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2433{
86f8d05f 2434 TCGv_reg dest, ofs;
eed14219 2435 TCGv_i32 level, want;
86f8d05f 2436 TCGv_tl addr;
98a9cb79
RH
2437
2438 nullify_over(ctx);
2439
deee69a1
RH
2440 dest = dest_gpr(ctx, a->t);
2441 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2442
deee69a1
RH
2443 if (a->imm) {
2444 level = tcg_const_i32(a->ri);
98a9cb79 2445 } else {
eed14219 2446 level = tcg_temp_new_i32();
deee69a1 2447 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2448 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2449 }
deee69a1 2450 want = tcg_const_i32(a->write ? PAGE_WRITE : PAGE_READ);
eed14219
RH
2451
2452 gen_helper_probe(dest, cpu_env, addr, level, want);
2453
2454 tcg_temp_free_i32(want);
2455 tcg_temp_free_i32(level);
2456
deee69a1 2457 save_gpr(ctx, a->t, dest);
31234768 2458 return nullify_end(ctx);
98a9cb79
RH
2459}
2460
deee69a1 2461static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2462{
deee69a1
RH
2463 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2464#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2465 TCGv_tl addr;
2466 TCGv_reg ofs, reg;
2467
8d6ae7fb
RH
2468 nullify_over(ctx);
2469
deee69a1
RH
2470 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2471 reg = load_gpr(ctx, a->r);
2472 if (a->addr) {
8d6ae7fb
RH
2473 gen_helper_itlba(cpu_env, addr, reg);
2474 } else {
2475 gen_helper_itlbp(cpu_env, addr, reg);
2476 }
2477
2478 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2479 the case, since the OS TLB fill handler runs with mmu disabled. */
deee69a1 2480 if (!a->data && (ctx->tb_flags & PSW_C)) {
31234768
RH
2481 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2482 }
2483 return nullify_end(ctx);
deee69a1 2484#endif
8d6ae7fb 2485}
63300a00 2486
deee69a1 2487static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2488{
deee69a1
RH
2489 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2490#ifndef CONFIG_USER_ONLY
63300a00
RH
2491 TCGv_tl addr;
2492 TCGv_reg ofs;
2493
63300a00
RH
2494 nullify_over(ctx);
2495
deee69a1
RH
2496 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2497 if (a->m) {
2498 save_gpr(ctx, a->b, ofs);
63300a00 2499 }
deee69a1 2500 if (a->local) {
63300a00
RH
2501 gen_helper_ptlbe(cpu_env);
2502 } else {
2503 gen_helper_ptlb(cpu_env, addr);
2504 }
2505
2506 /* Exit TB for TLB change if mmu is enabled. */
deee69a1 2507 if (!a->data && (ctx->tb_flags & PSW_C)) {
31234768
RH
2508 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2509 }
2510 return nullify_end(ctx);
deee69a1 2511#endif
63300a00 2512}
2dfcca9f 2513
deee69a1 2514static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2515{
deee69a1
RH
2516 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2517#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2518 TCGv_tl vaddr;
2519 TCGv_reg ofs, paddr;
2520
2dfcca9f
RH
2521 nullify_over(ctx);
2522
deee69a1 2523 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2524
2525 paddr = tcg_temp_new();
2526 gen_helper_lpa(paddr, cpu_env, vaddr);
2527
2528 /* Note that physical address result overrides base modification. */
deee69a1
RH
2529 if (a->m) {
2530 save_gpr(ctx, a->b, ofs);
2dfcca9f 2531 }
deee69a1 2532 save_gpr(ctx, a->t, paddr);
2dfcca9f
RH
2533 tcg_temp_free(paddr);
2534
31234768 2535 return nullify_end(ctx);
deee69a1 2536#endif
2dfcca9f 2537}
43a97b81 2538
deee69a1 2539static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2540{
43a97b81
RH
2541 TCGv_reg ci;
2542
2543 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2544
2545 /* The Coherence Index is an implementation-defined function of the
2546 physical address. Two addresses with the same CI have a coherent
2547 view of the cache. Our implementation is to return 0 for all,
2548 since the entire address space is coherent. */
2549 ci = tcg_const_reg(0);
deee69a1 2550 save_gpr(ctx, a->t, ci);
43a97b81
RH
2551 tcg_temp_free(ci);
2552
31234768
RH
2553 cond_free(&ctx->null_cond);
2554 return true;
43a97b81 2555}
98a9cb79 2556
0c982a28 2557static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2558{
0c982a28
RH
2559 return do_add_reg(ctx, a, false, false, false, false);
2560}
b2167459 2561
0c982a28
RH
2562static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2563{
2564 return do_add_reg(ctx, a, true, false, false, false);
2565}
b2167459 2566
0c982a28
RH
2567static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2568{
2569 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2570}
2571
0c982a28 2572static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2573{
0c982a28
RH
2574 return do_add_reg(ctx, a, false, false, false, true);
2575}
b2167459 2576
0c982a28
RH
2577static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2578{
2579 return do_add_reg(ctx, a, false, true, false, true);
2580}
b2167459 2581
0c982a28
RH
2582static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2583{
2584 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2585}
2586
0c982a28 2587static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2588{
0c982a28
RH
2589 return do_sub_reg(ctx, a, true, false, false);
2590}
b2167459 2591
0c982a28
RH
2592static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2593{
2594 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2595}
2596
0c982a28 2597static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2598{
0c982a28
RH
2599 return do_sub_reg(ctx, a, true, false, true);
2600}
2601
2602static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2603{
2604 return do_sub_reg(ctx, a, false, true, false);
2605}
2606
2607static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2608{
2609 return do_sub_reg(ctx, a, true, true, false);
2610}
2611
2612static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2613{
2614 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2615}
2616
2617static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2618{
2619 return do_log_reg(ctx, a, tcg_gen_and_reg);
2620}
2621
2622static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2623{
2624 if (a->cf == 0) {
2625 unsigned r2 = a->r2;
2626 unsigned r1 = a->r1;
2627 unsigned rt = a->t;
b2167459 2628
7aee8189
RH
2629 if (rt == 0) { /* NOP */
2630 cond_free(&ctx->null_cond);
2631 return true;
2632 }
2633 if (r2 == 0) { /* COPY */
2634 if (r1 == 0) {
2635 TCGv_reg dest = dest_gpr(ctx, rt);
2636 tcg_gen_movi_reg(dest, 0);
2637 save_gpr(ctx, rt, dest);
2638 } else {
2639 save_gpr(ctx, rt, cpu_gr[r1]);
2640 }
2641 cond_free(&ctx->null_cond);
2642 return true;
2643 }
2644#ifndef CONFIG_USER_ONLY
2645 /* These are QEMU extensions and are nops in the real architecture:
2646 *
2647 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2648 * or %r31,%r31,%r31 -- death loop; offline cpu
2649 * currently implemented as idle.
2650 */
2651 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2652 TCGv_i32 tmp;
2653
2654 /* No need to check for supervisor, as userland can only pause
2655 until the next timer interrupt. */
2656 nullify_over(ctx);
2657
2658 /* Advance the instruction queue. */
2659 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2660 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2661 nullify_set(ctx, 0);
2662
2663 /* Tell the qemu main loop to halt until this cpu has work. */
2664 tmp = tcg_const_i32(1);
2665 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2666 offsetof(CPUState, halted));
2667 tcg_temp_free_i32(tmp);
2668 gen_excp_1(EXCP_HALTED);
2669 ctx->base.is_jmp = DISAS_NORETURN;
2670
2671 return nullify_end(ctx);
2672 }
2673#endif
b2167459 2674 }
0c982a28
RH
2675 return do_log_reg(ctx, a, tcg_gen_or_reg);
2676}
7aee8189 2677
0c982a28
RH
2678static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2679{
2680 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2681}
2682
0c982a28 2683static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2684{
eaa3783b 2685 TCGv_reg tcg_r1, tcg_r2;
b2167459 2686
0c982a28 2687 if (a->cf) {
b2167459
RH
2688 nullify_over(ctx);
2689 }
0c982a28
RH
2690 tcg_r1 = load_gpr(ctx, a->r1);
2691 tcg_r2 = load_gpr(ctx, a->r2);
2692 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2693 return nullify_end(ctx);
b2167459
RH
2694}
2695
0c982a28 2696static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2697{
eaa3783b 2698 TCGv_reg tcg_r1, tcg_r2;
b2167459 2699
0c982a28 2700 if (a->cf) {
b2167459
RH
2701 nullify_over(ctx);
2702 }
0c982a28
RH
2703 tcg_r1 = load_gpr(ctx, a->r1);
2704 tcg_r2 = load_gpr(ctx, a->r2);
2705 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2706 return nullify_end(ctx);
b2167459
RH
2707}
2708
0c982a28 2709static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2710{
eaa3783b 2711 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2712
0c982a28 2713 if (a->cf) {
b2167459
RH
2714 nullify_over(ctx);
2715 }
0c982a28
RH
2716 tcg_r1 = load_gpr(ctx, a->r1);
2717 tcg_r2 = load_gpr(ctx, a->r2);
b2167459 2718 tmp = get_temp(ctx);
eaa3783b 2719 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2720 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2721 return nullify_end(ctx);
b2167459
RH
2722}
2723
0c982a28
RH
2724static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2725{
2726 return do_uaddcm(ctx, a, false);
2727}
2728
2729static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2730{
2731 return do_uaddcm(ctx, a, true);
2732}
2733
2734static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2735{
eaa3783b 2736 TCGv_reg tmp;
b2167459
RH
2737
2738 nullify_over(ctx);
2739
2740 tmp = get_temp(ctx);
eaa3783b 2741 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2742 if (!is_i) {
eaa3783b 2743 tcg_gen_not_reg(tmp, tmp);
b2167459 2744 }
eaa3783b
RH
2745 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2746 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2747 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2748 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2749 return nullify_end(ctx);
b2167459
RH
2750}
2751
0c982a28
RH
2752static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2753{
2754 return do_dcor(ctx, a, false);
2755}
2756
2757static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2758{
2759 return do_dcor(ctx, a, true);
2760}
2761
2762static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2763{
eaa3783b 2764 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
b2167459
RH
2765
2766 nullify_over(ctx);
2767
0c982a28
RH
2768 in1 = load_gpr(ctx, a->r1);
2769 in2 = load_gpr(ctx, a->r2);
b2167459
RH
2770
2771 add1 = tcg_temp_new();
2772 add2 = tcg_temp_new();
2773 addc = tcg_temp_new();
2774 dest = tcg_temp_new();
eaa3783b 2775 zero = tcg_const_reg(0);
b2167459
RH
2776
2777 /* Form R1 << 1 | PSW[CB]{8}. */
eaa3783b
RH
2778 tcg_gen_add_reg(add1, in1, in1);
2779 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
b2167459
RH
2780
2781 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2782 carry{8} requires that we subtract via + ~R2 + 1, as described in
2783 the manual. By extracting and masking V, we can produce the
2784 proper inputs to the addition without movcond. */
eaa3783b
RH
2785 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2786 tcg_gen_xor_reg(add2, in2, addc);
2787 tcg_gen_andi_reg(addc, addc, 1);
b2167459
RH
2788 /* ??? This is only correct for 32-bit. */
2789 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2790 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2791
2792 tcg_temp_free(addc);
2793 tcg_temp_free(zero);
2794
2795 /* Write back the result register. */
0c982a28 2796 save_gpr(ctx, a->t, dest);
b2167459
RH
2797
2798 /* Write back PSW[CB]. */
eaa3783b
RH
2799 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2800 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
b2167459
RH
2801
2802 /* Write back PSW[V] for the division step. */
eaa3783b
RH
2803 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2804 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
b2167459
RH
2805
2806 /* Install the new nullification. */
0c982a28 2807 if (a->cf) {
eaa3783b 2808 TCGv_reg sv = NULL;
b47a4a02 2809 if (cond_need_sv(a->cf >> 1)) {
b2167459
RH
2810 /* ??? The lshift is supposed to contribute to overflow. */
2811 sv = do_add_sv(ctx, dest, add1, add2);
2812 }
0c982a28 2813 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
b2167459
RH
2814 }
2815
2816 tcg_temp_free(add1);
2817 tcg_temp_free(add2);
2818 tcg_temp_free(dest);
2819
31234768 2820 return nullify_end(ctx);
b2167459
RH
2821}
2822
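/*
 * Illustrative sketch, not part of the original file: the branch-free
 * add-or-subtract used by trans_ds above.  v_mask is PSW[V] sign-extended
 * to all ones (subtract) or all zeros (add); since x + ~y + 1 == x - y in
 * two's complement, xoring with the mask and adding its low bit selects
 * between the two operations without a movcond.
 */
#include <stdint.h>

static uint32_t add_or_sub(uint32_t x, uint32_t y, uint32_t v_mask)
{
    return x + (y ^ v_mask) + (v_mask & 1);
}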
0588e061 2823static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2824{
0588e061
RH
2825 return do_add_imm(ctx, a, false, false);
2826}
b2167459 2827
0588e061
RH
2828static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2829{
2830 return do_add_imm(ctx, a, true, false);
b2167459
RH
2831}
2832
0588e061 2833static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2834{
0588e061
RH
2835 return do_add_imm(ctx, a, false, true);
2836}
b2167459 2837
0588e061
RH
2838static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2839{
2840 return do_add_imm(ctx, a, true, true);
2841}
b2167459 2842
0588e061
RH
2843static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2844{
2845 return do_sub_imm(ctx, a, false);
2846}
b2167459 2847
0588e061
RH
2848static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2849{
2850 return do_sub_imm(ctx, a, true);
b2167459
RH
2851}
2852
0588e061 2853static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2854{
eaa3783b 2855 TCGv_reg tcg_im, tcg_r2;
b2167459 2856
0588e061 2857 if (a->cf) {
b2167459
RH
2858 nullify_over(ctx);
2859 }
2860
0588e061
RH
2861 tcg_im = load_const(ctx, a->i);
2862 tcg_r2 = load_gpr(ctx, a->r);
2863 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2864
31234768 2865 return nullify_end(ctx);
b2167459
RH
2866}
2867
1cd012a5 2868static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2869{
1cd012a5
RH
2870 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2871 a->disp, a->sp, a->m, a->size | MO_TE);
96d6407f
RH
2872}
2873
1cd012a5 2874static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2875{
1cd012a5
RH
2876 assert(a->x == 0 && a->scale == 0);
2877 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
96d6407f
RH
2878}
2879
1cd012a5 2880static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
96d6407f 2881{
1cd012a5 2882 TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
86f8d05f
RH
2883 TCGv_reg zero, dest, ofs;
2884 TCGv_tl addr;
96d6407f
RH
2885
2886 nullify_over(ctx);
2887
1cd012a5 2888 if (a->m) {
86f8d05f
RH
2889 /* Base register modification. Make sure if RT == RB,
2890 we see the result of the load. */
96d6407f
RH
2891 dest = get_temp(ctx);
2892 } else {
1cd012a5 2893 dest = dest_gpr(ctx, a->t);
96d6407f
RH
2894 }
2895
1cd012a5
RH
2896 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2897 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
eaa3783b 2898 zero = tcg_const_reg(0);
86f8d05f 2899 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
1cd012a5
RH
2900 if (a->m) {
2901 save_gpr(ctx, a->b, ofs);
96d6407f 2902 }
1cd012a5 2903 save_gpr(ctx, a->t, dest);
96d6407f 2904
31234768 2905 return nullify_end(ctx);
96d6407f
RH
2906}
2907
1cd012a5 2908static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2909{
86f8d05f
RH
2910 TCGv_reg ofs, val;
2911 TCGv_tl addr;
96d6407f
RH
2912
2913 nullify_over(ctx);
2914
1cd012a5 2915 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2916 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2917 val = load_gpr(ctx, a->r);
2918 if (a->a) {
f9f46db4
EC
2919 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2920 gen_helper_stby_e_parallel(cpu_env, addr, val);
2921 } else {
2922 gen_helper_stby_e(cpu_env, addr, val);
2923 }
96d6407f 2924 } else {
f9f46db4
EC
2925 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2926 gen_helper_stby_b_parallel(cpu_env, addr, val);
2927 } else {
2928 gen_helper_stby_b(cpu_env, addr, val);
2929 }
96d6407f 2930 }
1cd012a5 2931 if (a->m) {
86f8d05f 2932 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2933 save_gpr(ctx, a->b, ofs);
96d6407f 2934 }
96d6407f 2935
31234768 2936 return nullify_end(ctx);
96d6407f
RH
2937}
2938
1cd012a5 2939static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2940{
2941 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2942
2943 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2944 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2945 trans_ld(ctx, a);
d0a851cc 2946 ctx->mmu_idx = hold_mmu_idx;
31234768 2947 return true;
d0a851cc
RH
2948}
2949
1cd012a5 2950static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2951{
2952 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2953
2954 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2955 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2956 trans_st(ctx, a);
d0a851cc 2957 ctx->mmu_idx = hold_mmu_idx;
31234768 2958 return true;
d0a851cc 2959}
95412a61 2960
0588e061 2961static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2962{
0588e061 2963 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2964
0588e061
RH
2965 tcg_gen_movi_reg(tcg_rt, a->i);
2966 save_gpr(ctx, a->t, tcg_rt);
b2167459 2967 cond_free(&ctx->null_cond);
31234768 2968 return true;
b2167459
RH
2969}
2970
0588e061 2971static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 2972{
0588e061 2973 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 2974 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 2975
0588e061 2976 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
2977 save_gpr(ctx, 1, tcg_r1);
2978 cond_free(&ctx->null_cond);
31234768 2979 return true;
b2167459
RH
2980}
2981
0588e061 2982static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 2983{
0588e061 2984 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
2985
2986 /* Special case rb == 0, for the LDI pseudo-op.
2987 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
2988 if (a->b == 0) {
2989 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 2990 } else {
0588e061 2991 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 2992 }
0588e061 2993 save_gpr(ctx, a->t, tcg_rt);
b2167459 2994 cond_free(&ctx->null_cond);
31234768 2995 return true;
b2167459
RH
2996}
2997
01afb7be
RH
2998static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2999 unsigned c, unsigned f, unsigned n, int disp)
98cd9ca7 3000{
01afb7be 3001 TCGv_reg dest, in2, sv;
98cd9ca7
RH
3002 DisasCond cond;
3003
98cd9ca7
RH
3004 in2 = load_gpr(ctx, r);
3005 dest = get_temp(ctx);
3006
eaa3783b 3007 tcg_gen_sub_reg(dest, in1, in2);
98cd9ca7 3008
f764718d 3009 sv = NULL;
b47a4a02 3010 if (cond_need_sv(c)) {
98cd9ca7
RH
3011 sv = do_sub_sv(ctx, dest, in1, in2);
3012 }
3013
01afb7be
RH
3014 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3015 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3016}
3017
01afb7be 3018static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
98cd9ca7 3019{
01afb7be
RH
3020 nullify_over(ctx);
3021 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3022}
98cd9ca7 3023
01afb7be
RH
3024static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3025{
98cd9ca7 3026 nullify_over(ctx);
01afb7be
RH
3027 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3028}
3029
3030static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3031 unsigned c, unsigned f, unsigned n, int disp)
3032{
3033 TCGv_reg dest, in2, sv, cb_msb;
3034 DisasCond cond;
98cd9ca7 3035
98cd9ca7
RH
3036 in2 = load_gpr(ctx, r);
3037 dest = dest_gpr(ctx, r);
f764718d
RH
3038 sv = NULL;
3039 cb_msb = NULL;
98cd9ca7 3040
b47a4a02 3041 if (cond_need_cb(c)) {
98cd9ca7 3042 cb_msb = get_temp(ctx);
eaa3783b
RH
3043 tcg_gen_movi_reg(cb_msb, 0);
3044 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
b47a4a02 3045 } else {
eaa3783b 3046 tcg_gen_add_reg(dest, in1, in2);
b47a4a02
SS
3047 }
3048 if (cond_need_sv(c)) {
98cd9ca7 3049 sv = do_add_sv(ctx, dest, in1, in2);
98cd9ca7
RH
3050 }
3051
01afb7be
RH
3052 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3053 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3054}
3055
01afb7be
RH
3056static bool trans_addb(DisasContext *ctx, arg_addb *a)
3057{
3058 nullify_over(ctx);
3059 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3060}
3061
3062static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3063{
3064 nullify_over(ctx);
3065 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3066}
3067
3068static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
98cd9ca7 3069{
eaa3783b 3070 TCGv_reg tmp, tcg_r;
98cd9ca7
RH
3071 DisasCond cond;
3072
3073 nullify_over(ctx);
3074
3075 tmp = tcg_temp_new();
01afb7be
RH
3076 tcg_r = load_gpr(ctx, a->r);
3077 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
98cd9ca7 3078
01afb7be 3079 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
98cd9ca7 3080 tcg_temp_free(tmp);
01afb7be 3081 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3082}
3083
01afb7be
RH
3084static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3085{
3086 TCGv_reg tmp, tcg_r;
3087 DisasCond cond;
3088
3089 nullify_over(ctx);
3090
3091 tmp = tcg_temp_new();
3092 tcg_r = load_gpr(ctx, a->r);
3093 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3094
3095 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3096 tcg_temp_free(tmp);
3097 return do_cbranch(ctx, a->disp, a->n, &cond);
3098}
3099
3100static bool trans_movb(DisasContext *ctx, arg_movb *a)
98cd9ca7 3101{
eaa3783b 3102 TCGv_reg dest;
98cd9ca7
RH
3103 DisasCond cond;
3104
3105 nullify_over(ctx);
3106
01afb7be
RH
3107 dest = dest_gpr(ctx, a->r2);
3108 if (a->r1 == 0) {
eaa3783b 3109 tcg_gen_movi_reg(dest, 0);
98cd9ca7 3110 } else {
01afb7be 3111 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
98cd9ca7
RH
3112 }
3113
01afb7be
RH
3114 cond = do_sed_cond(a->c, dest);
3115 return do_cbranch(ctx, a->disp, a->n, &cond);
3116}
3117
3118static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3119{
3120 TCGv_reg dest;
3121 DisasCond cond;
3122
3123 nullify_over(ctx);
3124
3125 dest = dest_gpr(ctx, a->r);
3126 tcg_gen_movi_reg(dest, a->i);
3127
3128 cond = do_sed_cond(a->c, dest);
3129 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3130}
3131
30878590 3132static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
0b1347d2 3133{
eaa3783b 3134 TCGv_reg dest;
0b1347d2 3135
30878590 3136 if (a->c) {
0b1347d2
RH
3137 nullify_over(ctx);
3138 }
3139
30878590
RH
3140 dest = dest_gpr(ctx, a->t);
3141 if (a->r1 == 0) {
3142 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
eaa3783b 3143 tcg_gen_shr_reg(dest, dest, cpu_sar);
30878590 3144 } else if (a->r1 == a->r2) {
0b1347d2 3145 TCGv_i32 t32 = tcg_temp_new_i32();
30878590 3146 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
0b1347d2 3147 tcg_gen_rotr_i32(t32, t32, cpu_sar);
eaa3783b 3148 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2
RH
3149 tcg_temp_free_i32(t32);
3150 } else {
3151 TCGv_i64 t = tcg_temp_new_i64();
3152 TCGv_i64 s = tcg_temp_new_i64();
3153
30878590 3154 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
eaa3783b 3155 tcg_gen_extu_reg_i64(s, cpu_sar);
0b1347d2 3156 tcg_gen_shr_i64(t, t, s);
eaa3783b 3157 tcg_gen_trunc_i64_reg(dest, t);
0b1347d2
RH
3158
3159 tcg_temp_free_i64(t);
3160 tcg_temp_free_i64(s);
3161 }
30878590 3162 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3163
3164 /* Install the new nullification. */
3165 cond_free(&ctx->null_cond);
30878590
RH
3166 if (a->c) {
3167 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3168 }
31234768 3169 return nullify_end(ctx);
0b1347d2
RH
3170}
3171
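/*
 * Illustrative sketch, not part of the original file: all three cases of
 * trans_shrpw_sar above compute the low 32 bits of the 64-bit pair r1:r2
 * shifted right by SAR.  With r1 == 0 this degenerates to a plain right
 * shift of r2, and with r1 == r2 it is a 32-bit rotate, which is why
 * those cases take cheaper code paths.
 */
#include <stdint.h>

static uint32_t shrpw(uint32_t r1, uint32_t r2, unsigned sar /* 0..31 */)
{
    uint64_t pair = ((uint64_t)r1 << 32) | r2;
    return (uint32_t)(pair >> sar);
}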
30878590 3172static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
0b1347d2 3173{
30878590 3174 unsigned sa = 31 - a->cpos;
eaa3783b 3175 TCGv_reg dest, t2;
0b1347d2 3176
30878590 3177 if (a->c) {
0b1347d2
RH
3178 nullify_over(ctx);
3179 }
3180
30878590
RH
3181 dest = dest_gpr(ctx, a->t);
3182 t2 = load_gpr(ctx, a->r2);
3183 if (a->r1 == a->r2) {
0b1347d2 3184 TCGv_i32 t32 = tcg_temp_new_i32();
eaa3783b 3185 tcg_gen_trunc_reg_i32(t32, t2);
0b1347d2 3186 tcg_gen_rotri_i32(t32, t32, sa);
eaa3783b 3187 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2 3188 tcg_temp_free_i32(t32);
30878590 3189 } else if (a->r1 == 0) {
eaa3783b 3190 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
0b1347d2 3191 } else {
eaa3783b
RH
3192 TCGv_reg t0 = tcg_temp_new();
3193 tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
30878590 3194 tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
0b1347d2
RH
3195 tcg_temp_free(t0);
3196 }
30878590 3197 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3198
3199 /* Install the new nullification. */
3200 cond_free(&ctx->null_cond);
30878590
RH
3201 if (a->c) {
3202 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3203 }
31234768 3204 return nullify_end(ctx);
0b1347d2
RH
3205}
3206
30878590 3207static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
0b1347d2 3208{
30878590 3209 unsigned len = 32 - a->clen;
eaa3783b 3210 TCGv_reg dest, src, tmp;
0b1347d2 3211
30878590 3212 if (a->c) {
0b1347d2
RH
3213 nullify_over(ctx);
3214 }
3215
30878590
RH
3216 dest = dest_gpr(ctx, a->t);
3217 src = load_gpr(ctx, a->r);
0b1347d2
RH
3218 tmp = tcg_temp_new();
3219
3220 /* Recall that SAR is using big-endian bit numbering. */
eaa3783b 3221 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
30878590 3222 if (a->se) {
eaa3783b
RH
3223 tcg_gen_sar_reg(dest, src, tmp);
3224 tcg_gen_sextract_reg(dest, dest, 0, len);
0b1347d2 3225 } else {
eaa3783b
RH
3226 tcg_gen_shr_reg(dest, src, tmp);
3227 tcg_gen_extract_reg(dest, dest, 0, len);
0b1347d2
RH
3228 }
3229 tcg_temp_free(tmp);
30878590 3230 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3231
3232 /* Install the new nullification. */
3233 cond_free(&ctx->null_cond);
30878590
RH
3234 if (a->c) {
3235 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3236 }
31234768 3237 return nullify_end(ctx);
0b1347d2
RH
3238}
3239
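/*
 * Illustrative note, not part of the original file: SAR numbers bits from
 * the most-significant end, so a field whose rightmost bit sits at
 * big-endian position p is extracted with a right shift of 31 - p (in the
 * 32-bit case).  Because SAR is masked to 0..TARGET_REGISTER_BITS-1 on
 * write, the xor with TARGET_REGISTER_BITS - 1 above is exactly that
 * subtraction.
 */
#include <assert.h>

static unsigned sar_to_shift_32(unsigned sar)
{
    assert(sar < 32);
    return sar ^ 31;        /* == 31 - sar for sar in 0..31 */
}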
30878590 3240static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
0b1347d2 3241{
30878590
RH
3242 unsigned len = 32 - a->clen;
3243 unsigned cpos = 31 - a->pos;
eaa3783b 3244 TCGv_reg dest, src;
0b1347d2 3245
30878590 3246 if (a->c) {
0b1347d2
RH
3247 nullify_over(ctx);
3248 }
3249
30878590
RH
3250 dest = dest_gpr(ctx, a->t);
3251 src = load_gpr(ctx, a->r);
3252 if (a->se) {
eaa3783b 3253 tcg_gen_sextract_reg(dest, src, cpos, len);
0b1347d2 3254 } else {
eaa3783b 3255 tcg_gen_extract_reg(dest, src, cpos, len);
0b1347d2 3256 }
30878590 3257 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3258
3259 /* Install the new nullification. */
3260 cond_free(&ctx->null_cond);
30878590
RH
3261 if (a->c) {
3262 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3263 }
31234768 3264 return nullify_end(ctx);
0b1347d2
RH
3265}
3266
30878590 3267static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
0b1347d2 3268{
30878590 3269 unsigned len = 32 - a->clen;
eaa3783b
RH
3270 target_sreg mask0, mask1;
3271 TCGv_reg dest;
0b1347d2 3272
30878590 3273 if (a->c) {
0b1347d2
RH
3274 nullify_over(ctx);
3275 }
30878590
RH
3276 if (a->cpos + len > 32) {
3277 len = 32 - a->cpos;
0b1347d2
RH
3278 }
3279
30878590
RH
3280 dest = dest_gpr(ctx, a->t);
3281 mask0 = deposit64(0, a->cpos, len, a->i);
3282 mask1 = deposit64(-1, a->cpos, len, a->i);
0b1347d2 3283
30878590
RH
3284 if (a->nz) {
3285 TCGv_reg src = load_gpr(ctx, a->t);
0b1347d2 3286 if (mask1 != -1) {
eaa3783b 3287 tcg_gen_andi_reg(dest, src, mask1);
0b1347d2
RH
3288 src = dest;
3289 }
eaa3783b 3290 tcg_gen_ori_reg(dest, src, mask0);
0b1347d2 3291 } else {
eaa3783b 3292 tcg_gen_movi_reg(dest, mask0);
0b1347d2 3293 }
30878590 3294 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3295
3296 /* Install the new nullification. */
3297 cond_free(&ctx->null_cond);
30878590
RH
3298 if (a->c) {
3299 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3300 }
31234768 3301 return nullify_end(ctx);
0b1347d2
RH
3302}
3303
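/*
 * Illustrative sketch, not part of the original file: trans_depwi_imm
 * above folds the deposit of a constant into one AND plus one OR by
 * precomputing mask1 = deposit(-1, pos, len, val), which preserves every
 * bit outside the field plus the 1-bits of val inside it, and
 * mask0 = deposit(0, pos, len, val), which supplies those 1-bits.
 */
#include <stdint.h>

static uint32_t deposit_imm(uint32_t src, unsigned pos, unsigned len, uint32_t val)
{
    uint32_t field = ((len < 32 ? 1u << len : 0u) - 1u) << pos;
    uint32_t mask0 = (val << pos) & field;   /* field bits that must become 1 */
    uint32_t mask1 = mask0 | ~field;         /* bits of src that survive the AND */
    return (src & mask1) | mask0;
}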
30878590 3304static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
0b1347d2 3305{
30878590
RH
3306 unsigned rs = a->nz ? a->t : 0;
3307 unsigned len = 32 - a->clen;
eaa3783b 3308 TCGv_reg dest, val;
0b1347d2 3309
30878590 3310 if (a->c) {
0b1347d2
RH
3311 nullify_over(ctx);
3312 }
30878590
RH
3313 if (a->cpos + len > 32) {
3314 len = 32 - a->cpos;
0b1347d2
RH
3315 }
3316
30878590
RH
3317 dest = dest_gpr(ctx, a->t);
3318 val = load_gpr(ctx, a->r);
0b1347d2 3319 if (rs == 0) {
30878590 3320 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
0b1347d2 3321 } else {
30878590 3322 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
0b1347d2 3323 }
30878590 3324 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3325
3326 /* Install the new nullification. */
3327 cond_free(&ctx->null_cond);
30878590
RH
3328 if (a->c) {
3329 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3330 }
31234768 3331 return nullify_end(ctx);
0b1347d2
RH
3332}
3333
30878590
RH
3334static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3335 unsigned nz, unsigned clen, TCGv_reg val)
0b1347d2 3336{
0b1347d2
RH
3337 unsigned rs = nz ? rt : 0;
3338 unsigned len = 32 - clen;
30878590 3339 TCGv_reg mask, tmp, shift, dest;
0b1347d2
RH
3340 unsigned msb = 1U << (len - 1);
3341
3342 if (c) {
3343 nullify_over(ctx);
3344 }
3345
0b1347d2
RH
3346 dest = dest_gpr(ctx, rt);
3347 shift = tcg_temp_new();
3348 tmp = tcg_temp_new();
3349
3350 /* Convert big-endian bit numbering in SAR to left-shift. */
eaa3783b 3351 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
0b1347d2 3352
eaa3783b
RH
3353 mask = tcg_const_reg(msb + (msb - 1));
3354 tcg_gen_and_reg(tmp, val, mask);
0b1347d2 3355 if (rs) {
eaa3783b
RH
3356 tcg_gen_shl_reg(mask, mask, shift);
3357 tcg_gen_shl_reg(tmp, tmp, shift);
3358 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3359 tcg_gen_or_reg(dest, dest, tmp);
0b1347d2 3360 } else {
eaa3783b 3361 tcg_gen_shl_reg(dest, tmp, shift);
0b1347d2
RH
3362 }
3363 tcg_temp_free(shift);
3364 tcg_temp_free(mask);
3365 tcg_temp_free(tmp);
3366 save_gpr(ctx, rt, dest);
3367
3368 /* Install the new nullification. */
3369 cond_free(&ctx->null_cond);
3370 if (c) {
3371 ctx->null_cond = do_sed_cond(c, dest);
3372 }
31234768 3373 return nullify_end(ctx);
0b1347d2
RH
3374}
3375
30878590
RH
3376static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3377{
3378 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3379}
3380
3381static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3382{
3383 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3384}
0b1347d2 3385
8340f534 3386static bool trans_be(DisasContext *ctx, arg_be *a)
98cd9ca7 3387{
660eefe1 3388 TCGv_reg tmp;
98cd9ca7 3389
c301f34e 3390#ifdef CONFIG_USER_ONLY
98cd9ca7
RH
3391 /* ??? It seems like there should be a good way of using
3392 "be disp(sr2, r0)", the canonical gateway entry mechanism
3393 to our advantage. But that appears to be inconvenient to
3394 manage alongside branch delay slots. Therefore we handle
3395 entry into the gateway page via absolute address. */
98cd9ca7
RH
3396 /* Since we don't implement spaces, just branch. Do notice the special
3397 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3398 goto_tb to the TB containing the syscall. */
8340f534
RH
3399 if (a->b == 0) {
3400 return do_dbranch(ctx, a->disp, a->l, a->n);
98cd9ca7 3401 }
c301f34e 3402#else
c301f34e 3403 nullify_over(ctx);
660eefe1
RH
3404#endif
3405
3406 tmp = get_temp(ctx);
8340f534 3407 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
660eefe1 3408 tmp = do_ibranch_priv(ctx, tmp);
c301f34e
RH
3409
3410#ifdef CONFIG_USER_ONLY
8340f534 3411 return do_ibranch(ctx, tmp, a->l, a->n);
c301f34e
RH
3412#else
3413 TCGv_i64 new_spc = tcg_temp_new_i64();
3414
8340f534
RH
3415 load_spr(ctx, new_spc, a->sp);
3416 if (a->l) {
c301f34e
RH
3417 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3418 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3419 }
8340f534 3420 if (a->n && use_nullify_skip(ctx)) {
c301f34e
RH
3421 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3422 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3423 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3424 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3425 } else {
3426 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3427 if (ctx->iaoq_b == -1) {
3428 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3429 }
3430 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3431 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
8340f534 3432 nullify_set(ctx, a->n);
c301f34e
RH
3433 }
3434 tcg_temp_free_i64(new_spc);
3435 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3436 ctx->base.is_jmp = DISAS_NORETURN;
3437 return nullify_end(ctx);
c301f34e 3438#endif
98cd9ca7
RH
3439}
3440
8340f534 3441static bool trans_bl(DisasContext *ctx, arg_bl *a)
98cd9ca7 3442{
8340f534 3443 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
98cd9ca7
RH
3444}
3445
8340f534 3446static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
43e05652 3447{
8340f534 3448 target_ureg dest = iaoq_dest(ctx, a->disp);
43e05652
RH
3449
3450 /* Make sure the caller hasn't done something weird with the queue.
3451 * ??? This is not quite the same as the PSW[B] bit, which would be
3452 * expensive to track. Real hardware will trap for
3453 * b gateway
3454 * b gateway+4 (in delay slot of first branch)
3455 * However, checking for a non-sequential instruction queue *will*
3456 * diagnose the security hole
3457 * b gateway
3458 * b evil
3459 * in which instructions at evil would run with increased privs.
3460 */
3461 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3462 return gen_illegal(ctx);
3463 }
3464
3465#ifndef CONFIG_USER_ONLY
3466 if (ctx->tb_flags & PSW_C) {
3467 CPUHPPAState *env = ctx->cs->env_ptr;
3468 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3469 /* If we could not find a TLB entry, then we need to generate an
3470 ITLB miss exception so the kernel will provide it.
3471 The resulting TLB fill operation will invalidate this TB and
3472 we will re-translate, at which point we *will* be able to find
3473 the TLB entry and determine if this is in fact a gateway page. */
3474 if (type < 0) {
31234768
RH
3475 gen_excp(ctx, EXCP_ITLB_MISS);
3476 return true;
43e05652
RH
3477 }
3478 /* No change for non-gateway pages or for priv decrease. */
3479 if (type >= 4 && type - 4 < ctx->privilege) {
3480 dest = deposit32(dest, 0, 2, type - 4);
3481 }
3482 } else {
3483 dest &= -4; /* priv = 0 */
3484 }
3485#endif
3486
8340f534 3487 return do_dbranch(ctx, dest, a->l, a->n);
43e05652
RH
3488}
3489
8340f534 3490static bool trans_blr(DisasContext *ctx, arg_blr *a)
98cd9ca7 3491{
eaa3783b 3492 TCGv_reg tmp = get_temp(ctx);
98cd9ca7 3493
8340f534 3494 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
eaa3783b 3495 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
660eefe1 3496 /* The computation here never changes privilege level. */
8340f534 3497 return do_ibranch(ctx, tmp, a->l, a->n);
98cd9ca7
RH
3498}
3499
8340f534 3500static bool trans_bv(DisasContext *ctx, arg_bv *a)
98cd9ca7 3501{
eaa3783b 3502 TCGv_reg dest;
98cd9ca7 3503
8340f534
RH
3504 if (a->x == 0) {
3505 dest = load_gpr(ctx, a->b);
98cd9ca7
RH
3506 } else {
3507 dest = get_temp(ctx);
8340f534
RH
3508 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3509 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
98cd9ca7 3510 }
660eefe1 3511 dest = do_ibranch_priv(ctx, dest);
8340f534 3512 return do_ibranch(ctx, dest, 0, a->n);
98cd9ca7
RH
3513}
3514
8340f534 3515static bool trans_bve(DisasContext *ctx, arg_bve *a)
98cd9ca7 3516{
660eefe1 3517 TCGv_reg dest;
98cd9ca7 3518
c301f34e 3519#ifdef CONFIG_USER_ONLY
8340f534
RH
3520 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3521 return do_ibranch(ctx, dest, a->l, a->n);
c301f34e
RH
3522#else
3523 nullify_over(ctx);
8340f534 3524 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
c301f34e
RH
3525
3526 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3527 if (ctx->iaoq_b == -1) {
3528 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3529 }
3530 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3531 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
8340f534
RH
3532 if (a->l) {
3533 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e 3534 }
8340f534 3535 nullify_set(ctx, a->n);
c301f34e 3536 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3537 ctx->base.is_jmp = DISAS_NORETURN;
3538 return nullify_end(ctx);
c301f34e 3539#endif
98cd9ca7
RH
3540}
3541
1ca74648
RH
3542/*
3543 * Float class 0
3544 */
ebe9383c 3545
1ca74648 3546static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3547{
1ca74648 3548 tcg_gen_mov_i32(dst, src);
ebe9383c
RH
3549}
3550
1ca74648 3551static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3552{
1ca74648 3553 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
ebe9383c
RH
3554}
3555
1ca74648 3556static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3557{
1ca74648 3558 tcg_gen_mov_i64(dst, src);
3559}
3560
1ca74648 3561static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3562{
1ca74648 3563 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3564}
3565
1ca74648 3566static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3567{
1ca74648 3568 tcg_gen_andi_i32(dst, src, INT32_MAX);
3569}
3570
1ca74648 3571static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3572{
1ca74648 3573 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3574}
3575
1ca74648 3576static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3577{
1ca74648 3578 tcg_gen_andi_i64(dst, src, INT64_MAX);
3579}
3580
1ca74648 3581static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3582{
1ca74648 3583 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3584}
3585
1ca74648 3586static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3587{
1ca74648 3588 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3589}
3590
1ca74648 3591static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3592{
1ca74648 3593 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3594}
3595
1ca74648 3596static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3597{
1ca74648 3598 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3599}
3600
1ca74648 3601static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3602{
1ca74648 3603 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3604}
3605
1ca74648 3606static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3607{
1ca74648 3608 tcg_gen_xori_i32(dst, src, INT32_MIN);
3609}
3610
1ca74648 3611static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3612{
1ca74648 3613 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3614}
3615
3616static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3617{
3618 tcg_gen_xori_i64(dst, src, INT64_MIN);
3619}
3620
3621static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3622{
3623 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3624}
3625
3626static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3627{
3628 tcg_gen_ori_i32(dst, src, INT32_MIN);
3629}
3630
3631static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3632{
3633 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3634}
3635
3636static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3637{
3638 tcg_gen_ori_i64(dst, src, INT64_MIN);
3639}
3640
3641static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3642{
3643 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3644}
3645
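/*
 * The class 0 copy/abs/neg/negabs helpers above are pure IEEE sign-bit
 * manipulation; roughly:
 *   fabs    : x & 0x7fffffff   (clear the sign bit)
 *   fneg    : x ^ 0x80000000   (flip the sign bit)
 *   fnegabs : x | 0x80000000   (force the sign bit on)
 * with the analogous 64-bit masks for the double-precision variants.
 */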
3646/*
3647 * Float class 1
3648 */
3649
3650static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3651{
3652 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3653}
3654
3655static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3656{
3657 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3658}
3659
3660static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3661{
3662 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3663}
3664
3665static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3666{
3667 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3668}
3669
3670static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3671{
3672 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3673}
3674
3675static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3676{
3677 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3678}
3679
3680static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3681{
3682 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3683}
3684
3685static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3686{
3687 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3688}
3689
3690static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3691{
3692 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3693}
3694
3695static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3696{
3697 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3698}
3699
3700static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3701{
3702 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3703}
3704
3705static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3706{
3707 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3708}
3709
3710static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3711{
3712 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3713}
3714
3715static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3716{
3717 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3718}
3719
3720static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3721{
3722 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3723}
3724
3725static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3726{
3727 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3728}
3729
3730static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3731{
3732 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3733}
3734
3735static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3736{
3737 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3738}
3739
3740static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3741{
3742 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3743}
3744
3745static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3746{
3747 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3748}
3749
3750static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3751{
3752 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3753}
3754
3755static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3756{
3757 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3758}
3759
3760static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3761{
3762 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3763}
3764
3765static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3766{
3767 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3768}
3769
3770static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3771{
3772 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3773}
3774
3775static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3776{
3777 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3778}
3779
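/*
 * Naming key for the class 1 conversions above (inferred from the
 * helper names): "f"/"d" are single and double precision, "w" and "q"
 * ("uw"/"uq") are signed and unsigned 32- and 64-bit integers, and the
 * "t_" forms convert with truncation toward zero rather than the
 * current rounding mode.  trans_fcnv_t_d_w, for instance, turns a
 * double into a truncated 32-bit integer.
 */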
3780/*
3781 * Float class 2
3782 */
3783
3784static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3785{
3786 TCGv_i32 ta, tb, tc, ty;
3787
3788 nullify_over(ctx);
3789
3790 ta = load_frw0_i32(a->r1);
3791 tb = load_frw0_i32(a->r2);
3792 ty = tcg_const_i32(a->y);
3793 tc = tcg_const_i32(a->c);
3794
3795 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3796
3797 tcg_temp_free_i32(ta);
3798 tcg_temp_free_i32(tb);
3799 tcg_temp_free_i32(ty);
3800 tcg_temp_free_i32(tc);
3801
1ca74648 3802 return nullify_end(ctx);
3803}
3804
1ca74648 3805static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
ebe9383c 3806{
3807 TCGv_i64 ta, tb;
3808 TCGv_i32 tc, ty;
3809
3810 nullify_over(ctx);
3811
3812 ta = load_frd0(a->r1);
3813 tb = load_frd0(a->r2);
3814 ty = tcg_const_i32(a->y);
3815 tc = tcg_const_i32(a->c);
3816
3817 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3818
3819 tcg_temp_free_i64(ta);
3820 tcg_temp_free_i64(tb);
3821 tcg_temp_free_i32(ty);
3822 tcg_temp_free_i32(tc);
3823
31234768 3824 return nullify_end(ctx);
3825}
3826
1ca74648 3827static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
ebe9383c 3828{
eaa3783b 3829 TCGv_reg t;
3830
3831 nullify_over(ctx);
3832
1ca74648 3833 t = get_temp(ctx);
eaa3783b 3834 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
ebe9383c 3835
3836 if (a->y == 1) {
3837 int mask;
3838 bool inv = false;
3839
3840 switch (a->c) {
3841 case 0: /* simple */
3842 tcg_gen_andi_reg(t, t, 0x4000000);
3843 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3844 goto done;
3845 case 2: /* rej */
3846 inv = true;
3847 /* fallthru */
3848 case 1: /* acc */
3849 mask = 0x43ff800;
3850 break;
3851 case 6: /* rej8 */
3852 inv = true;
3853 /* fallthru */
3854 case 5: /* acc8 */
3855 mask = 0x43f8000;
3856 break;
3857 case 9: /* acc6 */
3858 mask = 0x43e0000;
3859 break;
3860 case 13: /* acc4 */
3861 mask = 0x4380000;
3862 break;
3863 case 17: /* acc2 */
3864 mask = 0x4200000;
3865 break;
3866 default:
3867 gen_illegal(ctx);
3868 return true;
3869 }
3870 if (inv) {
3871 TCGv_reg c = load_const(ctx, mask);
3872 tcg_gen_or_reg(t, t, c);
3873 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3874 } else {
3875 tcg_gen_andi_reg(t, t, mask);
3876 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3877 }
3878 } else {
3879 unsigned cbit = (a->y ^ 1) - 1;
3880
3881 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3882 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3883 tcg_temp_free(t);
3884 }
3885
3886 done:
31234768 3887 return nullify_end(ctx);
3888}
3889
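/*
 * A rough reading of the FTEST masks above: the value loaded from
 * fr0_shadow mirrors the FPSR, bit 0x4000000 being the compare (C) bit
 * tested by the "simple" form; the acc/acc8/acc6/acc4/acc2 forms widen
 * the test over progressively fewer compare-queue bits (0x43ff800 down
 * to 0x4200000), and the rej variants reuse the same masks with the
 * nullification condition inverted.
 */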
3890/*
3891 * Float class 3
3892 */
3893
3894static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3895{
3896 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3897}
ebe9383c 3898
3899static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3900{
3901 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3902}
ebe9383c 3903
3904static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3905{
3906 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3907}
ebe9383c 3908
3909static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3910{
3911 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3912}
3913
1ca74648 3914static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3915{
3916 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3917}
3918
3919static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3920{
3921 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3922}
3923
3924static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3925{
3926 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3927}
3928
3929static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3930{
3931 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3932}
3933
3934static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3935{
3936 TCGv_i64 x, y;
3937
3938 nullify_over(ctx);
3939
3940 x = load_frw0_i64(a->r1);
3941 y = load_frw0_i64(a->r2);
3942 tcg_gen_mul_i64(x, x, y);
3943 save_frd(a->t, x);
3944 tcg_temp_free_i64(x);
3945 tcg_temp_free_i64(y);
ebe9383c 3946
31234768 3947 return nullify_end(ctx);
3948}
3949
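/*
 * XMPYU is the odd one out here: it treats the two single-precision
 * registers as unsigned 32-bit integers and stores the full 64-bit
 * product in a double register.  Illustratively, 0xffffffff * 2 yields
 * 0x1fffffffe rather than a truncated 32-bit result.
 */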
3950/* Convert the fmpyadd single-precision register encodings to standard. */
3951static inline int fmpyadd_s_reg(unsigned r)
3952{
3953 return (r & 16) * 2 + 16 + (r & 15);
3954}
3955
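/*
 * Worked example for fmpyadd_s_reg() (illustrative): bit 4 of the
 * 5-bit encoding selects the upper bank, so
 *     r = 5          -> (0 * 2)  + 16 + 5 = 21
 *     r = 21 (16+5)  -> (16 * 2) + 16 + 5 = 53
 * i.e. the encodings map onto single-precision register halves
 * starting at index 16.
 */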
b1e2af57 3956static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
ebe9383c 3957{
3958 int tm = fmpyadd_s_reg(a->tm);
3959 int ra = fmpyadd_s_reg(a->ra);
3960 int ta = fmpyadd_s_reg(a->ta);
3961 int rm2 = fmpyadd_s_reg(a->rm2);
3962 int rm1 = fmpyadd_s_reg(a->rm1);
3963
3964 nullify_over(ctx);
3965
3966 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3967 do_fop_weww(ctx, ta, ta, ra,
3968 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
ebe9383c 3969
31234768 3970 return nullify_end(ctx);
3971}
3972
3973static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3974{
3975 return do_fmpyadd_s(ctx, a, false);
3976}
3977
3978static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3979{
3980 return do_fmpyadd_s(ctx, a, true);
3981}
3982
3983static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3984{
3985 nullify_over(ctx);
3986
3987 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3988 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3989 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3990
3991 return nullify_end(ctx);
3992}
3993
3994static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3995{
3996 return do_fmpyadd_d(ctx, a, false);
3997}
3998
3999static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4000{
4001 return do_fmpyadd_d(ctx, a, true);
4002}
4003
c3bad4f8 4004static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
ebe9383c 4005{
c3bad4f8 4006 TCGv_i32 x, y, z;
4007
4008 nullify_over(ctx);
4009 x = load_frw0_i32(a->rm1);
4010 y = load_frw0_i32(a->rm2);
4011 z = load_frw0_i32(a->ra3);
ebe9383c 4012
4013 if (a->neg) {
4014 gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
ebe9383c 4015 } else {
c3bad4f8 4016 gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4017 }
4018
4019 tcg_temp_free_i32(y);
4020 tcg_temp_free_i32(z);
4021 save_frw_i32(a->t, x);
4022 tcg_temp_free_i32(x);
31234768 4023 return nullify_end(ctx);
4024}
4025
c3bad4f8 4026static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
ebe9383c 4027{
c3bad4f8 4028 TCGv_i64 x, y, z;
4029
4030 nullify_over(ctx);
4031 x = load_frd0(a->rm1);
4032 y = load_frd0(a->rm2);
4033 z = load_frd0(a->ra3);
ebe9383c 4034
4035 if (a->neg) {
4036 gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
ebe9383c 4037 } else {
c3bad4f8 4038 gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4039 }
4040
4041 tcg_temp_free_i64(y);
4042 tcg_temp_free_i64(z);
4043 save_frd(a->t, x);
4044 tcg_temp_free_i64(x);
31234768 4045 return nullify_end(ctx);
4046}
4047
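/*
 * Sketch of the fused forms above, assuming the usual semantics of the
 * fmpyfadd/fmpynfadd helpers: the result is rm1 * rm2 + ra3 computed
 * with a single rounding, and the a->neg variant negates the product
 * first, i.e. -(rm1 * rm2) + ra3.
 */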
b542683d 4048static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
61766fe9 4049{
51b061fb 4050 DisasContext *ctx = container_of(dcbase, DisasContext, base);
f764718d 4051 int bound;
61766fe9 4052
51b061fb 4053 ctx->cs = cs;
494737b7 4054 ctx->tb_flags = ctx->base.tb->flags;
4055
4056#ifdef CONFIG_USER_ONLY
4057 ctx->privilege = MMU_USER_IDX;
4058 ctx->mmu_idx = MMU_USER_IDX;
4059 ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4060 ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
3d68ee7b 4061#else
4062 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4063 ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
3d68ee7b 4064
4065 /* Recover the IAOQ values from the GVA + PRIV. */
4066 uint64_t cs_base = ctx->base.tb->cs_base;
4067 uint64_t iasq_f = cs_base & ~0xffffffffull;
4068 int32_t diff = cs_base;
4069
4070 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4071 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4072#endif
51b061fb 4073 ctx->iaoq_n = -1;
f764718d 4074 ctx->iaoq_n_var = NULL;
61766fe9 4075
4076 /* Bound the number of instructions by those left on the page. */
4077 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 4078 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
3d68ee7b 4079
4080 ctx->ntempr = 0;
4081 ctx->ntempl = 0;
4082 memset(ctx->tempr, 0, sizeof(ctx->tempr));
4083 memset(ctx->templ, 0, sizeof(ctx->templ));
51b061fb 4084}
61766fe9 4085
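/*
 * Two illustrative notes on the setup above: in system mode the TB's
 * cs_base carries IASQ_Front in its high bits and the signed
 * iaoq_b - iaoq_f offset in its low 32 bits, which is what the
 * "Recover the IAOQ values" block undoes.  The page bound works out
 * as, e.g., pc_first == 0x1ff8 with 4 KiB pages giving
 * -(0x1ff8 | 0xfffff000) / 4 == 2, so only the two insns left on the
 * page are translated before the TB is closed.
 */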
4086static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4087{
4088 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4089
3d68ee7b 4090 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4091 ctx->null_cond = cond_make_f();
4092 ctx->psw_n_nonzero = false;
494737b7 4093 if (ctx->tb_flags & PSW_N) {
4094 ctx->null_cond.c = TCG_COND_ALWAYS;
4095 ctx->psw_n_nonzero = true;
129e9cc3 4096 }
4097 ctx->null_lab = NULL;
4098}
129e9cc3 4099
4100static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4101{
4102 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4103
4104 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4105}
4106
4107static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
4108 const CPUBreakpoint *bp)
4109{
4110 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4111
31234768 4112 gen_excp(ctx, EXCP_DEBUG);
c301f34e 4113 ctx->base.pc_next += 4;
4114 return true;
4115}
4116
4117static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4118{
4119 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4120 CPUHPPAState *env = cs->env_ptr;
4121 DisasJumpType ret;
4122 int i, n;
4123
4124 /* Execute one insn. */
ba1d0b44 4125#ifdef CONFIG_USER_ONLY
c301f34e 4126 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4127 do_page_zero(ctx);
4128 ret = ctx->base.is_jmp;
51b061fb 4129 assert(ret != DISAS_NEXT);
4130 } else
4131#endif
4132 {
4133 /* Always fetch the insn, even if nullified, so that we check
4134 the page permissions for execute. */
c301f34e 4135 uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
4136
4137 /* Set up the IA queue for the next insn.
4138 This will be overwritten by a branch. */
4139 if (ctx->iaoq_b == -1) {
4140 ctx->iaoq_n = -1;
4141 ctx->iaoq_n_var = get_temp(ctx);
eaa3783b 4142 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
7ad439df 4143 } else {
51b061fb 4144 ctx->iaoq_n = ctx->iaoq_b + 4;
f764718d 4145 ctx->iaoq_n_var = NULL;
4146 }
4147
4148 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4149 ctx->null_cond.c = TCG_COND_NEVER;
4150 ret = DISAS_NEXT;
4151 } else {
1a19da0d 4152 ctx->insn = insn;
4153 if (!decode(ctx, insn)) {
4154 gen_illegal(ctx);
4155 }
31234768 4156 ret = ctx->base.is_jmp;
51b061fb 4157 assert(ctx->null_lab == NULL);
61766fe9 4158 }
51b061fb 4159 }
61766fe9 4160
51b061fb 4161 /* Free any temporaries allocated. */
4162 for (i = 0, n = ctx->ntempr; i < n; ++i) {
4163 tcg_temp_free(ctx->tempr[i]);
4164 ctx->tempr[i] = NULL;
4165 }
4166 for (i = 0, n = ctx->ntempl; i < n; ++i) {
4167 tcg_temp_free_tl(ctx->templ[i]);
4168 ctx->templ[i] = NULL;
51b061fb 4169 }
4170 ctx->ntempr = 0;
4171 ctx->ntempl = 0;
61766fe9 4172
4173 /* Advance the insn queue. Note that this check also detects
4174 a privilege change within the instruction queue. */
51b061fb 4175 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4176 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4177 && use_goto_tb(ctx, ctx->iaoq_b)
4178 && (ctx->null_cond.c == TCG_COND_NEVER
4179 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4180 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4181 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
31234768 4182 ctx->base.is_jmp = ret = DISAS_NORETURN;
51b061fb 4183 } else {
31234768 4184 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
c301f34e 4185 }
61766fe9 4186 }
4187 ctx->iaoq_f = ctx->iaoq_b;
4188 ctx->iaoq_b = ctx->iaoq_n;
c301f34e 4189 ctx->base.pc_next += 4;
4190
4191 if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
4192 return;
4193 }
4194 if (ctx->iaoq_f == -1) {
eaa3783b 4195 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
51b061fb 4196 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4197#ifndef CONFIG_USER_ONLY
4198 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4199#endif
4200 nullify_save(ctx);
4201 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
4202 } else if (ctx->iaoq_b == -1) {
eaa3783b 4203 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4204 }
4205}
4206
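/*
 * A rough picture of the per-insn queue rotation above: after each
 * instruction IAOQ_Front <- IAOQ_Back and IAOQ_Back <- IAOQ_Next, so
 * straight-line code advances in 4-byte steps while a taken branch
 * leaves its target in the back of the queue for exactly one more
 * (delay-slot) instruction.
 */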
4207static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4208{
4209 DisasContext *ctx = container_of(dcbase, DisasContext, base);
e1b5a5ed 4210 DisasJumpType is_jmp = ctx->base.is_jmp;
61766fe9 4211
e1b5a5ed 4212 switch (is_jmp) {
869051ea 4213 case DISAS_NORETURN:
61766fe9 4214 break;
51b061fb 4215 case DISAS_TOO_MANY:
869051ea 4216 case DISAS_IAQ_N_STALE:
e1b5a5ed 4217 case DISAS_IAQ_N_STALE_EXIT:
4218 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4219 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4220 nullify_save(ctx);
61766fe9 4221 /* FALLTHRU */
869051ea 4222 case DISAS_IAQ_N_UPDATED:
51b061fb 4223 if (ctx->base.singlestep_enabled) {
61766fe9 4224 gen_excp_1(EXCP_DEBUG);
e1b5a5ed 4225 } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
07ea28b4 4226 tcg_gen_exit_tb(NULL, 0);
61766fe9 4227 } else {
7f11636d 4228 tcg_gen_lookup_and_goto_ptr();
4229 }
4230 break;
4231 default:
51b061fb 4232 g_assert_not_reached();
61766fe9 4233 }
51b061fb 4234}
61766fe9 4235
4236static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4237{
c301f34e 4238 target_ulong pc = dcbase->pc_first;
61766fe9 4239
4240#ifdef CONFIG_USER_ONLY
4241 switch (pc) {
4242 case 0x00:
4243 qemu_log("IN:\n0x00000000: (null)\n");
ba1d0b44 4244 return;
4245 case 0xb0:
4246 qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
ba1d0b44 4247 return;
4248 case 0xe0:
4249 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
ba1d0b44 4250 return;
4251 case 0x100:
4252 qemu_log("IN:\n0x00000100: syscall\n");
ba1d0b44 4253 return;
61766fe9 4254 }
4255#endif
4256
4257 qemu_log("IN: %s\n", lookup_symbol(pc));
eaa3783b 4258 log_target_disas(cs, pc, dcbase->tb->size);
4259}
4260
4261static const TranslatorOps hppa_tr_ops = {
4262 .init_disas_context = hppa_tr_init_disas_context,
4263 .tb_start = hppa_tr_tb_start,
4264 .insn_start = hppa_tr_insn_start,
4265 .breakpoint_check = hppa_tr_breakpoint_check,
4266 .translate_insn = hppa_tr_translate_insn,
4267 .tb_stop = hppa_tr_tb_stop,
4268 .disas_log = hppa_tr_disas_log,
4269};
4270
4271void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4272
4273{
4274 DisasContext ctx;
4275 translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
4276}
4277
4278void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4279 target_ulong *data)
4280{
4281 env->iaoq_f = data[0];
86f8d05f 4282 if (data[1] != (target_ureg)-1) {
4283 env->iaoq_b = data[1];
4284 }
4285 /* Since we were executing the instruction at IAOQ_F, and took some
4286 sort of action that provoked the cpu_restore_state, we can infer
4287 that the instruction was not nullified. */
4288 env->psw_n = 0;
4289}
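/*
 * The data[] slots consumed here match the operands recorded by
 * tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b) in hppa_tr_insn_start();
 * an unwind might restore, say, iaoq_f = 0x1000 and iaoq_b = 0x1004,
 * while the (target_ureg)-1 sentinel means the back of the queue was a
 * run-time value and is left as the interrupted state already has it.
 */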