/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these. */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

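/*
 * A condition for nullification or branching: compare A0 to A1 with C.
 * TCG_COND_NEVER and TCG_COND_ALWAYS stand for the constant false and
 * true conditions, in which case a0/a1 are unused.
 */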
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
#define UNALIGN(C) 0
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted, so that a value of 0
   explicitly selects sr0 rather than a space inferred from the base. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M. */
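/* That is: 0 for no base update, 1 for post-modify (m=1, a=0),
   and -1 for pre-modify (m=1, a=1). */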
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops. */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
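/* We want to exit the TB and return to the main loop. */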
#define DISAS_EXIT DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

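/* Reset *COND to "never", freeing any temporaries it holds.
   The PSW[N] global itself is never freed. */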
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

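/* Allocate a temporary, tracked in the context so that it can be
   freed once the current instruction has been translated. */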
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

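/* Return a destination for register REG: a temporary if the write
   may be nullified (so that save_gpr can apply the condition), or if
   REG is GR0, whose value is fixed at zero; otherwise the global. */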
static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS 0
# define LO_OFS 4
#else
# define HI_OFS 4
# define LO_OFS 0
#endif

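/* A 32-bit fp register is one word half of a 64-bit fr[] slot:
   RT & 31 selects the slot and RT & 32 selects the low word. */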
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

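/* Load space register REG into DEST.  With TB_FLAG_SR_SAME, SR4-SR7
   all hold the same value, which is tracked in the srH global. */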
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid. */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero. */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path. */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn. */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that. */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place. */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

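/* Copy one entry of the instruction address queue: IVAL == -1 marks
   a variable value, which is held in VVAL. */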
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

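/* Transfer to the instruction queue (F, B): use a direct goto_tb
   when both addresses are known and F is reachable, otherwise an
   indirect jump via lookup_and_goto_ptr. */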
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

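/* Arithmetic conditions c={2,3,6} require the signed overflow (SV)
   computation; c={4,5} require the carry (CB) computation. */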
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <> (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >= (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused. */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3. */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions. */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition. */
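/* SV is negative when the operands have the same sign and the
   result's sign differs: SV = (res ^ in1) & ~(in1 ^ in2). */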
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction. */
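/* Here overflow occurs when the operands have differing signs and
   the result's sign differs from IN1:
   SV = (res ^ in1) & (in1 ^ in2). */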
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting. */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

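/* Implement COMPARE AND CLEAR: compute the condition from IN1 - IN2,
   then unconditionally write zero to RT. */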
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

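/* As do_log, but with the PA-RISC unit conditions (some byte, halfword,
   or digit of the result zero or carrying out), and optionally
   trapping on the condition. */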
static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP. */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

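/* Form the guest virtual address from base, index, displacement and
   space registers, returning both the address and the offset to be
   used for any base register update. */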
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP. */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg do_load_64
#define do_store_reg do_store_64
#else
#define do_load_reg do_load_32
#define do_store_reg do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled. */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over. */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches. */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it. */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches. */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified. */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself. */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution. */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B. */
1926 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1927 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1928
1929 nullify_over(ctx);
1930 if (link != 0) {
1931 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1932 }
1933 tcg_gen_lookup_and_goto_ptr();
1934 return nullify_end(ctx);
1935 } else {
1936 c = ctx->null_cond.c;
1937 a0 = ctx->null_cond.a0;
1938 a1 = ctx->null_cond.a1;
1939
1940 tmp = tcg_temp_new();
1941 next = get_temp(ctx);
1942
1943 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1944 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1945 ctx->iaoq_n = -1;
1946 ctx->iaoq_n_var = next;
1947
1948 if (link != 0) {
1949 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1950 }
1951
1952 if (is_n) {
1953 /* The branch nullifies the next insn, which means the state of N
1954 after the branch is the inverse of the state of N that applied
1955 to the branch. */
1956 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1957 cond_free(&ctx->null_cond);
1958 ctx->null_cond = cond_make_n();
1959 ctx->psw_n_nonzero = true;
1960 } else {
1961 cond_free(&ctx->null_cond);
1962 }
1963 }
1964 return true;
1965 }
1966
1967 /* Implement
1968 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1969 * IAOQ_Next{30..31} ← GR[b]{30..31};
1970 * else
1971 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1972 * which keeps the privilege level from being increased.
1973 */
1974 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1975 {
1976 TCGv_reg dest;
1977 switch (ctx->privilege) {
1978 case 0:
1979 /* Privilege 0 is maximum and is allowed to decrease. */
1980 return offset;
1981 case 3:
1982 /* Privilege 3 is minimum and is never allowed to increase. */
1983 dest = get_temp(ctx);
1984 tcg_gen_ori_reg(dest, offset, 3);
1985 break;
1986 default:
1987 dest = get_temp(ctx);
1988 tcg_gen_andi_reg(dest, offset, -4);
1989 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1990 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1991 break;
1992 }
1993 return dest;
1994 }
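
/*
 * Editor's sketch (not part of the original file): the clamp above,
 * restated as plain integer arithmetic.  Privilege 0 is most privileged,
 * and the low two bits of the branch target request the new level; a
 * request for more privilege is rounded back to the current level.
 */
static inline uint32_t ibranch_priv_sketch(uint32_t offset, unsigned privilege)
{
    uint32_t dest = (offset & -4u) | privilege;
    return dest > offset ? dest : offset;   /* matches the GTU movcond */
}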
1995
1996 #ifdef CONFIG_USER_ONLY
1997 /* On Linux, page zero is normally marked execute only + gateway.
1998 Therefore normal read or write is supposed to fail, but specific
1999 offsets have kernel code mapped to raise permissions to implement
2000 system calls. Handling this via an explicit check here, rather
2001 in than the "be disp(sr2,r0)" instruction that probably sent us
2002 here, is the easiest way to handle the branch delay slot on the
2003 aforementioned BE. */
2004 static void do_page_zero(DisasContext *ctx)
2005 {
2006 /* If by some means we get here with PSW[N]=1, that implies that
2007 the B,GATE instruction would be skipped, and we'd fault on the
2008 next insn within the privileged page. */
2009 switch (ctx->null_cond.c) {
2010 case TCG_COND_NEVER:
2011 break;
2012 case TCG_COND_ALWAYS:
2013 tcg_gen_movi_reg(cpu_psw_n, 0);
2014 goto do_sigill;
2015 default:
2016 /* Since this is always the first (and only) insn within the
2017 TB, we should know the state of PSW[N] from TB->FLAGS. */
2018 g_assert_not_reached();
2019 }
2020
2021 /* Check that we didn't arrive here via some means that allowed
2022 non-sequential instruction execution. Normally the PSW[B] bit
2023 detects this by disallowing the B,GATE instruction to execute
2024 under such conditions. */
2025 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2026 goto do_sigill;
2027 }
2028
2029 switch (ctx->iaoq_f & -4) {
2030 case 0x00: /* Null pointer call */
2031 gen_excp_1(EXCP_IMP);
2032 ctx->base.is_jmp = DISAS_NORETURN;
2033 break;
2034
2035 case 0xb0: /* LWS */
2036 gen_excp_1(EXCP_SYSCALL_LWS);
2037 ctx->base.is_jmp = DISAS_NORETURN;
2038 break;
2039
2040 case 0xe0: /* SET_THREAD_POINTER */
2041 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2042 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2043 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2044 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2045 break;
2046
2047 case 0x100: /* SYSCALL */
2048 gen_excp_1(EXCP_SYSCALL);
2049 ctx->base.is_jmp = DISAS_NORETURN;
2050 break;
2051
2052 default:
2053 do_sigill:
2054 gen_excp_1(EXCP_ILL);
2055 ctx->base.is_jmp = DISAS_NORETURN;
2056 break;
2057 }
2058 }
2059 #endif
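
/*
 * Editor's note (illustrative, not from the original file): on the guest
 * side these offsets are reached with a branch-external-and-link through
 * %sr2, which leaves the return address in %r31 -- exactly what the
 * SET_THREAD_POINTER case above consumes.  A rough sketch of the guest
 * syscall sequence, assuming the usual parisc-linux register conventions
 * (%r20 = syscall number, %r26..%r23 = arguments, %r28 = result):
 *
 *     ble 0x100(%sr2, %r0)     ; enter the gateway page at SYSCALL
 *     ldi __NR_write, %r20     ; delay slot: load the syscall number
 *
 * The ABI details here are hedged; the parisc-linux documentation is
 * authoritative for the calling convention.
 */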
2060
2061 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2062 {
2063 cond_free(&ctx->null_cond);
2064 return true;
2065 }
2066
2067 static bool trans_break(DisasContext *ctx, arg_break *a)
2068 {
2069 return gen_excp_iir(ctx, EXCP_BREAK);
2070 }
2071
2072 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2073 {
2074 /* No point in nullifying the memory barrier. */
2075 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2076
2077 cond_free(&ctx->null_cond);
2078 return true;
2079 }
2080
2081 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2082 {
2083 unsigned rt = a->t;
2084 TCGv_reg tmp = dest_gpr(ctx, rt);
2085 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2086 save_gpr(ctx, rt, tmp);
2087
2088 cond_free(&ctx->null_cond);
2089 return true;
2090 }
2091
2092 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2093 {
2094 unsigned rt = a->t;
2095 unsigned rs = a->sp;
2096 TCGv_i64 t0 = tcg_temp_new_i64();
2097 TCGv_reg t1 = tcg_temp_new();
2098
2099 load_spr(ctx, t0, rs);
2100 tcg_gen_shri_i64(t0, t0, 32);
2101 tcg_gen_trunc_i64_reg(t1, t0);
2102
2103 save_gpr(ctx, rt, t1);
2104 tcg_temp_free(t1);
2105 tcg_temp_free_i64(t0);
2106
2107 cond_free(&ctx->null_cond);
2108 return true;
2109 }
2110
2111 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2112 {
2113 unsigned rt = a->t;
2114 unsigned ctl = a->r;
2115 TCGv_reg tmp;
2116
2117 switch (ctl) {
2118 case CR_SAR:
2119 #ifdef TARGET_HPPA64
2120 if (a->e == 0) {
2121 /* MFSAR without ,W masks low 5 bits. */
2122 tmp = dest_gpr(ctx, rt);
2123 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2124 save_gpr(ctx, rt, tmp);
2125 goto done;
2126 }
2127 #endif
2128 save_gpr(ctx, rt, cpu_sar);
2129 goto done;
2130 case CR_IT: /* Interval Timer */
2131 /* FIXME: Respect PSW_S bit. */
2132 nullify_over(ctx);
2133 tmp = dest_gpr(ctx, rt);
2134 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2135 gen_io_start();
2136 gen_helper_read_interval_timer(tmp);
2137 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2138 } else {
2139 gen_helper_read_interval_timer(tmp);
2140 }
2141 save_gpr(ctx, rt, tmp);
2142 return nullify_end(ctx);
2143 case 26:
2144 case 27:
2145 break;
2146 default:
2147 /* All other control registers are privileged. */
2148 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2149 break;
2150 }
2151
2152 tmp = get_temp(ctx);
2153 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2154 save_gpr(ctx, rt, tmp);
2155
2156 done:
2157 cond_free(&ctx->null_cond);
2158 return true;
2159 }
2160
2161 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2162 {
2163 unsigned rr = a->r;
2164 unsigned rs = a->sp;
2165 TCGv_i64 t64;
2166
2167 if (rs >= 5) {
2168 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2169 }
2170 nullify_over(ctx);
2171
2172 t64 = tcg_temp_new_i64();
2173 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2174 tcg_gen_shli_i64(t64, t64, 32);
2175
2176 if (rs >= 4) {
2177 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2178 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2179 } else {
2180 tcg_gen_mov_i64(cpu_sr[rs], t64);
2181 }
2182 tcg_temp_free_i64(t64);
2183
2184 return nullify_end(ctx);
2185 }
2186
2187 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2188 {
2189 unsigned ctl = a->t;
2190 TCGv_reg reg;
2191 TCGv_reg tmp;
2192
2193 if (ctl == CR_SAR) {
2194 reg = load_gpr(ctx, a->r);
2195 tmp = tcg_temp_new();
2196 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2197 save_or_nullify(ctx, cpu_sar, tmp);
2198 tcg_temp_free(tmp);
2199
2200 cond_free(&ctx->null_cond);
2201 return true;
2202 }
2203
2204 /* All other control registers are privileged or read-only. */
2205 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2206
2207 #ifndef CONFIG_USER_ONLY
2208 nullify_over(ctx);
2209 reg = load_gpr(ctx, a->r);
2210
2211 switch (ctl) {
2212 case CR_IT:
2213 gen_helper_write_interval_timer(cpu_env, reg);
2214 break;
2215 case CR_EIRR:
2216 gen_helper_write_eirr(cpu_env, reg);
2217 break;
2218 case CR_EIEM:
2219 gen_helper_write_eiem(cpu_env, reg);
2220 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2221 break;
2222
2223 case CR_IIASQ:
2224 case CR_IIAOQ:
2225 /* FIXME: Respect PSW_Q bit */
2226 /* The write advances the queue and stores to the back element. */
2227 tmp = get_temp(ctx);
2228 tcg_gen_ld_reg(tmp, cpu_env,
2229 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2230 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2231 tcg_gen_st_reg(reg, cpu_env,
2232 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2233 break;
2234
2235 case CR_PID1:
2236 case CR_PID2:
2237 case CR_PID3:
2238 case CR_PID4:
2239 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2240 #ifndef CONFIG_USER_ONLY
2241 gen_helper_change_prot_id(cpu_env);
2242 #endif
2243 break;
2244
2245 default:
2246 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2247 break;
2248 }
2249 return nullify_end(ctx);
2250 #endif
2251 }
2252
2253 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2254 {
2255 TCGv_reg tmp = tcg_temp_new();
2256
2257 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2258 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2259 save_or_nullify(ctx, cpu_sar, tmp);
2260 tcg_temp_free(tmp);
2261
2262 cond_free(&ctx->null_cond);
2263 return true;
2264 }
2265
2266 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2267 {
2268 TCGv_reg dest = dest_gpr(ctx, a->t);
2269
2270 #ifdef CONFIG_USER_ONLY
2271 /* We don't implement space registers in user mode. */
2272 tcg_gen_movi_reg(dest, 0);
2273 #else
2274 TCGv_i64 t0 = tcg_temp_new_i64();
2275
2276 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2277 tcg_gen_shri_i64(t0, t0, 32);
2278 tcg_gen_trunc_i64_reg(dest, t0);
2279
2280 tcg_temp_free_i64(t0);
2281 #endif
2282 save_gpr(ctx, a->t, dest);
2283
2284 cond_free(&ctx->null_cond);
2285 return true;
2286 }
2287
2288 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2289 {
2290 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2291 #ifndef CONFIG_USER_ONLY
2292 TCGv_reg tmp;
2293
2294 nullify_over(ctx);
2295
2296 tmp = get_temp(ctx);
2297 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2298 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2299 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2300 save_gpr(ctx, a->t, tmp);
2301
2302 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2303 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2304 return nullify_end(ctx);
2305 #endif
2306 }
2307
2308 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2309 {
2310 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2311 #ifndef CONFIG_USER_ONLY
2312 TCGv_reg tmp;
2313
2314 nullify_over(ctx);
2315
2316 tmp = get_temp(ctx);
2317 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2318 tcg_gen_ori_reg(tmp, tmp, a->i);
2319 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2320 save_gpr(ctx, a->t, tmp);
2321
2322 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2323 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2324 return nullify_end(ctx);
2325 #endif
2326 }
2327
2328 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2329 {
2330 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2331 #ifndef CONFIG_USER_ONLY
2332 TCGv_reg tmp, reg;
2333 nullify_over(ctx);
2334
2335 reg = load_gpr(ctx, a->r);
2336 tmp = get_temp(ctx);
2337 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2338
2339 /* Exit the TB to recognize new interrupts. */
2340 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2341 return nullify_end(ctx);
2342 #endif
2343 }
2344
2345 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2346 {
2347 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2348 #ifndef CONFIG_USER_ONLY
2349 nullify_over(ctx);
2350
2351 if (rfi_r) {
2352 gen_helper_rfi_r(cpu_env);
2353 } else {
2354 gen_helper_rfi(cpu_env);
2355 }
2356 /* Exit the TB to recognize new interrupts. */
2357 tcg_gen_exit_tb(NULL, 0);
2358 ctx->base.is_jmp = DISAS_NORETURN;
2359
2360 return nullify_end(ctx);
2361 #endif
2362 }
2363
2364 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2365 {
2366 return do_rfi(ctx, false);
2367 }
2368
2369 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2370 {
2371 return do_rfi(ctx, true);
2372 }
2373
2374 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2375 {
2376 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2377 #ifndef CONFIG_USER_ONLY
2378 nullify_over(ctx);
2379 gen_helper_halt(cpu_env);
2380 ctx->base.is_jmp = DISAS_NORETURN;
2381 return nullify_end(ctx);
2382 #endif
2383 }
2384
2385 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2386 {
2387 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2388 #ifndef CONFIG_USER_ONLY
2389 nullify_over(ctx);
2390 gen_helper_reset(cpu_env);
2391 ctx->base.is_jmp = DISAS_NORETURN;
2392 return nullify_end(ctx);
2393 #endif
2394 }
2395
2396 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2397 {
2398 if (a->m) {
2399 TCGv_reg dest = dest_gpr(ctx, a->b);
2400 TCGv_reg src1 = load_gpr(ctx, a->b);
2401 TCGv_reg src2 = load_gpr(ctx, a->x);
2402
2403 /* The only thing we need to do is the base register modification. */
2404 tcg_gen_add_reg(dest, src1, src2);
2405 save_gpr(ctx, a->b, dest);
2406 }
2407 cond_free(&ctx->null_cond);
2408 return true;
2409 }
2410
2411 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2412 {
2413 TCGv_reg dest, ofs;
2414 TCGv_i32 level, want;
2415 TCGv_tl addr;
2416
2417 nullify_over(ctx);
2418
2419 dest = dest_gpr(ctx, a->t);
2420 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2421
2422 if (a->imm) {
2423 level = tcg_constant_i32(a->ri);
2424 } else {
2425 level = tcg_temp_new_i32();
2426 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2427 tcg_gen_andi_i32(level, level, 3);
2428 }
2429 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2430
2431 gen_helper_probe(dest, cpu_env, addr, level, want);
2432
2433 tcg_temp_free_i32(level);
2434
2435 save_gpr(ctx, a->t, dest);
2436 return nullify_end(ctx);
2437 }
2438
2439 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2440 {
2441 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2442 #ifndef CONFIG_USER_ONLY
2443 TCGv_tl addr;
2444 TCGv_reg ofs, reg;
2445
2446 nullify_over(ctx);
2447
2448 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2449 reg = load_gpr(ctx, a->r);
2450 if (a->addr) {
2451 gen_helper_itlba(cpu_env, addr, reg);
2452 } else {
2453 gen_helper_itlbp(cpu_env, addr, reg);
2454 }
2455
2456 /* Exit TB for TLB change if mmu is enabled. */
2457 if (ctx->tb_flags & PSW_C) {
2458 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2459 }
2460 return nullify_end(ctx);
2461 #endif
2462 }
2463
2464 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2465 {
2466 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2467 #ifndef CONFIG_USER_ONLY
2468 TCGv_tl addr;
2469 TCGv_reg ofs;
2470
2471 nullify_over(ctx);
2472
2473 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2474 if (a->m) {
2475 save_gpr(ctx, a->b, ofs);
2476 }
2477 if (a->local) {
2478 gen_helper_ptlbe(cpu_env);
2479 } else {
2480 gen_helper_ptlb(cpu_env, addr);
2481 }
2482
2483 /* Exit TB for TLB change if mmu is enabled. */
2484 if (ctx->tb_flags & PSW_C) {
2485 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2486 }
2487 return nullify_end(ctx);
2488 #endif
2489 }
2490
2491 /*
2492 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2493 * See
2494 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2495 * page 13-9 (195/206)
2496 */
2497 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2498 {
2499 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2500 #ifndef CONFIG_USER_ONLY
2501 TCGv_tl addr, atl, stl;
2502 TCGv_reg reg;
2503
2504 nullify_over(ctx);
2505
2506 /*
2507 * FIXME:
2508 * if (not (pcxl or pcxl2))
2509 * return gen_illegal(ctx);
2510 *
2511 * Note for future: these are 32-bit systems; no hppa64.
2512 */
2513
2514 atl = tcg_temp_new_tl();
2515 stl = tcg_temp_new_tl();
2516 addr = tcg_temp_new_tl();
2517
2518 tcg_gen_ld32u_i64(stl, cpu_env,
2519 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2520 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2521 tcg_gen_ld32u_i64(atl, cpu_env,
2522 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2523 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2524 tcg_gen_shli_i64(stl, stl, 32);
2525 tcg_gen_or_tl(addr, atl, stl);
2526 tcg_temp_free_tl(atl);
2527 tcg_temp_free_tl(stl);
2528
2529 reg = load_gpr(ctx, a->r);
2530 if (a->addr) {
2531 gen_helper_itlba(cpu_env, addr, reg);
2532 } else {
2533 gen_helper_itlbp(cpu_env, addr, reg);
2534 }
2535 tcg_temp_free_tl(addr);
2536
2537 /* Exit TB for TLB change if mmu is enabled. */
2538 if (ctx->tb_flags & PSW_C) {
2539 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2540 }
2541 return nullify_end(ctx);
2542 #endif
2543 }
2544
2545 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2546 {
2547 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2548 #ifndef CONFIG_USER_ONLY
2549 TCGv_tl vaddr;
2550 TCGv_reg ofs, paddr;
2551
2552 nullify_over(ctx);
2553
2554 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2555
2556 paddr = tcg_temp_new();
2557 gen_helper_lpa(paddr, cpu_env, vaddr);
2558
2559 /* Note that physical address result overrides base modification. */
2560 if (a->m) {
2561 save_gpr(ctx, a->b, ofs);
2562 }
2563 save_gpr(ctx, a->t, paddr);
2564 tcg_temp_free(paddr);
2565
2566 return nullify_end(ctx);
2567 #endif
2568 }
2569
2570 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2571 {
2572 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2573
2574 /* The Coherence Index is an implementation-defined function of the
2575 physical address. Two addresses with the same CI have a coherent
2576 view of the cache. Our implementation returns 0 for all addresses,
2577 since the entire address space is coherent. */
2578 save_gpr(ctx, a->t, tcg_constant_reg(0));
2579
2580 cond_free(&ctx->null_cond);
2581 return true;
2582 }
2583
2584 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2585 {
2586 return do_add_reg(ctx, a, false, false, false, false);
2587 }
2588
2589 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2590 {
2591 return do_add_reg(ctx, a, true, false, false, false);
2592 }
2593
2594 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2595 {
2596 return do_add_reg(ctx, a, false, true, false, false);
2597 }
2598
2599 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2600 {
2601 return do_add_reg(ctx, a, false, false, false, true);
2602 }
2603
2604 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2605 {
2606 return do_add_reg(ctx, a, false, true, false, true);
2607 }
2608
2609 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2610 {
2611 return do_sub_reg(ctx, a, false, false, false);
2612 }
2613
2614 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2615 {
2616 return do_sub_reg(ctx, a, true, false, false);
2617 }
2618
2619 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2620 {
2621 return do_sub_reg(ctx, a, false, false, true);
2622 }
2623
2624 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2625 {
2626 return do_sub_reg(ctx, a, true, false, true);
2627 }
2628
2629 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2630 {
2631 return do_sub_reg(ctx, a, false, true, false);
2632 }
2633
2634 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2635 {
2636 return do_sub_reg(ctx, a, true, true, false);
2637 }
2638
2639 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2640 {
2641 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2642 }
2643
2644 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2645 {
2646 return do_log_reg(ctx, a, tcg_gen_and_reg);
2647 }
2648
2649 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2650 {
2651 if (a->cf == 0) {
2652 unsigned r2 = a->r2;
2653 unsigned r1 = a->r1;
2654 unsigned rt = a->t;
2655
2656 if (rt == 0) { /* NOP */
2657 cond_free(&ctx->null_cond);
2658 return true;
2659 }
2660 if (r2 == 0) { /* COPY */
2661 if (r1 == 0) {
2662 TCGv_reg dest = dest_gpr(ctx, rt);
2663 tcg_gen_movi_reg(dest, 0);
2664 save_gpr(ctx, rt, dest);
2665 } else {
2666 save_gpr(ctx, rt, cpu_gr[r1]);
2667 }
2668 cond_free(&ctx->null_cond);
2669 return true;
2670 }
2671 #ifndef CONFIG_USER_ONLY
2672 /* These are QEMU extensions and are nops in the real architecture:
2673 *
2674 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2675 * or %r31,%r31,%r31 -- death loop; offline cpu
2676 * currently implemented as idle.
2677 */
2678 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2679 /* No need to check for supervisor, as userland can only pause
2680 until the next timer interrupt. */
2681 nullify_over(ctx);
2682
2683 /* Advance the instruction queue. */
2684 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2685 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2686 nullify_set(ctx, 0);
2687
2688 /* Tell the qemu main loop to halt until this cpu has work. */
2689 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2690 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2691 gen_excp_1(EXCP_HALTED);
2692 ctx->base.is_jmp = DISAS_NORETURN;
2693
2694 return nullify_end(ctx);
2695 }
2696 #endif
2697 }
2698 return do_log_reg(ctx, a, tcg_gen_or_reg);
2699 }
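
/*
 * Editor's sketch (illustrative, not from the original file): guest code
 * that exploits the PAUSE extension above.  The inline asm mnemonic is
 * the exact nop the translator recognizes; the surrounding wrapper is
 * hypothetical.
 */
static void guest_idle_wait_sketch(volatile int *work_pending)
{
    while (!*work_pending) {
        /* "or %r10,%r10,%r10": architecturally a nop, but QEMU halts
           this vCPU until the next interrupt. */
        asm volatile("or %%r10, %%r10, %%r10" ::: "memory");
    }
}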
2700
2701 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2702 {
2703 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2704 }
2705
2706 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2707 {
2708 TCGv_reg tcg_r1, tcg_r2;
2709
2710 if (a->cf) {
2711 nullify_over(ctx);
2712 }
2713 tcg_r1 = load_gpr(ctx, a->r1);
2714 tcg_r2 = load_gpr(ctx, a->r2);
2715 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2716 return nullify_end(ctx);
2717 }
2718
2719 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2720 {
2721 TCGv_reg tcg_r1, tcg_r2;
2722
2723 if (a->cf) {
2724 nullify_over(ctx);
2725 }
2726 tcg_r1 = load_gpr(ctx, a->r1);
2727 tcg_r2 = load_gpr(ctx, a->r2);
2728 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2729 return nullify_end(ctx);
2730 }
2731
2732 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2733 {
2734 TCGv_reg tcg_r1, tcg_r2, tmp;
2735
2736 if (a->cf) {
2737 nullify_over(ctx);
2738 }
2739 tcg_r1 = load_gpr(ctx, a->r1);
2740 tcg_r2 = load_gpr(ctx, a->r2);
2741 tmp = get_temp(ctx);
2742 tcg_gen_not_reg(tmp, tcg_r2);
2743 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2744 return nullify_end(ctx);
2745 }
2746
2747 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2748 {
2749 return do_uaddcm(ctx, a, false);
2750 }
2751
2752 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2753 {
2754 return do_uaddcm(ctx, a, true);
2755 }
2756
2757 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2758 {
2759 TCGv_reg tmp;
2760
2761 nullify_over(ctx);
2762
2763 tmp = get_temp(ctx);
2764 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2765 if (!is_i) {
2766 tcg_gen_not_reg(tmp, tmp);
2767 }
2768 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2769 tcg_gen_muli_reg(tmp, tmp, 6);
2770 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2771 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2772 return nullify_end(ctx);
2773 }
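
/*
 * Editor's sketch (not part of the original file): what DCOR computes,
 * in plain C.  After a binary subtraction of packed-BCD operands, every
 * digit that produced a borrow (nibble carry 0) is 6 too large and is
 * corrected downward.  E.g. 0x41 - 0x27 = 0x1a with a borrow in digit 0;
 * DCOR yields 0x1a - 6 = 0x14, the correct BCD result of 41 - 27.
 */
static uint32_t dcor_sketch(uint32_t diff, uint32_t nibble_carries)
{
    /* nibble_carries: bit 4*i set when digit i did NOT borrow. */
    uint32_t borrowed = ~nibble_carries & 0x11111111u;
    return diff - borrowed * 6;
}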
2774
2775 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2776 {
2777 return do_dcor(ctx, a, false);
2778 }
2779
2780 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2781 {
2782 return do_dcor(ctx, a, true);
2783 }
2784
2785 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2786 {
2787 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2788
2789 nullify_over(ctx);
2790
2791 in1 = load_gpr(ctx, a->r1);
2792 in2 = load_gpr(ctx, a->r2);
2793
2794 add1 = tcg_temp_new();
2795 add2 = tcg_temp_new();
2796 addc = tcg_temp_new();
2797 dest = tcg_temp_new();
2798 zero = tcg_constant_reg(0);
2799
2800 /* Form R1 << 1 | PSW[CB]{8}. */
2801 tcg_gen_add_reg(add1, in1, in1);
2802 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2803
2804 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2805 carry{8} requires that we subtract via + ~R2 + 1, as described in
2806 the manual. By extracting and masking V, we can produce the
2807 proper inputs to the addition without movcond. */
2808 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2809 tcg_gen_xor_reg(add2, in2, addc);
2810 tcg_gen_andi_reg(addc, addc, 1);
2811 /* ??? This is only correct for 32-bit. */
2812 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2813 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2814
2815 tcg_temp_free(addc);
2816
2817 /* Write back the result register. */
2818 save_gpr(ctx, a->t, dest);
2819
2820 /* Write back PSW[CB]. */
2821 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2822 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2823
2824 /* Write back PSW[V] for the division step. */
2825 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2826 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2827
2828 /* Install the new nullification. */
2829 if (a->cf) {
2830 TCGv_reg sv = NULL;
2831 if (cond_need_sv(a->cf >> 1)) {
2832 /* ??? The lshift is supposed to contribute to overflow. */
2833 sv = do_add_sv(ctx, dest, add1, add2);
2834 }
2835 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2836 }
2837
2838 tcg_temp_free(add1);
2839 tcg_temp_free(add2);
2840 tcg_temp_free(dest);
2841
2842 return nullify_end(ctx);
2843 }
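
/*
 * Editor's sketch (not part of the original file): one DS iteration as
 * generated above, restated in C for the 32-bit case.  PSW[V] selects
 * between adding R2 and subtracting it (via + ~R2 + 1), and the
 * most-significant carry, PSW[CB]{8}, is threaded from step to step.
 */
static uint32_t ds_step_sketch(uint32_t r1, uint32_t r2,
                               unsigned cb_msb, int32_t psw_v,
                               unsigned *next_cb_msb)
{
    uint32_t add1 = (r1 << 1) | cb_msb;            /* shift in the old carry */
    uint32_t add2 = psw_v < 0 ? ~r2 : r2;
    uint64_t sum = (uint64_t)add1 + add2 + (psw_v < 0);
    *next_cb_msb = sum >> 32;
    return (uint32_t)sum;
}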
2844
2845 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2846 {
2847 return do_add_imm(ctx, a, false, false);
2848 }
2849
2850 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2851 {
2852 return do_add_imm(ctx, a, true, false);
2853 }
2854
2855 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2856 {
2857 return do_add_imm(ctx, a, false, true);
2858 }
2859
2860 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2861 {
2862 return do_add_imm(ctx, a, true, true);
2863 }
2864
2865 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2866 {
2867 return do_sub_imm(ctx, a, false);
2868 }
2869
2870 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2871 {
2872 return do_sub_imm(ctx, a, true);
2873 }
2874
2875 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2876 {
2877 TCGv_reg tcg_im, tcg_r2;
2878
2879 if (a->cf) {
2880 nullify_over(ctx);
2881 }
2882
2883 tcg_im = load_const(ctx, a->i);
2884 tcg_r2 = load_gpr(ctx, a->r);
2885 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2886
2887 return nullify_end(ctx);
2888 }
2889
2890 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2891 {
2892 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2893 a->disp, a->sp, a->m, a->size | MO_TE);
2894 }
2895
2896 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2897 {
2898 assert(a->x == 0 && a->scale == 0);
2899 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2900 }
2901
2902 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2903 {
2904 MemOp mop = MO_TE | MO_ALIGN | a->size;
2905 TCGv_reg zero, dest, ofs;
2906 TCGv_tl addr;
2907
2908 nullify_over(ctx);
2909
2910 if (a->m) {
2911 /* Base register modification. Make sure that if RT == RB,
2912 we see the result of the load. */
2913 dest = get_temp(ctx);
2914 } else {
2915 dest = dest_gpr(ctx, a->t);
2916 }
2917
2918 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2919 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2920
2921 /*
2922 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2923 * However, actual hardware succeeds if aligned mod 4.
2924 * Detect this case and log a GUEST_ERROR.
2925 *
2926 * TODO: HPPA64 relaxes the over-alignment requirement
2927 * with the ,co completer.
2928 */
2929 gen_helper_ldc_check(addr);
2930
2931 zero = tcg_constant_reg(0);
2932 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2933
2934 if (a->m) {
2935 save_gpr(ctx, a->b, ofs);
2936 }
2937 save_gpr(ctx, a->t, dest);
2938
2939 return nullify_end(ctx);
2940 }
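
/*
 * Editor's sketch (illustrative, not from the original file): the
 * guest-visible semantics the atomic xchg above provides.  A PA-RISC
 * spinlock word is "free" when nonzero and "held" when zero, because
 * LDCW returns the old value and unconditionally leaves zero behind.
 * (Needs <stdatomic.h> and <stdint.h>.)
 */
static int ldcw_try_lock_sketch(_Atomic uint32_t *lock) /* 16-byte aligned */
{
    return atomic_exchange(lock, 0) != 0;  /* nonzero old value => acquired */
}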
2941
2942 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2943 {
2944 TCGv_reg ofs, val;
2945 TCGv_tl addr;
2946
2947 nullify_over(ctx);
2948
2949 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2950 ctx->mmu_idx == MMU_PHYS_IDX);
2951 val = load_gpr(ctx, a->r);
2952 if (a->a) {
2953 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2954 gen_helper_stby_e_parallel(cpu_env, addr, val);
2955 } else {
2956 gen_helper_stby_e(cpu_env, addr, val);
2957 }
2958 } else {
2959 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2960 gen_helper_stby_b_parallel(cpu_env, addr, val);
2961 } else {
2962 gen_helper_stby_b(cpu_env, addr, val);
2963 }
2964 }
2965 if (a->m) {
2966 tcg_gen_andi_reg(ofs, ofs, ~3);
2967 save_gpr(ctx, a->b, ofs);
2968 }
2969
2970 return nullify_end(ctx);
2971 }
2972
2973 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2974 {
2975 int hold_mmu_idx = ctx->mmu_idx;
2976
2977 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2978 ctx->mmu_idx = MMU_PHYS_IDX;
2979 trans_ld(ctx, a);
2980 ctx->mmu_idx = hold_mmu_idx;
2981 return true;
2982 }
2983
2984 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2985 {
2986 int hold_mmu_idx = ctx->mmu_idx;
2987
2988 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2989 ctx->mmu_idx = MMU_PHYS_IDX;
2990 trans_st(ctx, a);
2991 ctx->mmu_idx = hold_mmu_idx;
2992 return true;
2993 }
2994
2995 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2996 {
2997 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2998
2999 tcg_gen_movi_reg(tcg_rt, a->i);
3000 save_gpr(ctx, a->t, tcg_rt);
3001 cond_free(&ctx->null_cond);
3002 return true;
3003 }
3004
3005 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3006 {
3007 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3008 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3009
3010 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3011 save_gpr(ctx, 1, tcg_r1);
3012 cond_free(&ctx->null_cond);
3013 return true;
3014 }
3015
3016 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3017 {
3018 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3019
3020 /* Special case rb == 0, for the LDI pseudo-op.
3021 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
3022 if (a->b == 0) {
3023 tcg_gen_movi_reg(tcg_rt, a->i);
3024 } else {
3025 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3026 }
3027 save_gpr(ctx, a->t, tcg_rt);
3028 cond_free(&ctx->null_cond);
3029 return true;
3030 }
3031
3032 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3033 unsigned c, unsigned f, unsigned n, int disp)
3034 {
3035 TCGv_reg dest, in2, sv;
3036 DisasCond cond;
3037
3038 in2 = load_gpr(ctx, r);
3039 dest = get_temp(ctx);
3040
3041 tcg_gen_sub_reg(dest, in1, in2);
3042
3043 sv = NULL;
3044 if (cond_need_sv(c)) {
3045 sv = do_sub_sv(ctx, dest, in1, in2);
3046 }
3047
3048 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3049 return do_cbranch(ctx, disp, n, &cond);
3050 }
3051
3052 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3053 {
3054 nullify_over(ctx);
3055 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3056 }
3057
3058 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3059 {
3060 nullify_over(ctx);
3061 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3062 }
3063
3064 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3065 unsigned c, unsigned f, unsigned n, int disp)
3066 {
3067 TCGv_reg dest, in2, sv, cb_msb;
3068 DisasCond cond;
3069
3070 in2 = load_gpr(ctx, r);
3071 dest = tcg_temp_new();
3072 sv = NULL;
3073 cb_msb = NULL;
3074
3075 if (cond_need_cb(c)) {
3076 cb_msb = get_temp(ctx);
3077 tcg_gen_movi_reg(cb_msb, 0);
3078 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3079 } else {
3080 tcg_gen_add_reg(dest, in1, in2);
3081 }
3082 if (cond_need_sv(c)) {
3083 sv = do_add_sv(ctx, dest, in1, in2);
3084 }
3085
3086 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3087 save_gpr(ctx, r, dest);
3088 tcg_temp_free(dest);
3089 return do_cbranch(ctx, disp, n, &cond);
3090 }
3091
3092 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3093 {
3094 nullify_over(ctx);
3095 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3096 }
3097
3098 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3099 {
3100 nullify_over(ctx);
3101 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3102 }
3103
3104 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3105 {
3106 TCGv_reg tmp, tcg_r;
3107 DisasCond cond;
3108
3109 nullify_over(ctx);
3110
3111 tmp = tcg_temp_new();
3112 tcg_r = load_gpr(ctx, a->r);
3113 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3114
3115 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3116 tcg_temp_free(tmp);
3117 return do_cbranch(ctx, a->disp, a->n, &cond);
3118 }
3119
3120 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3121 {
3122 TCGv_reg tmp, tcg_r;
3123 DisasCond cond;
3124
3125 nullify_over(ctx);
3126
3127 tmp = tcg_temp_new();
3128 tcg_r = load_gpr(ctx, a->r);
3129 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3130
3131 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3132 tcg_temp_free(tmp);
3133 return do_cbranch(ctx, a->disp, a->n, &cond);
3134 }
3135
3136 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3137 {
3138 TCGv_reg dest;
3139 DisasCond cond;
3140
3141 nullify_over(ctx);
3142
3143 dest = dest_gpr(ctx, a->r2);
3144 if (a->r1 == 0) {
3145 tcg_gen_movi_reg(dest, 0);
3146 } else {
3147 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3148 }
3149
3150 cond = do_sed_cond(a->c, dest);
3151 return do_cbranch(ctx, a->disp, a->n, &cond);
3152 }
3153
3154 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3155 {
3156 TCGv_reg dest;
3157 DisasCond cond;
3158
3159 nullify_over(ctx);
3160
3161 dest = dest_gpr(ctx, a->r);
3162 tcg_gen_movi_reg(dest, a->i);
3163
3164 cond = do_sed_cond(a->c, dest);
3165 return do_cbranch(ctx, a->disp, a->n, &cond);
3166 }
3167
3168 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3169 {
3170 TCGv_reg dest;
3171
3172 if (a->c) {
3173 nullify_over(ctx);
3174 }
3175
3176 dest = dest_gpr(ctx, a->t);
3177 if (a->r1 == 0) {
3178 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3179 tcg_gen_shr_reg(dest, dest, cpu_sar);
3180 } else if (a->r1 == a->r2) {
3181 TCGv_i32 t32 = tcg_temp_new_i32();
3182 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3183 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3184 tcg_gen_extu_i32_reg(dest, t32);
3185 tcg_temp_free_i32(t32);
3186 } else {
3187 TCGv_i64 t = tcg_temp_new_i64();
3188 TCGv_i64 s = tcg_temp_new_i64();
3189
3190 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3191 tcg_gen_extu_reg_i64(s, cpu_sar);
3192 tcg_gen_shr_i64(t, t, s);
3193 tcg_gen_trunc_i64_reg(dest, t);
3194
3195 tcg_temp_free_i64(t);
3196 tcg_temp_free_i64(s);
3197 }
3198 save_gpr(ctx, a->t, dest);
3199
3200 /* Install the new nullification. */
3201 cond_free(&ctx->null_cond);
3202 if (a->c) {
3203 ctx->null_cond = do_sed_cond(a->c, dest);
3204 }
3205 return nullify_end(ctx);
3206 }
3207
3208 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3209 {
3210 unsigned sa = 31 - a->cpos;
3211 TCGv_reg dest, t2;
3212
3213 if (a->c) {
3214 nullify_over(ctx);
3215 }
3216
3217 dest = dest_gpr(ctx, a->t);
3218 t2 = load_gpr(ctx, a->r2);
3219 if (a->r1 == 0) {
3220 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3221 } else if (TARGET_REGISTER_BITS == 32) {
3222 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3223 } else if (a->r1 == a->r2) {
3224 TCGv_i32 t32 = tcg_temp_new_i32();
3225 tcg_gen_trunc_reg_i32(t32, t2);
3226 tcg_gen_rotri_i32(t32, t32, sa);
3227 tcg_gen_extu_i32_reg(dest, t32);
3228 tcg_temp_free_i32(t32);
3229 } else {
3230 TCGv_i64 t64 = tcg_temp_new_i64();
3231 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3232 tcg_gen_shri_i64(t64, t64, sa);
3233 tcg_gen_trunc_i64_reg(dest, t64);
3234 tcg_temp_free_i64(t64);
3235 }
3236 save_gpr(ctx, a->t, dest);
3237
3238 /* Install the new nullification. */
3239 cond_free(&ctx->null_cond);
3240 if (a->c) {
3241 ctx->null_cond = do_sed_cond(a->c, dest);
3242 }
3243 return nullify_end(ctx);
3244 }
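
/*
 * Editor's sketch (not part of the original file): both SHRPW variants
 * above compute a funnel shift -- concatenate R1:R2, shift right, keep
 * the low word.  The R1 == R2 special case degenerates into a rotate.
 */
static uint32_t shrpw_sketch(uint32_t r1, uint32_t r2, unsigned sa)
{
    uint64_t pair = ((uint64_t)r1 << 32) | r2;
    return (uint32_t)(pair >> sa);         /* 0 <= sa <= 31 */
}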
3245
3246 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3247 {
3248 unsigned len = 32 - a->clen;
3249 TCGv_reg dest, src, tmp;
3250
3251 if (a->c) {
3252 nullify_over(ctx);
3253 }
3254
3255 dest = dest_gpr(ctx, a->t);
3256 src = load_gpr(ctx, a->r);
3257 tmp = tcg_temp_new();
3258
3259 /* Recall that SAR uses big-endian bit numbering. */
3260 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3261 if (a->se) {
3262 tcg_gen_sar_reg(dest, src, tmp);
3263 tcg_gen_sextract_reg(dest, dest, 0, len);
3264 } else {
3265 tcg_gen_shr_reg(dest, src, tmp);
3266 tcg_gen_extract_reg(dest, dest, 0, len);
3267 }
3268 tcg_temp_free(tmp);
3269 save_gpr(ctx, a->t, dest);
3270
3271 /* Install the new nullification. */
3272 cond_free(&ctx->null_cond);
3273 if (a->c) {
3274 ctx->null_cond = do_sed_cond(a->c, dest);
3275 }
3276 return nullify_end(ctx);
3277 }
3278
3279 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3280 {
3281 unsigned len = 32 - a->clen;
3282 unsigned cpos = 31 - a->pos;
3283 TCGv_reg dest, src;
3284
3285 if (a->c) {
3286 nullify_over(ctx);
3287 }
3288
3289 dest = dest_gpr(ctx, a->t);
3290 src = load_gpr(ctx, a->r);
3291 if (a->se) {
3292 tcg_gen_sextract_reg(dest, src, cpos, len);
3293 } else {
3294 tcg_gen_extract_reg(dest, src, cpos, len);
3295 }
3296 save_gpr(ctx, a->t, dest);
3297
3298 /* Install the new nullification. */
3299 cond_free(&ctx->null_cond);
3300 if (a->c) {
3301 ctx->null_cond = do_sed_cond(a->c, dest);
3302 }
3303 return nullify_end(ctx);
3304 }
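
/*
 * Editor's sketch (not part of the original file): EXTRW positions count
 * from the left (bit 0 is the MSB), so the little-endian position of the
 * field's least significant bit is 31 - pos, as computed above.
 */
static uint32_t extrw_u_sketch(uint32_t src, unsigned pos, unsigned len)
{
    unsigned cpos = 31 - pos;
    uint32_t mask = len >= 32 ? ~0u : (1u << len) - 1;
    return (src >> cpos) & mask;
}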
3305
3306 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3307 {
3308 unsigned len = 32 - a->clen;
3309 target_sreg mask0, mask1;
3310 TCGv_reg dest;
3311
3312 if (a->c) {
3313 nullify_over(ctx);
3314 }
3315 if (a->cpos + len > 32) {
3316 len = 32 - a->cpos;
3317 }
3318
3319 dest = dest_gpr(ctx, a->t);
3320 mask0 = deposit64(0, a->cpos, len, a->i);
3321 mask1 = deposit64(-1, a->cpos, len, a->i);
3322
3323 if (a->nz) {
3324 TCGv_reg src = load_gpr(ctx, a->t);
3325 if (mask1 != -1) {
3326 tcg_gen_andi_reg(dest, src, mask1);
3327 src = dest;
3328 }
3329 tcg_gen_ori_reg(dest, src, mask0);
3330 } else {
3331 tcg_gen_movi_reg(dest, mask0);
3332 }
3333 save_gpr(ctx, a->t, dest);
3334
3335 /* Install the new nullification. */
3336 cond_free(&ctx->null_cond);
3337 if (a->c) {
3338 ctx->null_cond = do_sed_cond(a->c, dest);
3339 }
3340 return nullify_end(ctx);
3341 }
3342
3343 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3344 {
3345 unsigned rs = a->nz ? a->t : 0;
3346 unsigned len = 32 - a->clen;
3347 TCGv_reg dest, val;
3348
3349 if (a->c) {
3350 nullify_over(ctx);
3351 }
3352 if (a->cpos + len > 32) {
3353 len = 32 - a->cpos;
3354 }
3355
3356 dest = dest_gpr(ctx, a->t);
3357 val = load_gpr(ctx, a->r);
3358 if (rs == 0) {
3359 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3360 } else {
3361 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3362 }
3363 save_gpr(ctx, a->t, dest);
3364
3365 /* Install the new nullification. */
3366 cond_free(&ctx->null_cond);
3367 if (a->c) {
3368 ctx->null_cond = do_sed_cond(a->c, dest);
3369 }
3370 return nullify_end(ctx);
3371 }
3372
3373 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3374 unsigned nz, unsigned clen, TCGv_reg val)
3375 {
3376 unsigned rs = nz ? rt : 0;
3377 unsigned len = 32 - clen;
3378 TCGv_reg mask, tmp, shift, dest;
3379 unsigned msb = 1U << (len - 1);
3380
3381 dest = dest_gpr(ctx, rt);
3382 shift = tcg_temp_new();
3383 tmp = tcg_temp_new();
3384
3385 /* Convert big-endian bit numbering in SAR to left-shift. */
3386 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3387
3388 mask = tcg_const_reg(msb + (msb - 1));
3389 tcg_gen_and_reg(tmp, val, mask);
3390 if (rs) {
3391 tcg_gen_shl_reg(mask, mask, shift);
3392 tcg_gen_shl_reg(tmp, tmp, shift);
3393 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3394 tcg_gen_or_reg(dest, dest, tmp);
3395 } else {
3396 tcg_gen_shl_reg(dest, tmp, shift);
3397 }
3398 tcg_temp_free(shift);
3399 tcg_temp_free(mask);
3400 tcg_temp_free(tmp);
3401 save_gpr(ctx, rt, dest);
3402
3403 /* Install the new nullification. */
3404 cond_free(&ctx->null_cond);
3405 if (c) {
3406 ctx->null_cond = do_sed_cond(c, dest);
3407 }
3408 return nullify_end(ctx);
3409 }
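
/*
 * Editor's sketch (not part of the original file): the variable deposit
 * above, in plain C.  Note the xor with 31: SAR holds a big-endian bit
 * number, and 31 ^ sar converts it into the left-shift count.
 */
static uint32_t depw_sar_sketch(uint32_t base, uint32_t val,
                                unsigned sar, unsigned len)
{
    unsigned shift = 31 ^ sar;                         /* big-endian -> shift */
    uint32_t mask = (len >= 32 ? ~0u : (1u << len) - 1) << shift;
    return (base & ~mask) | ((val << shift) & mask);   /* base = 0 when nz clear */
}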
3410
3411 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3412 {
3413 if (a->c) {
3414 nullify_over(ctx);
3415 }
3416 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3417 }
3418
3419 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3420 {
3421 if (a->c) {
3422 nullify_over(ctx);
3423 }
3424 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3425 }
3426
3427 static bool trans_be(DisasContext *ctx, arg_be *a)
3428 {
3429 TCGv_reg tmp;
3430
3431 #ifdef CONFIG_USER_ONLY
3432 /* ??? It seems like there should be a good way of using
3433 "be disp(sr2, r0)", the canonical gateway entry mechanism
3434 to our advantage. But that appears to be inconvenient to
3435 manage alongside branch delay slots. Therefore we handle
3436 entry into the gateway page via absolute address. */
3437 /* Since we don't implement spaces, just branch. Do notice the special
3438 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3439 goto_tb to the TB containing the syscall. */
3440 if (a->b == 0) {
3441 return do_dbranch(ctx, a->disp, a->l, a->n);
3442 }
3443 #else
3444 nullify_over(ctx);
3445 #endif
3446
3447 tmp = get_temp(ctx);
3448 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3449 tmp = do_ibranch_priv(ctx, tmp);
3450
3451 #ifdef CONFIG_USER_ONLY
3452 return do_ibranch(ctx, tmp, a->l, a->n);
3453 #else
3454 TCGv_i64 new_spc = tcg_temp_new_i64();
3455
3456 load_spr(ctx, new_spc, a->sp);
3457 if (a->l) {
3458 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3459 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3460 }
3461 if (a->n && use_nullify_skip(ctx)) {
3462 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3463 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3464 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3465 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3466 } else {
3467 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3468 if (ctx->iaoq_b == -1) {
3469 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3470 }
3471 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3472 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3473 nullify_set(ctx, a->n);
3474 }
3475 tcg_temp_free_i64(new_spc);
3476 tcg_gen_lookup_and_goto_ptr();
3477 ctx->base.is_jmp = DISAS_NORETURN;
3478 return nullify_end(ctx);
3479 #endif
3480 }
3481
3482 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3483 {
3484 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3485 }
3486
3487 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3488 {
3489 target_ureg dest = iaoq_dest(ctx, a->disp);
3490
3491 nullify_over(ctx);
3492
3493 /* Make sure the caller hasn't done something weird with the queue.
3494 * ??? This is not quite the same as the PSW[B] bit, which would be
3495 * expensive to track. Real hardware will trap for
3496 * b gateway
3497 * b gateway+4 (in delay slot of first branch)
3498 * However, checking for a non-sequential instruction queue *will*
3499 * diagnose the security hole
3500 * b gateway
3501 * b evil
3502 * in which instructions at evil would run with increased privs.
3503 */
3504 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3505 return gen_illegal(ctx);
3506 }
3507
3508 #ifndef CONFIG_USER_ONLY
3509 if (ctx->tb_flags & PSW_C) {
3510 CPUHPPAState *env = ctx->cs->env_ptr;
3511 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3512 /* If we could not find a TLB entry, then we need to generate an
3513 ITLB miss exception so the kernel will provide it.
3514 The resulting TLB fill operation will invalidate this TB and
3515 we will re-translate, at which point we *will* be able to find
3516 the TLB entry and determine if this is in fact a gateway page. */
3517 if (type < 0) {
3518 gen_excp(ctx, EXCP_ITLB_MISS);
3519 return true;
3520 }
3521 /* No change for non-gateway pages or for priv decrease. */
3522 if (type >= 4 && type - 4 < ctx->privilege) {
3523 dest = deposit32(dest, 0, 2, type - 4);
3524 }
3525 } else {
3526 dest &= -4; /* priv = 0 */
3527 }
3528 #endif
3529
3530 if (a->l) {
3531 TCGv_reg tmp = dest_gpr(ctx, a->l);
3532 if (ctx->privilege < 3) {
3533 tcg_gen_andi_reg(tmp, tmp, -4);
3534 }
3535 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3536 save_gpr(ctx, a->l, tmp);
3537 }
3538
3539 return do_dbranch(ctx, dest, 0, a->n);
3540 }
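
/*
 * Editor's sketch (not part of the original file): the promotion rule
 * above.  A gateway page of access type 4+p may deposit p into the low
 * two bits of the target, but only when that raises privilege
 * (numerically lowers the level); everything else leaves the target as is.
 */
static uint32_t b_gate_priv_sketch(uint32_t dest, int type, unsigned cur_priv)
{
    if (type >= 4 && (unsigned)(type - 4) < cur_priv) {
        dest = (dest & ~3u) | (type - 4);
    }
    return dest;
}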
3541
3542 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3543 {
3544 if (a->x) {
3545 TCGv_reg tmp = get_temp(ctx);
3546 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3547 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3548 /* The computation here never changes privilege level. */
3549 return do_ibranch(ctx, tmp, a->l, a->n);
3550 } else {
3551 /* BLR R0,RX is a good way to load PC+8 into RX. */
3552 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3553 }
3554 }
3555
3556 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3557 {
3558 TCGv_reg dest;
3559
3560 if (a->x == 0) {
3561 dest = load_gpr(ctx, a->b);
3562 } else {
3563 dest = get_temp(ctx);
3564 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3565 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3566 }
3567 dest = do_ibranch_priv(ctx, dest);
3568 return do_ibranch(ctx, dest, 0, a->n);
3569 }
3570
3571 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3572 {
3573 TCGv_reg dest;
3574
3575 #ifdef CONFIG_USER_ONLY
3576 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3577 return do_ibranch(ctx, dest, a->l, a->n);
3578 #else
3579 nullify_over(ctx);
3580 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3581
3582 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3583 if (ctx->iaoq_b == -1) {
3584 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3585 }
3586 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3587 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3588 if (a->l) {
3589 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3590 }
3591 nullify_set(ctx, a->n);
3592 tcg_gen_lookup_and_goto_ptr();
3593 ctx->base.is_jmp = DISAS_NORETURN;
3594 return nullify_end(ctx);
3595 #endif
3596 }
3597
3598 /*
3599 * Float class 0
3600 */
3601
3602 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3603 {
3604 tcg_gen_mov_i32(dst, src);
3605 }
3606
3607 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3608 {
3609 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3610 }
3611
3612 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3613 {
3614 tcg_gen_mov_i64(dst, src);
3615 }
3616
3617 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3618 {
3619 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3620 }
3621
3622 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3623 {
3624 tcg_gen_andi_i32(dst, src, INT32_MAX);
3625 }
3626
3627 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3628 {
3629 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3630 }
3631
3632 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3633 {
3634 tcg_gen_andi_i64(dst, src, INT64_MAX);
3635 }
3636
3637 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3638 {
3639 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3640 }
3641
3642 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3643 {
3644 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3645 }
3646
3647 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3648 {
3649 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3650 }
3651
3652 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3653 {
3654 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3655 }
3656
3657 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3658 {
3659 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3660 }
3661
3662 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3663 {
3664 tcg_gen_xori_i32(dst, src, INT32_MIN);
3665 }
3666
3667 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3668 {
3669 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3670 }
3671
3672 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3673 {
3674 tcg_gen_xori_i64(dst, src, INT64_MIN);
3675 }
3676
3677 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3678 {
3679 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3680 }
3681
3682 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3683 {
3684 tcg_gen_ori_i32(dst, src, INT32_MIN);
3685 }
3686
3687 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3688 {
3689 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3690 }
3691
3692 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3693 {
3694 tcg_gen_ori_i64(dst, src, INT64_MIN);
3695 }
3696
3697 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3698 {
3699 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3700 }
3701
3702 /*
3703 * Float class 1
3704 */
3705
3706 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3707 {
3708 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3709 }
3710
3711 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3712 {
3713 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3714 }
3715
3716 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3717 {
3718 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3719 }
3720
3721 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3722 {
3723 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3724 }
3725
3726 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3727 {
3728 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3729 }
3730
3731 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3734 }
3735
3736 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3737 {
3738 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3739 }
3740
3741 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3744 }
3745
3746 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3747 {
3748 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3749 }
3750
3751 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3752 {
3753 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3754 }
3755
3756 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3757 {
3758 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3759 }
3760
3761 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3762 {
3763 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3764 }
3765
3766 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3767 {
3768 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3769 }
3770
3771 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3772 {
3773 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3774 }
3775
3776 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3777 {
3778 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3779 }
3780
3781 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3782 {
3783 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3784 }
3785
3786 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3787 {
3788 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3789 }
3790
3791 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3792 {
3793 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3794 }
3795
3796 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3797 {
3798 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3799 }
3800
3801 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3802 {
3803 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3804 }
3805
3806 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3807 {
3808 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3809 }
3810
3811 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3812 {
3813 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3814 }
3815
3816 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3817 {
3818 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3819 }
3820
3821 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3822 {
3823 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3824 }
3825
3826 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3827 {
3828 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3829 }
3830
3831 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3832 {
3833 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3834 }
3835
3836 /*
3837 * Float class 2
3838 */
3839
3840 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3841 {
3842 TCGv_i32 ta, tb, tc, ty;
3843
3844 nullify_over(ctx);
3845
3846 ta = load_frw0_i32(a->r1);
3847 tb = load_frw0_i32(a->r2);
3848 ty = tcg_constant_i32(a->y);
3849 tc = tcg_constant_i32(a->c);
3850
3851 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3852
3853 tcg_temp_free_i32(ta);
3854 tcg_temp_free_i32(tb);
3855
3856 return nullify_end(ctx);
3857 }
3858
3859 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3860 {
3861 TCGv_i64 ta, tb;
3862 TCGv_i32 tc, ty;
3863
3864 nullify_over(ctx);
3865
3866 ta = load_frd0(a->r1);
3867 tb = load_frd0(a->r2);
3868 ty = tcg_constant_i32(a->y);
3869 tc = tcg_constant_i32(a->c);
3870
3871 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3872
3873 tcg_temp_free_i64(ta);
3874 tcg_temp_free_i64(tb);
3875
3876 return nullify_end(ctx);
3877 }
3878
3879 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3880 {
3881 TCGv_reg t;
3882
3883 nullify_over(ctx);
3884
3885 t = get_temp(ctx);
3886 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3887
3888 if (a->y == 1) {
3889 int mask;
3890 bool inv = false;
3891
3892 switch (a->c) {
3893 case 0: /* simple */
3894 tcg_gen_andi_reg(t, t, 0x4000000);
3895 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3896 goto done;
3897 case 2: /* rej */
3898 inv = true;
3899 /* fallthru */
3900 case 1: /* acc */
3901 mask = 0x43ff800;
3902 break;
3903 case 6: /* rej8 */
3904 inv = true;
3905 /* fallthru */
3906 case 5: /* acc8 */
3907 mask = 0x43f8000;
3908 break;
3909 case 9: /* acc6 */
3910 mask = 0x43e0000;
3911 break;
3912 case 13: /* acc4 */
3913 mask = 0x4380000;
3914 break;
3915 case 17: /* acc2 */
3916 mask = 0x4200000;
3917 break;
3918 default:
3919 gen_illegal(ctx);
3920 return true;
3921 }
3922 if (inv) {
3923 TCGv_reg c = load_const(ctx, mask);
3924 tcg_gen_or_reg(t, t, c);
3925 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3926 } else {
3927 tcg_gen_andi_reg(t, t, mask);
3928 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3929 }
3930 } else {
3931 unsigned cbit = (a->y ^ 1) - 1;
3932
3933 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3934 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3935 tcg_temp_free(t);
3936 }
3937
3938 done:
3939 return nullify_end(ctx);
3940 }
3941
3942 /*
3943 * Float class 3
3944 */
3945
3946 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3947 {
3948 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3949 }
3950
3951 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3952 {
3953 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3954 }
3955
3956 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3957 {
3958 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3959 }
3960
3961 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3962 {
3963 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3964 }
3965
3966 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3967 {
3968 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3969 }
3970
3971 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3972 {
3973 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3974 }
3975
3976 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3977 {
3978 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3979 }
3980
3981 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3982 {
3983 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3984 }
3985
3986 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3987 {
3988 TCGv_i64 x, y;
3989
3990 nullify_over(ctx);
3991
3992 x = load_frw0_i64(a->r1);
3993 y = load_frw0_i64(a->r2);
3994 tcg_gen_mul_i64(x, x, y);
3995 save_frd(a->t, x);
3996 tcg_temp_free_i64(x);
3997 tcg_temp_free_i64(y);
3998
3999 return nullify_end(ctx);
4000 }
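
/*
 * Editor's sketch (not part of the original file): XMPYU in one line --
 * an unsigned 32x32 -> 64 multiply whose product fills a double register.
 */
static uint64_t xmpyu_sketch(uint32_t a, uint32_t b)
{
    return (uint64_t)a * b;
}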
4001
4002 /* Convert the fmpyadd single-precision register encodings to standard. */
4003 static inline int fmpyadd_s_reg(unsigned r)
4004 {
4005 return (r & 16) * 2 + 16 + (r & 15);
4006 }
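
/*
 * Editor's note (illustrative, not from the original file): a quick
 * sanity check of the mapping above.  Encodings 0..15 and 16..31 land
 * at 16..31 and 48..63 respectively in the doubled single-precision
 * register namespace.  (Needs <assert.h>.)
 */
static void fmpyadd_s_reg_check_sketch(void)
{
    assert(fmpyadd_s_reg(0)  == 16);
    assert(fmpyadd_s_reg(15) == 31);
    assert(fmpyadd_s_reg(16) == 48);
    assert(fmpyadd_s_reg(31) == 63);
}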
4007
4008 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4009 {
4010 int tm = fmpyadd_s_reg(a->tm);
4011 int ra = fmpyadd_s_reg(a->ra);
4012 int ta = fmpyadd_s_reg(a->ta);
4013 int rm2 = fmpyadd_s_reg(a->rm2);
4014 int rm1 = fmpyadd_s_reg(a->rm1);
4015
4016 nullify_over(ctx);
4017
4018 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4019 do_fop_weww(ctx, ta, ta, ra,
4020 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4021
4022 return nullify_end(ctx);
4023 }
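/*
 * Note: per PA-RISC, FMPYADD/FMPYSUB encode two independent operations
 * in one instruction (tm = rm1 * rm2 and ta = ta +/- ra), not a fused
 * multiply-add; hence they are modelled as two ordinary fp ops under a
 * single nullify_over/nullify_end pair.
 */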
4024
4025 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4026 {
4027 return do_fmpyadd_s(ctx, a, false);
4028 }
4029
4030 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4031 {
4032 return do_fmpyadd_s(ctx, a, true);
4033 }
4034
4035 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4036 {
4037 nullify_over(ctx);
4038
4039 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4040 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4041 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4042
4043 return nullify_end(ctx);
4044 }
4045
4046 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4047 {
4048 return do_fmpyadd_d(ctx, a, false);
4049 }
4050
4051 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4052 {
4053 return do_fmpyadd_d(ctx, a, true);
4054 }
4055
4056 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4057 {
4058 TCGv_i32 x, y, z;
4059
4060 nullify_over(ctx);
4061 x = load_frw0_i32(a->rm1);
4062 y = load_frw0_i32(a->rm2);
4063 z = load_frw0_i32(a->ra3);
4064
4065 if (a->neg) {
4066 gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4067 } else {
4068 gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4069 }
4070
4071 tcg_temp_free_i32(y);
4072 tcg_temp_free_i32(z);
4073 save_frw_i32(a->t, x);
4074 tcg_temp_free_i32(x);
4075 return nullify_end(ctx);
4076 }
4077
4078 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4079 {
4080 TCGv_i64 x, y, z;
4081
4082 nullify_over(ctx);
4083 x = load_frd0(a->rm1);
4084 y = load_frd0(a->rm2);
4085 z = load_frd0(a->ra3);
4086
4087 if (a->neg) {
4088 gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4089 } else {
4090 gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4091 }
4092
4093 tcg_temp_free_i64(y);
4094 tcg_temp_free_i64(z);
4095 save_frd(a->t, x);
4096 tcg_temp_free_i64(x);
4097 return nullify_end(ctx);
4098 }
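/*
 * Note: unlike FMPYADD above, FMPYFADD (PA-RISC 2.0) is a true fused
 * multiply-add, t = rm1 * rm2 + ra with a single rounding; the neg
 * flag selects FMPYNFADD, computing t = -(rm1 * rm2) + ra.
 */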
4099
4100 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4101 {
4102 qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4103 cond_free(&ctx->null_cond);
4104 return true;
4105 }
4106
4107 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4108 {
4109 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4110 int bound;
4111
4112 ctx->cs = cs;
4113 ctx->tb_flags = ctx->base.tb->flags;
4114
4115 #ifdef CONFIG_USER_ONLY
4116 ctx->privilege = MMU_USER_IDX;
4117 ctx->mmu_idx = MMU_USER_IDX;
4118 ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4119 ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4120 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4121 #else
4122 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4123 ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4124
4125 /* Recover the IAOQ values from the GVA + PRIV. */
4126 uint64_t cs_base = ctx->base.tb->cs_base;
4127 uint64_t iasq_f = cs_base & ~0xffffffffull;
4128 int32_t diff = cs_base;
4129
4130 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4131 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4132 #endif
4133 ctx->iaoq_n = -1;
4134 ctx->iaoq_n_var = NULL;
4135
4136 /* Bound the number of instructions by those left on the page. */
4137 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4138 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4139
4140 ctx->ntempr = 0;
4141 ctx->ntempl = 0;
4142 memset(ctx->tempr, 0, sizeof(ctx->tempr));
4143 memset(ctx->templ, 0, sizeof(ctx->templ));
4144 }
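/*
 * Note on the recovery above: in the system-mode branch, tb->cs_base
 * packs the space (IASQ_F) into its high 32 bits and the signed
 * IAOQ_B - IAOQ_F offset into its low 32 bits; a zero offset means the
 * back of the queue was unknown when the TB was built, hence the -1.
 * For the page bound, assuming 4 KiB pages: pc_first | TARGET_PAGE_MASK
 * sets every bit above the in-page offset, so e.g. an offset of 0xff0
 * negates to 0x10 bytes remaining, i.e. a bound of 4 instructions.
 */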
4145
4146 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4147 {
4148 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4149
4150 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4151 ctx->null_cond = cond_make_f();
4152 ctx->psw_n_nonzero = false;
4153 if (ctx->tb_flags & PSW_N) {
4154 ctx->null_cond.c = TCG_COND_ALWAYS;
4155 ctx->psw_n_nonzero = true;
4156 }
4157 ctx->null_lab = NULL;
4158 }
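/*
 * Note: TCG_COND_ALWAYS in null_cond means "the next instruction is
 * statically nullified".  hppa_tr_translate_insn below depends on this
 * seed: it still fetches such an insn (preserving the execute
 * permission check) but skips decode and resets the condition to
 * TCG_COND_NEVER.
 */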
4159
4160 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4161 {
4162 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4163
4164 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4165 }
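/*
 * Note: the two operands recorded here are what restore_state_to_opc()
 * at the end of this file receives as data[0] and data[1] when an
 * exception unwinds into the middle of this TB.
 */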
4166
4167 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4168 {
4169 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4170 CPUHPPAState *env = cs->env_ptr;
4171 DisasJumpType ret;
4172 int i, n;
4173
4174 /* Execute one insn. */
4175 #ifdef CONFIG_USER_ONLY
4176 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4177 do_page_zero(ctx);
4178 ret = ctx->base.is_jmp;
4179 assert(ret != DISAS_NEXT);
4180 } else
4181 #endif
4182 {
4183 /* Always fetch the insn, even if nullified, so that the page's
4184 execute permission is still checked. */
4185 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4186
4187 /* Set up the IA queue for the next insn.
4188 This will be overwritten by a branch. */
4189 if (ctx->iaoq_b == -1) {
4190 ctx->iaoq_n = -1;
4191 ctx->iaoq_n_var = get_temp(ctx);
4192 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4193 } else {
4194 ctx->iaoq_n = ctx->iaoq_b + 4;
4195 ctx->iaoq_n_var = NULL;
4196 }
4197
4198 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4199 ctx->null_cond.c = TCG_COND_NEVER;
4200 ret = DISAS_NEXT;
4201 } else {
4202 ctx->insn = insn;
4203 if (!decode(ctx, insn)) {
4204 gen_illegal(ctx);
4205 }
4206 ret = ctx->base.is_jmp;
4207 assert(ctx->null_lab == NULL);
4208 }
4209 }
4210
4211 /* Free any temporaries allocated. */
4212 for (i = 0, n = ctx->ntempr; i < n; ++i) {
4213 tcg_temp_free(ctx->tempr[i]);
4214 ctx->tempr[i] = NULL;
4215 }
4216 for (i = 0, n = ctx->ntempl; i < n; ++i) {
4217 tcg_temp_free_tl(ctx->templ[i]);
4218 ctx->templ[i] = NULL;
4219 }
4220 ctx->ntempr = 0;
4221 ctx->ntempl = 0;
4222
4223 /* Advance the insn queue. Note that this check also detects
4224 a priority change within the instruction queue. */
4225 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4226 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4227 && use_goto_tb(ctx, ctx->iaoq_b)
4228 && (ctx->null_cond.c == TCG_COND_NEVER
4229 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4230 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4231 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4232 ctx->base.is_jmp = ret = DISAS_NORETURN;
4233 } else {
4234 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4235 }
4236 }
4237 ctx->iaoq_f = ctx->iaoq_b;
4238 ctx->iaoq_b = ctx->iaoq_n;
4239 ctx->base.pc_next += 4;
4240
4241 switch (ret) {
4242 case DISAS_NORETURN:
4243 case DISAS_IAQ_N_UPDATED:
4244 break;
4245
4246 case DISAS_NEXT:
4247 case DISAS_IAQ_N_STALE:
4248 case DISAS_IAQ_N_STALE_EXIT:
4249 if (ctx->iaoq_f == -1) {
4250 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4251 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4252 #ifndef CONFIG_USER_ONLY
4253 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4254 #endif
4255 nullify_save(ctx);
4256 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4257 ? DISAS_EXIT
4258 : DISAS_IAQ_N_UPDATED);
4259 } else if (ctx->iaoq_b == -1) {
4260 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4261 }
4262 break;
4263
4264 default:
4265 g_assert_not_reached();
4266 }
4267 }
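/*
 * Note: throughout the loop above, an iaoq_* value of -1 means "not a
 * translate-time constant"; the variable part then lives in cpu_iaoq_b
 * or ctx->iaoq_n_var.  The direct goto_tb path is taken only when both
 * the back and next queue entries are constants, the target is
 * reachable per use_goto_tb, and the nullification state is static
 * (ALWAYS or NEVER).
 */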
4268
4269 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4270 {
4271 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4272 DisasJumpType is_jmp = ctx->base.is_jmp;
4273
4274 switch (is_jmp) {
4275 case DISAS_NORETURN:
4276 break;
4277 case DISAS_TOO_MANY:
4278 case DISAS_IAQ_N_STALE:
4279 case DISAS_IAQ_N_STALE_EXIT:
4280 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4281 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4282 nullify_save(ctx);
4283 /* FALLTHRU */
4284 case DISAS_IAQ_N_UPDATED:
4285 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4286 tcg_gen_lookup_and_goto_ptr();
4287 break;
4288 }
4289 /* FALLTHRU */
4290 case DISAS_EXIT:
4291 tcg_gen_exit_tb(NULL, 0);
4292 break;
4293 default:
4294 g_assert_not_reached();
4295 }
4296 }
4297
4298 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4299 {
4300 target_ulong pc = dcbase->pc_first;
4301
4302 #ifdef CONFIG_USER_ONLY
4303 switch (pc) {
4304 case 0x00:
4305 qemu_log("IN:\n0x00000000: (null)\n");
4306 return;
4307 case 0xb0:
4308 qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
4309 return;
4310 case 0xe0:
4311 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
4312 return;
4313 case 0x100:
4314 qemu_log("IN:\n0x00000100: syscall\n");
4315 return;
4316 }
4317 #endif
4318
4319 qemu_log("IN: %s\n", lookup_symbol(pc));
4320 log_target_disas(cs, pc, dcbase->tb->size);
4321 }
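/*
 * Note: the special-cased offsets are the entry points of the emulated
 * hppa-linux page-zero vectors handled by do_page_zero() for user-only
 * builds; they are logged symbolically since there is no guest code to
 * disassemble at those addresses.
 */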
4322
4323 static const TranslatorOps hppa_tr_ops = {
4324 .init_disas_context = hppa_tr_init_disas_context,
4325 .tb_start = hppa_tr_tb_start,
4326 .insn_start = hppa_tr_insn_start,
4327 .translate_insn = hppa_tr_translate_insn,
4328 .tb_stop = hppa_tr_tb_stop,
4329 .disas_log = hppa_tr_disas_log,
4330 };
4331
4332 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
4333 {
4334 DisasContext ctx;
4335 translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
4336 }
4337
4338 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4339 target_ulong *data)
4340 {
4341 env->iaoq_f = data[0];
4342 if (data[1] != (target_ureg)-1) {
4343 env->iaoq_b = data[1];
4344 }
4345 /* Since we were executing the instruction at IAOQ_F when some
4346 action provoked the cpu_restore_state, we can infer that the
4347 instruction was not nullified. */
4348 env->psw_n = 0;
4349 }
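/*
 * Note: data[0] and data[1] are the iaoq_f/iaoq_b values recorded by
 * hppa_tr_insn_start().  A recorded -1 means IAOQ_B was not constant
 * at translate time, in which case env->iaoq_b is left alone on the
 * assumption that the TB has kept the runtime value up to date.
 */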