1 /*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/log.h"
30
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef HELPER_H
34
35
36 /* Since we have a distinction between register size and address size,
37 we need to redefine all of these. */
38
39 #undef TCGv
40 #undef tcg_temp_new
41 #undef tcg_global_mem_new
42
43 #if TARGET_LONG_BITS == 64
44 #define TCGv_tl TCGv_i64
45 #define tcg_temp_new_tl tcg_temp_new_i64
46 #if TARGET_REGISTER_BITS == 64
47 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64
48 #else
49 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
50 #endif
51 #else
52 #define TCGv_tl TCGv_i32
53 #define tcg_temp_new_tl tcg_temp_new_i32
54 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32
55 #endif
56
57 #if TARGET_REGISTER_BITS == 64
58 #define TCGv_reg TCGv_i64
59
60 #define tcg_temp_new tcg_temp_new_i64
61 #define tcg_global_mem_new tcg_global_mem_new_i64
62
63 #define tcg_gen_movi_reg tcg_gen_movi_i64
64 #define tcg_gen_mov_reg tcg_gen_mov_i64
65 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
66 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
67 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
68 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
69 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
70 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
71 #define tcg_gen_ld_reg tcg_gen_ld_i64
72 #define tcg_gen_st8_reg tcg_gen_st8_i64
73 #define tcg_gen_st16_reg tcg_gen_st16_i64
74 #define tcg_gen_st32_reg tcg_gen_st32_i64
75 #define tcg_gen_st_reg tcg_gen_st_i64
76 #define tcg_gen_add_reg tcg_gen_add_i64
77 #define tcg_gen_addi_reg tcg_gen_addi_i64
78 #define tcg_gen_sub_reg tcg_gen_sub_i64
79 #define tcg_gen_neg_reg tcg_gen_neg_i64
80 #define tcg_gen_subfi_reg tcg_gen_subfi_i64
81 #define tcg_gen_subi_reg tcg_gen_subi_i64
82 #define tcg_gen_and_reg tcg_gen_and_i64
83 #define tcg_gen_andi_reg tcg_gen_andi_i64
84 #define tcg_gen_or_reg tcg_gen_or_i64
85 #define tcg_gen_ori_reg tcg_gen_ori_i64
86 #define tcg_gen_xor_reg tcg_gen_xor_i64
87 #define tcg_gen_xori_reg tcg_gen_xori_i64
88 #define tcg_gen_not_reg tcg_gen_not_i64
89 #define tcg_gen_shl_reg tcg_gen_shl_i64
90 #define tcg_gen_shli_reg tcg_gen_shli_i64
91 #define tcg_gen_shr_reg tcg_gen_shr_i64
92 #define tcg_gen_shri_reg tcg_gen_shri_i64
93 #define tcg_gen_sar_reg tcg_gen_sar_i64
94 #define tcg_gen_sari_reg tcg_gen_sari_i64
95 #define tcg_gen_brcond_reg tcg_gen_brcond_i64
96 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
97 #define tcg_gen_setcond_reg tcg_gen_setcond_i64
98 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
99 #define tcg_gen_mul_reg tcg_gen_mul_i64
100 #define tcg_gen_muli_reg tcg_gen_muli_i64
101 #define tcg_gen_div_reg tcg_gen_div_i64
102 #define tcg_gen_rem_reg tcg_gen_rem_i64
103 #define tcg_gen_divu_reg tcg_gen_divu_i64
104 #define tcg_gen_remu_reg tcg_gen_remu_i64
105 #define tcg_gen_discard_reg tcg_gen_discard_i64
106 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
107 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
108 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
109 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
110 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
111 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
112 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
113 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
114 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
115 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
116 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
117 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
118 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
119 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
120 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
121 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
122 #define tcg_gen_andc_reg tcg_gen_andc_i64
123 #define tcg_gen_eqv_reg tcg_gen_eqv_i64
124 #define tcg_gen_nand_reg tcg_gen_nand_i64
125 #define tcg_gen_nor_reg tcg_gen_nor_i64
126 #define tcg_gen_orc_reg tcg_gen_orc_i64
127 #define tcg_gen_clz_reg tcg_gen_clz_i64
128 #define tcg_gen_ctz_reg tcg_gen_ctz_i64
129 #define tcg_gen_clzi_reg tcg_gen_clzi_i64
130 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
131 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
132 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
133 #define tcg_gen_rotl_reg tcg_gen_rotl_i64
134 #define tcg_gen_rotli_reg tcg_gen_rotli_i64
135 #define tcg_gen_rotr_reg tcg_gen_rotr_i64
136 #define tcg_gen_rotri_reg tcg_gen_rotri_i64
137 #define tcg_gen_deposit_reg tcg_gen_deposit_i64
138 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
139 #define tcg_gen_extract_reg tcg_gen_extract_i64
140 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
141 #define tcg_gen_extract2_reg tcg_gen_extract2_i64
142 #define tcg_constant_reg tcg_constant_i64
143 #define tcg_gen_movcond_reg tcg_gen_movcond_i64
144 #define tcg_gen_add2_reg tcg_gen_add2_i64
145 #define tcg_gen_sub2_reg tcg_gen_sub2_i64
146 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
147 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
148 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
149 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
150 #else
151 #define TCGv_reg TCGv_i32
152 #define tcg_temp_new tcg_temp_new_i32
153 #define tcg_global_mem_new tcg_global_mem_new_i32
154
155 #define tcg_gen_movi_reg tcg_gen_movi_i32
156 #define tcg_gen_mov_reg tcg_gen_mov_i32
157 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
158 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
159 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
160 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
161 #define tcg_gen_ld32u_reg tcg_gen_ld_i32
162 #define tcg_gen_ld32s_reg tcg_gen_ld_i32
163 #define tcg_gen_ld_reg tcg_gen_ld_i32
164 #define tcg_gen_st8_reg tcg_gen_st8_i32
165 #define tcg_gen_st16_reg tcg_gen_st16_i32
166 #define tcg_gen_st32_reg tcg_gen_st32_i32
167 #define tcg_gen_st_reg tcg_gen_st_i32
168 #define tcg_gen_add_reg tcg_gen_add_i32
169 #define tcg_gen_addi_reg tcg_gen_addi_i32
170 #define tcg_gen_sub_reg tcg_gen_sub_i32
171 #define tcg_gen_neg_reg tcg_gen_neg_i32
172 #define tcg_gen_subfi_reg tcg_gen_subfi_i32
173 #define tcg_gen_subi_reg tcg_gen_subi_i32
174 #define tcg_gen_and_reg tcg_gen_and_i32
175 #define tcg_gen_andi_reg tcg_gen_andi_i32
176 #define tcg_gen_or_reg tcg_gen_or_i32
177 #define tcg_gen_ori_reg tcg_gen_ori_i32
178 #define tcg_gen_xor_reg tcg_gen_xor_i32
179 #define tcg_gen_xori_reg tcg_gen_xori_i32
180 #define tcg_gen_not_reg tcg_gen_not_i32
181 #define tcg_gen_shl_reg tcg_gen_shl_i32
182 #define tcg_gen_shli_reg tcg_gen_shli_i32
183 #define tcg_gen_shr_reg tcg_gen_shr_i32
184 #define tcg_gen_shri_reg tcg_gen_shri_i32
185 #define tcg_gen_sar_reg tcg_gen_sar_i32
186 #define tcg_gen_sari_reg tcg_gen_sari_i32
187 #define tcg_gen_brcond_reg tcg_gen_brcond_i32
188 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
189 #define tcg_gen_setcond_reg tcg_gen_setcond_i32
190 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
191 #define tcg_gen_mul_reg tcg_gen_mul_i32
192 #define tcg_gen_muli_reg tcg_gen_muli_i32
193 #define tcg_gen_div_reg tcg_gen_div_i32
194 #define tcg_gen_rem_reg tcg_gen_rem_i32
195 #define tcg_gen_divu_reg tcg_gen_divu_i32
196 #define tcg_gen_remu_reg tcg_gen_remu_i32
197 #define tcg_gen_discard_reg tcg_gen_discard_i32
198 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
199 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
200 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
201 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32
202 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
203 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
204 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
205 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
206 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
207 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
208 #define tcg_gen_ext32u_reg tcg_gen_mov_i32
209 #define tcg_gen_ext32s_reg tcg_gen_mov_i32
210 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
211 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
212 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
213 #define tcg_gen_andc_reg tcg_gen_andc_i32
214 #define tcg_gen_eqv_reg tcg_gen_eqv_i32
215 #define tcg_gen_nand_reg tcg_gen_nand_i32
216 #define tcg_gen_nor_reg tcg_gen_nor_i32
217 #define tcg_gen_orc_reg tcg_gen_orc_i32
218 #define tcg_gen_clz_reg tcg_gen_clz_i32
219 #define tcg_gen_ctz_reg tcg_gen_ctz_i32
220 #define tcg_gen_clzi_reg tcg_gen_clzi_i32
221 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
222 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
223 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
224 #define tcg_gen_rotl_reg tcg_gen_rotl_i32
225 #define tcg_gen_rotli_reg tcg_gen_rotli_i32
226 #define tcg_gen_rotr_reg tcg_gen_rotr_i32
227 #define tcg_gen_rotri_reg tcg_gen_rotri_i32
228 #define tcg_gen_deposit_reg tcg_gen_deposit_i32
229 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
230 #define tcg_gen_extract_reg tcg_gen_extract_i32
231 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
232 #define tcg_gen_extract2_reg tcg_gen_extract2_i32
233 #define tcg_constant_reg tcg_constant_i32
234 #define tcg_gen_movcond_reg tcg_gen_movcond_i32
235 #define tcg_gen_add2_reg tcg_gen_add2_i32
236 #define tcg_gen_sub2_reg tcg_gen_sub2_i32
237 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
238 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
239 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
240 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
241 #endif /* TARGET_REGISTER_BITS */
242
243 typedef struct DisasCond {
244 TCGCond c;
245 TCGv_reg a0, a1;
246 } DisasCond;
247
248 typedef struct DisasContext {
249 DisasContextBase base;
250 CPUState *cs;
251
252 target_ureg iaoq_f;
253 target_ureg iaoq_b;
254 target_ureg iaoq_n;
255 TCGv_reg iaoq_n_var;
256
257 DisasCond null_cond;
258 TCGLabel *null_lab;
259
260 uint32_t insn;
261 uint32_t tb_flags;
262 int mmu_idx;
263 int privilege;
264 bool psw_n_nonzero;
265
266 #ifdef CONFIG_USER_ONLY
267 MemOp unalign;
268 #endif
269 } DisasContext;
270
271 #ifdef CONFIG_USER_ONLY
272 #define UNALIGN(C) (C)->unalign
273 #else
274 #define UNALIGN(C) MO_ALIGN
275 #endif
276
277 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
278 static int expand_sm_imm(DisasContext *ctx, int val)
279 {
280 if (val & PSW_SM_E) {
281 val = (val & ~PSW_SM_E) | PSW_E;
282 }
283 if (val & PSW_SM_W) {
284 val = (val & ~PSW_SM_W) | PSW_W;
285 }
286 return val;
287 }
288
289 /* The space register field is stored inverted, so that an explicit sr0
(encoded as 0) is distinguished from 0 meaning "infer the space from
the base register". */
290 static int expand_sr3x(DisasContext *ctx, int val)
291 {
292 return ~val;
293 }
294
295 /* Convert the M:A bits within a memory insn to the tri-state value
296 we use for the final M. */
297 static int ma_to_m(DisasContext *ctx, int val)
298 {
299 return val & 2 ? (val & 1 ? -1 : 1) : 0;
300 }
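/* For example, the 2-bit M:A field decodes as:
     M=0        -> 0  (no base register update)
     M=1, A=0   -> 1  (post-modify)
     M=1, A=1   -> -1 (pre-modify)
   matching the modify convention documented above do_load_32 below. */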
301
302 /* Convert the sign of the displacement to a pre- or post-modify. */
303 static int pos_to_m(DisasContext *ctx, int val)
304 {
305 return val ? 1 : -1;
306 }
307
308 static int neg_to_m(DisasContext *ctx, int val)
309 {
310 return val ? -1 : 1;
311 }
312
313 /* Used for branch targets and fp memory ops. */
314 static int expand_shl2(DisasContext *ctx, int val)
315 {
316 return val << 2;
317 }
318
319 /* Used for fp memory ops. */
320 static int expand_shl3(DisasContext *ctx, int val)
321 {
322 return val << 3;
323 }
324
325 /* Used for assemble_21. */
326 static int expand_shl11(DisasContext *ctx, int val)
327 {
328 return val << 11;
329 }
330
331
332 /* Include the auto-generated decoder. */
333 #include "decode-insns.c.inc"
334
335 /* We are not using a goto_tb (for whatever reason), but have already
336 updated the iaq, so don't do it again on exit. */
337 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
338
339 /* We are exiting the TB, but have neither emitted a goto_tb, nor
340 updated the iaq for the next instruction to be executed. */
341 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
342
343 /* Similarly, but we want to return to the main loop immediately
344 to recognize unmasked interrupts. */
345 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
346 #define DISAS_EXIT DISAS_TARGET_3
347
348 /* global register indexes */
349 static TCGv_reg cpu_gr[32];
350 static TCGv_i64 cpu_sr[4];
351 static TCGv_i64 cpu_srH;
352 static TCGv_reg cpu_iaoq_f;
353 static TCGv_reg cpu_iaoq_b;
354 static TCGv_i64 cpu_iasq_f;
355 static TCGv_i64 cpu_iasq_b;
356 static TCGv_reg cpu_sar;
357 static TCGv_reg cpu_psw_n;
358 static TCGv_reg cpu_psw_v;
359 static TCGv_reg cpu_psw_cb;
360 static TCGv_reg cpu_psw_cb_msb;
361
362 void hppa_translate_init(void)
363 {
364 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
365
366 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
367 static const GlobalVar vars[] = {
368 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
369 DEF_VAR(psw_n),
370 DEF_VAR(psw_v),
371 DEF_VAR(psw_cb),
372 DEF_VAR(psw_cb_msb),
373 DEF_VAR(iaoq_f),
374 DEF_VAR(iaoq_b),
375 };
376
377 #undef DEF_VAR
378
379 /* Use the symbolic register names that match the disassembler. */
380 static const char gr_names[32][4] = {
381 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
382 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
383 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
384 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
385 };
386 /* SR[4-7] are not individual TCG globals, so that space_select can
index them at runtime with a computed offset. */
387 static const char sr_names[5][4] = {
388 "sr0", "sr1", "sr2", "sr3", "srH"
389 };
390
391 int i;
392
393 cpu_gr[0] = NULL;
394 for (i = 1; i < 32; i++) {
395 cpu_gr[i] = tcg_global_mem_new(tcg_env,
396 offsetof(CPUHPPAState, gr[i]),
397 gr_names[i]);
398 }
399 for (i = 0; i < 4; i++) {
400 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
401 offsetof(CPUHPPAState, sr[i]),
402 sr_names[i]);
403 }
404 cpu_srH = tcg_global_mem_new_i64(tcg_env,
405 offsetof(CPUHPPAState, sr[4]),
406 sr_names[4]);
407
408 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
409 const GlobalVar *v = &vars[i];
410 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
411 }
412
413 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
414 offsetof(CPUHPPAState, iasq_f),
415 "iasq_f");
416 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
417 offsetof(CPUHPPAState, iasq_b),
418 "iasq_b");
419 }
420
421 static DisasCond cond_make_f(void)
422 {
423 return (DisasCond){
424 .c = TCG_COND_NEVER,
425 .a0 = NULL,
426 .a1 = NULL,
427 };
428 }
429
430 static DisasCond cond_make_t(void)
431 {
432 return (DisasCond){
433 .c = TCG_COND_ALWAYS,
434 .a0 = NULL,
435 .a1 = NULL,
436 };
437 }
438
439 static DisasCond cond_make_n(void)
440 {
441 return (DisasCond){
442 .c = TCG_COND_NE,
443 .a0 = cpu_psw_n,
444 .a1 = tcg_constant_reg(0)
445 };
446 }
447
448 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
449 {
450 assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
451 return (DisasCond){
452 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
453 };
454 }
455
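/* cond_make_0 and cond_make copy their operands into fresh temps: the
   condition may be consumed long after the inputs have been
   overwritten, e.g. by the writeback of the insn's own result.  */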
456 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
457 {
458 TCGv_reg tmp = tcg_temp_new();
459 tcg_gen_mov_reg(tmp, a0);
460 return cond_make_0_tmp(c, tmp);
461 }
462
463 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
464 {
465 DisasCond r = { .c = c };
466
467 assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
468 r.a0 = tcg_temp_new();
469 tcg_gen_mov_reg(r.a0, a0);
470 r.a1 = tcg_temp_new();
471 tcg_gen_mov_reg(r.a1, a1);
472
473 return r;
474 }
475
476 static void cond_free(DisasCond *cond)
477 {
478 switch (cond->c) {
479 default:
480 cond->a0 = NULL;
481 cond->a1 = NULL;
482 /* fallthru */
483 case TCG_COND_ALWAYS:
484 cond->c = TCG_COND_NEVER;
485 break;
486 case TCG_COND_NEVER:
487 break;
488 }
489 }
490
491 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
492 {
493 if (reg == 0) {
494 TCGv_reg t = tcg_temp_new();
495 tcg_gen_movi_reg(t, 0);
496 return t;
497 } else {
498 return cpu_gr[reg];
499 }
500 }
501
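/* When the insn is conditional (null_cond is live), dest_gpr returns a
   temporary; save_gpr then commits the value with a movcond via
   save_or_nullify.  */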
502 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
503 {
504 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
505 return tcg_temp_new();
506 } else {
507 return cpu_gr[reg];
508 }
509 }
510
511 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
512 {
513 if (ctx->null_cond.c != TCG_COND_NEVER) {
514 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
515 ctx->null_cond.a1, dest, t);
516 } else {
517 tcg_gen_mov_reg(dest, t);
518 }
519 }
520
521 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
522 {
523 if (reg != 0) {
524 save_or_nullify(ctx, cpu_gr[reg], t);
525 }
526 }
527
528 #if HOST_BIG_ENDIAN
529 # define HI_OFS 0
530 # define LO_OFS 4
531 #else
532 # define HI_OFS 4
533 # define LO_OFS 0
534 #endif
535
536 static TCGv_i32 load_frw_i32(unsigned rt)
537 {
538 TCGv_i32 ret = tcg_temp_new_i32();
539 tcg_gen_ld_i32(ret, tcg_env,
540 offsetof(CPUHPPAState, fr[rt & 31])
541 + (rt & 32 ? LO_OFS : HI_OFS));
542 return ret;
543 }
544
545 static TCGv_i32 load_frw0_i32(unsigned rt)
546 {
547 if (rt == 0) {
548 TCGv_i32 ret = tcg_temp_new_i32();
549 tcg_gen_movi_i32(ret, 0);
550 return ret;
551 } else {
552 return load_frw_i32(rt);
553 }
554 }
555
556 static TCGv_i64 load_frw0_i64(unsigned rt)
557 {
558 TCGv_i64 ret = tcg_temp_new_i64();
559 if (rt == 0) {
560 tcg_gen_movi_i64(ret, 0);
561 } else {
562 tcg_gen_ld32u_i64(ret, tcg_env,
563 offsetof(CPUHPPAState, fr[rt & 31])
564 + (rt & 32 ? LO_OFS : HI_OFS));
565 }
566 return ret;
567 }
568
569 static void save_frw_i32(unsigned rt, TCGv_i32 val)
570 {
571 tcg_gen_st_i32(val, tcg_env,
572 offsetof(CPUHPPAState, fr[rt & 31])
573 + (rt & 32 ? LO_OFS : HI_OFS));
574 }
575
576 #undef HI_OFS
577 #undef LO_OFS
578
579 static TCGv_i64 load_frd(unsigned rt)
580 {
581 TCGv_i64 ret = tcg_temp_new_i64();
582 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
583 return ret;
584 }
585
586 static TCGv_i64 load_frd0(unsigned rt)
587 {
588 if (rt == 0) {
589 TCGv_i64 ret = tcg_temp_new_i64();
590 tcg_gen_movi_i64(ret, 0);
591 return ret;
592 } else {
593 return load_frd(rt);
594 }
595 }
596
597 static void save_frd(unsigned rt, TCGv_i64 val)
598 {
599 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
600 }
601
602 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
603 {
604 #ifdef CONFIG_USER_ONLY
605 tcg_gen_movi_i64(dest, 0);
606 #else
607 if (reg < 4) {
608 tcg_gen_mov_i64(dest, cpu_sr[reg]);
609 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
610 tcg_gen_mov_i64(dest, cpu_srH);
611 } else {
612 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
613 }
614 #endif
615 }
616
617 /* Skip over the implementation of an insn that has been nullified.
618 Use this when the insn is too complex for a conditional move. */
619 static void nullify_over(DisasContext *ctx)
620 {
621 if (ctx->null_cond.c != TCG_COND_NEVER) {
622 /* The always condition should have been handled in the main loop. */
623 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
624
625 ctx->null_lab = gen_new_label();
626
627 /* If we're using PSW[N], copy it to a temp because... */
628 if (ctx->null_cond.a0 == cpu_psw_n) {
629 ctx->null_cond.a0 = tcg_temp_new();
630 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
631 }
632 /* ... we clear it before branching over the implementation,
633 so that (1) it's clear after nullifying this insn and
634 (2) if this insn nullifies the next, PSW[N] is valid. */
635 if (ctx->psw_n_nonzero) {
636 ctx->psw_n_nonzero = false;
637 tcg_gen_movi_reg(cpu_psw_n, 0);
638 }
639
640 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
641 ctx->null_cond.a1, ctx->null_lab);
642 cond_free(&ctx->null_cond);
643 }
644 }
645
646 /* Save the current nullification state to PSW[N]. */
647 static void nullify_save(DisasContext *ctx)
648 {
649 if (ctx->null_cond.c == TCG_COND_NEVER) {
650 if (ctx->psw_n_nonzero) {
651 tcg_gen_movi_reg(cpu_psw_n, 0);
652 }
653 return;
654 }
655 if (ctx->null_cond.a0 != cpu_psw_n) {
656 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
657 ctx->null_cond.a0, ctx->null_cond.a1);
658 ctx->psw_n_nonzero = true;
659 }
660 cond_free(&ctx->null_cond);
661 }
662
663 /* Set PSW[N] to X. The intention is that this is used immediately
664 before a goto_tb/exit_tb, so that there is no fallthru path to other
665 code within the TB. Therefore we do not update psw_n_nonzero. */
666 static void nullify_set(DisasContext *ctx, bool x)
667 {
668 if (ctx->psw_n_nonzero || x) {
669 tcg_gen_movi_reg(cpu_psw_n, x);
670 }
671 }
672
673 /* Mark the end of an instruction that may have been nullified.
674 This is the pair to nullify_over. Always returns true so that
675 it may be tail-called from a translate function. */
676 static bool nullify_end(DisasContext *ctx)
677 {
678 TCGLabel *null_lab = ctx->null_lab;
679 DisasJumpType status = ctx->base.is_jmp;
680
681 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
682 For UPDATED, we cannot update on the nullified path. */
683 assert(status != DISAS_IAQ_N_UPDATED);
684
685 if (likely(null_lab == NULL)) {
686 /* The current insn wasn't conditional or handled the condition
687 applied to it without a branch, so the (new) setting of
688 NULL_COND can be applied directly to the next insn. */
689 return true;
690 }
691 ctx->null_lab = NULL;
692
693 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
694 /* The next instruction will be unconditional,
695 and NULL_COND already reflects that. */
696 gen_set_label(null_lab);
697 } else {
698 /* The insn that we just executed is itself nullifying the next
699 instruction. Store the condition in the PSW[N] global.
700 We asserted PSW[N] = 0 in nullify_over, so that after the
701 label we have the proper value in place. */
702 nullify_save(ctx);
703 gen_set_label(null_lab);
704 ctx->null_cond = cond_make_n();
705 }
706 if (status == DISAS_NORETURN) {
707 ctx->base.is_jmp = DISAS_NEXT;
708 }
709 return true;
710 }
711
712 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
713 {
714 if (unlikely(ival == -1)) {
715 tcg_gen_mov_reg(dest, vval);
716 } else {
717 tcg_gen_movi_reg(dest, ival);
718 }
719 }
720
721 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
722 {
723 return ctx->iaoq_f + disp + 8;
724 }
725
726 static void gen_excp_1(int exception)
727 {
728 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
729 }
730
731 static void gen_excp(DisasContext *ctx, int exception)
732 {
733 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
734 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
735 nullify_save(ctx);
736 gen_excp_1(exception);
737 ctx->base.is_jmp = DISAS_NORETURN;
738 }
739
740 static bool gen_excp_iir(DisasContext *ctx, int exc)
741 {
742 nullify_over(ctx);
743 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
744 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
745 gen_excp(ctx, exc);
746 return nullify_end(ctx);
747 }
748
749 static bool gen_illegal(DisasContext *ctx)
750 {
751 return gen_excp_iir(ctx, EXCP_ILL);
752 }
753
754 #ifdef CONFIG_USER_ONLY
755 #define CHECK_MOST_PRIVILEGED(EXCP) \
756 return gen_excp_iir(ctx, EXCP)
757 #else
758 #define CHECK_MOST_PRIVILEGED(EXCP) \
759 do { \
760 if (ctx->privilege != 0) { \
761 return gen_excp_iir(ctx, EXCP); \
762 } \
763 } while (0)
764 #endif
765
766 static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
767 {
768 return translator_use_goto_tb(&ctx->base, dest);
769 }
770
771 /* If the next insn is to be nullified, and it's on the same page,
772 and we're not attempting to set a breakpoint on it, then we can
773 totally skip the nullified insn. This avoids creating and
774 executing a TB that merely branches to the next TB. */
775 static bool use_nullify_skip(DisasContext *ctx)
776 {
777 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
778 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
779 }
780
781 static void gen_goto_tb(DisasContext *ctx, int which,
782 target_ureg f, target_ureg b)
783 {
784 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
785 tcg_gen_goto_tb(which);
786 tcg_gen_movi_reg(cpu_iaoq_f, f);
787 tcg_gen_movi_reg(cpu_iaoq_b, b);
788 tcg_gen_exit_tb(ctx->base.tb, which);
789 } else {
790 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
791 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
792 tcg_gen_lookup_and_goto_ptr();
793 }
794 }
795
796 static bool cond_need_sv(int c)
797 {
798 return c == 2 || c == 3 || c == 6;
799 }
800
801 static bool cond_need_cb(int c)
802 {
803 return c == 4 || c == 5;
804 }
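/* Conditions 2, 3 and 6 (<, <=, SV) examine signed overflow, while
   4 and 5 (NUV, ZNV) examine the carry; compare the cases of do_cond
   below.  */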
805
806 /* Need extensions from TCGv_i32 to TCGv_reg. */
807 static bool cond_need_ext(DisasContext *ctx, bool d)
808 {
809 return TARGET_REGISTER_BITS == 64 && !d;
810 }
811
812 /*
813 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
814 * the PA-RISC 1.1 Architecture Reference Manual for details.
815 */
816
817 static DisasCond do_cond(unsigned cf, TCGv_reg res,
818 TCGv_reg cb_msb, TCGv_reg sv)
819 {
820 DisasCond cond;
821 TCGv_reg tmp;
822
823 switch (cf >> 1) {
824 case 0: /* Never / TR (0 / 1) */
825 cond = cond_make_f();
826 break;
827 case 1: /* = / <> (Z / !Z) */
828 cond = cond_make_0(TCG_COND_EQ, res);
829 break;
830 case 2: /* < / >= (N ^ V / !(N ^ V)) */
831 tmp = tcg_temp_new();
832 tcg_gen_xor_reg(tmp, res, sv);
833 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
834 break;
835 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
836 /*
837 * Simplify:
838 * (N ^ V) | Z
839 * ((res < 0) ^ (sv < 0)) | !res
840 * ((res ^ sv) < 0) | !res
841 * (~(res ^ sv) >= 0) | !res
842 * !(~(res ^ sv) >> 31) | !res
843 * !(~(res ^ sv) >> 31 & res)
844 */
845 tmp = tcg_temp_new();
846 tcg_gen_eqv_reg(tmp, res, sv);
847 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
848 tcg_gen_and_reg(tmp, tmp, res);
849 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
850 break;
851 case 4: /* NUV / UV (!C / C) */
852 cond = cond_make_0(TCG_COND_EQ, cb_msb);
853 break;
854 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
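/* cb_msb is 0 or 1, so its negation is 0 or all-ones;
   thus tmp == 0 iff !C || res == 0.  */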
855 tmp = tcg_temp_new();
856 tcg_gen_neg_reg(tmp, cb_msb);
857 tcg_gen_and_reg(tmp, tmp, res);
858 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
859 break;
860 case 6: /* SV / NSV (V / !V) */
861 cond = cond_make_0(TCG_COND_LT, sv);
862 break;
863 case 7: /* OD / EV */
864 tmp = tcg_temp_new();
865 tcg_gen_andi_reg(tmp, res, 1);
866 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
867 break;
868 default:
869 g_assert_not_reached();
870 }
871 if (cf & 1) {
872 cond.c = tcg_invert_cond(cond.c);
873 }
874
875 return cond;
876 }
877
878 /* Similar, but for the special case of subtraction without borrow, we
879 can use the inputs directly. This can allow other computation to be
880 deleted as unused. */
881
882 static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
883 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
884 {
885 DisasCond cond;
886
887 switch (cf >> 1) {
888 case 1: /* = / <> */
889 cond = cond_make(TCG_COND_EQ, in1, in2);
890 break;
891 case 2: /* < / >= */
892 cond = cond_make(TCG_COND_LT, in1, in2);
893 break;
894 case 3: /* <= / > */
895 cond = cond_make(TCG_COND_LE, in1, in2);
896 break;
897 case 4: /* << / >>= */
898 cond = cond_make(TCG_COND_LTU, in1, in2);
899 break;
900 case 5: /* <<= / >> */
901 cond = cond_make(TCG_COND_LEU, in1, in2);
902 break;
903 default:
904 return do_cond(cf, res, NULL, sv);
905 }
906 if (cf & 1) {
907 cond.c = tcg_invert_cond(cond.c);
908 }
909
910 return cond;
911 }
912
913 /*
914 * Similar, but for logicals, where the carry and overflow bits are not
915 * computed, and use of them is undefined.
916 *
917 * Undefined or not, hardware does not trap. It seems reasonable to
918 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
919 * how cases c={2,3} are treated.
920 */
921
922 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
923 {
924 switch (cf) {
925 case 0: /* never */
926 case 9: /* undef, C */
927 case 11: /* undef, C & !Z */
928 case 12: /* undef, V */
929 return cond_make_f();
930
931 case 1: /* true */
932 case 8: /* undef, !C */
933 case 10: /* undef, !C | Z */
934 case 13: /* undef, !V */
935 return cond_make_t();
936
937 case 2: /* == */
938 return cond_make_0(TCG_COND_EQ, res);
939 case 3: /* <> */
940 return cond_make_0(TCG_COND_NE, res);
941 case 4: /* < */
942 return cond_make_0(TCG_COND_LT, res);
943 case 5: /* >= */
944 return cond_make_0(TCG_COND_GE, res);
945 case 6: /* <= */
946 return cond_make_0(TCG_COND_LE, res);
947 case 7: /* > */
948 return cond_make_0(TCG_COND_GT, res);
949
950 case 14: /* OD */
951 case 15: /* EV */
952 return do_cond(cf, res, NULL, NULL);
953
954 default:
955 g_assert_not_reached();
956 }
957 }
958
959 /* Similar, but for shift/extract/deposit conditions. */
960
961 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
962 {
963 unsigned c, f;
964
965 /* Convert the compressed condition codes to standard.
966 0-2 are the same as logicals (nv,=,<), while 3 is OD.
967 4-7 are the reverse of 0-3. */
968 c = orig & 3;
969 if (c == 3) {
970 c = 7;
971 }
972 f = (orig & 4) / 4;
973
974 return do_log_cond(c * 2 + f, res);
975 }
976
977 /* Similar, but for unit conditions. */
978
979 static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
980 TCGv_reg in1, TCGv_reg in2)
981 {
982 DisasCond cond;
983 TCGv_reg tmp, cb = NULL;
984
985 if (cf & 8) {
986 /* Since we want to test lots of carry-out bits all at once, do not
987 * do our normal thing and compute carry-in of bit B+1 since that
988 * leaves us with carry bits spread across two words.
989 */
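/* This computes the per-bit carry-out directly:
     cb = (in1 & in2) | ((in1 | in2) & ~res)
   which, given res = in1 ^ in2 ^ carry-in, is the majority function
   of in1, in2 and the incoming carry at each bit.  */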
990 cb = tcg_temp_new();
991 tmp = tcg_temp_new();
992 tcg_gen_or_reg(cb, in1, in2);
993 tcg_gen_and_reg(tmp, in1, in2);
994 tcg_gen_andc_reg(cb, cb, res);
995 tcg_gen_or_reg(cb, cb, tmp);
996 }
997
998 switch (cf >> 1) {
999 case 0: /* never / TR */
1000 case 1: /* undefined */
1001 case 5: /* undefined */
1002 cond = cond_make_f();
1003 break;
1004
1005 case 2: /* SBZ / NBZ */
1006 /* See hasless(v,1) from
1007 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1008 */
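/* (res - 0x01010101) & ~res & 0x80808080 is nonzero iff at least one
   byte of res is zero: the subtraction borrows through a zero byte,
   setting its top bit, and ~res masks out bytes whose top bit was
   already set.  */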
1009 tmp = tcg_temp_new();
1010 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1011 tcg_gen_andc_reg(tmp, tmp, res);
1012 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
1013 cond = cond_make_0(TCG_COND_NE, tmp);
1014 break;
1015
1016 case 3: /* SHZ / NHZ */
1017 tmp = tcg_temp_new();
1018 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1019 tcg_gen_andc_reg(tmp, tmp, res);
1020 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
1021 cond = cond_make_0(TCG_COND_NE, tmp);
1022 break;
1023
1024 case 4: /* SDC / NDC */
1025 tcg_gen_andi_reg(cb, cb, 0x88888888u);
1026 cond = cond_make_0(TCG_COND_NE, cb);
1027 break;
1028
1029 case 6: /* SBC / NBC */
1030 tcg_gen_andi_reg(cb, cb, 0x80808080u);
1031 cond = cond_make_0(TCG_COND_NE, cb);
1032 break;
1033
1034 case 7: /* SHC / NHC */
1035 tcg_gen_andi_reg(cb, cb, 0x80008000u);
1036 cond = cond_make_0(TCG_COND_NE, cb);
1037 break;
1038
1039 default:
1040 g_assert_not_reached();
1041 }
1042 if (cf & 1) {
1043 cond.c = tcg_invert_cond(cond.c);
1044 }
1045
1046 return cond;
1047 }
1048
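/* For a 32-bit (!d) operation on a 64-bit register, the carry out of
   bit 31 is recovered as the carry into bit 32 of the cb word.  */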
1049 static TCGv_reg get_carry(DisasContext *ctx, bool d,
1050 TCGv_reg cb, TCGv_reg cb_msb)
1051 {
1052 if (cond_need_ext(ctx, d)) {
1053 TCGv_reg t = tcg_temp_new();
1054 tcg_gen_extract_reg(t, cb, 32, 1);
1055 return t;
1056 }
1057 return cb_msb;
1058 }
1059
1060 static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
1061 {
1062 return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
1063 }
1064
1065 /* Compute signed overflow for addition. */
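/* Overflow occurs iff the operands have the same sign and the result
   differs: sv = (res ^ in1) & ~(in1 ^ in2), leaving V in the sign bit
   of SV (tested with TCG_COND_LT in do_cond).  */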
1066 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1067 TCGv_reg in1, TCGv_reg in2)
1068 {
1069 TCGv_reg sv = tcg_temp_new();
1070 TCGv_reg tmp = tcg_temp_new();
1071
1072 tcg_gen_xor_reg(sv, res, in1);
1073 tcg_gen_xor_reg(tmp, in1, in2);
1074 tcg_gen_andc_reg(sv, sv, tmp);
1075
1076 return sv;
1077 }
1078
1079 /* Compute signed overflow for subtraction. */
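/* Overflow occurs iff the operands have opposite signs and the result
   differs in sign from in1: sv = (res ^ in1) & (in1 ^ in2).  */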
1080 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1081 TCGv_reg in1, TCGv_reg in2)
1082 {
1083 TCGv_reg sv = tcg_temp_new();
1084 TCGv_reg tmp = tcg_temp_new();
1085
1086 tcg_gen_xor_reg(sv, res, in1);
1087 tcg_gen_xor_reg(tmp, in1, in2);
1088 tcg_gen_and_reg(sv, sv, tmp);
1089
1090 return sv;
1091 }
1092
1093 static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1094 TCGv_reg in2, unsigned shift, bool is_l,
1095 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
1096 {
1097 TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
1098 unsigned c = cf >> 1;
1099 DisasCond cond;
1100 bool d = false;
1101
1102 dest = tcg_temp_new();
1103 cb = NULL;
1104 cb_msb = NULL;
1105 cb_cond = NULL;
1106
1107 if (shift) {
1108 tmp = tcg_temp_new();
1109 tcg_gen_shli_reg(tmp, in1, shift);
1110 in1 = tmp;
1111 }
1112
1113 if (!is_l || cond_need_cb(c)) {
1114 TCGv_reg zero = tcg_constant_reg(0);
1115 cb_msb = tcg_temp_new();
1116 cb = tcg_temp_new();
1117
1118 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
1119 if (is_c) {
1120 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
1121 get_psw_carry(ctx, d), zero);
1122 }
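/* cb = in1 ^ in2 ^ dest recovers the per-bit carry-in, so bit B of cb
   is the carry out of bit B-1; see get_carry above.  */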
1123 tcg_gen_xor_reg(cb, in1, in2);
1124 tcg_gen_xor_reg(cb, cb, dest);
1125 if (cond_need_cb(c)) {
1126 cb_cond = get_carry(ctx, d, cb, cb_msb);
1127 }
1128 } else {
1129 tcg_gen_add_reg(dest, in1, in2);
1130 if (is_c) {
1131 tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
1132 }
1133 }
1134
1135 /* Compute signed overflow if required. */
1136 sv = NULL;
1137 if (is_tsv || cond_need_sv(c)) {
1138 sv = do_add_sv(ctx, dest, in1, in2);
1139 if (is_tsv) {
1140 /* ??? Need to include overflow from shift. */
1141 gen_helper_tsv(tcg_env, sv);
1142 }
1143 }
1144
1145 /* Emit any conditional trap before any writeback. */
1146 cond = do_cond(cf, dest, cb_cond, sv);
1147 if (is_tc) {
1148 tmp = tcg_temp_new();
1149 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1150 gen_helper_tcond(tcg_env, tmp);
1151 }
1152
1153 /* Write back the result. */
1154 if (!is_l) {
1155 save_or_nullify(ctx, cpu_psw_cb, cb);
1156 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1157 }
1158 save_gpr(ctx, rt, dest);
1159
1160 /* Install the new nullification. */
1161 cond_free(&ctx->null_cond);
1162 ctx->null_cond = cond;
1163 }
1164
1165 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1166 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1167 {
1168 TCGv_reg tcg_r1, tcg_r2;
1169
1170 if (a->cf) {
1171 nullify_over(ctx);
1172 }
1173 tcg_r1 = load_gpr(ctx, a->r1);
1174 tcg_r2 = load_gpr(ctx, a->r2);
1175 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1176 return nullify_end(ctx);
1177 }
1178
1179 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1180 bool is_tsv, bool is_tc)
1181 {
1182 TCGv_reg tcg_im, tcg_r2;
1183
1184 if (a->cf) {
1185 nullify_over(ctx);
1186 }
1187 tcg_im = tcg_constant_reg(a->i);
1188 tcg_r2 = load_gpr(ctx, a->r);
1189 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1190 return nullify_end(ctx);
1191 }
1192
1193 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1194 TCGv_reg in2, bool is_tsv, bool is_b,
1195 bool is_tc, unsigned cf)
1196 {
1197 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
1198 unsigned c = cf >> 1;
1199 DisasCond cond;
1200 bool d = false;
1201
1202 dest = tcg_temp_new();
1203 cb = tcg_temp_new();
1204 cb_msb = tcg_temp_new();
1205
1206 zero = tcg_constant_reg(0);
1207 if (is_b) {
1208 /* DEST,C = IN1 + ~IN2 + C. */
1209 tcg_gen_not_reg(cb, in2);
1210 tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
1211 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1212 tcg_gen_xor_reg(cb, cb, in1);
1213 tcg_gen_xor_reg(cb, cb, dest);
1214 } else {
1215 /*
1216 * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1217 * operations by seeding the high word with 1 and subtracting.
1218 */
1219 TCGv_reg one = tcg_constant_reg(1);
1220 tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
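/* cb = ~(in1 ^ in2) ^ dest recovers the per-bit carry-in of
   IN1 + ~IN2 + 1, mirroring the addition case in do_add.  */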
1221 tcg_gen_eqv_reg(cb, in1, in2);
1222 tcg_gen_xor_reg(cb, cb, dest);
1223 }
1224
1225 /* Compute signed overflow if required. */
1226 sv = NULL;
1227 if (is_tsv || cond_need_sv(c)) {
1228 sv = do_sub_sv(ctx, dest, in1, in2);
1229 if (is_tsv) {
1230 gen_helper_tsv(tcg_env, sv);
1231 }
1232 }
1233
1234 /* Compute the condition. We cannot use the special case for borrow. */
1235 if (!is_b) {
1236 cond = do_sub_cond(cf, dest, in1, in2, sv);
1237 } else {
1238 cond = do_cond(cf, dest, get_carry(ctx, d, cb, cb_msb), sv);
1239 }
1240
1241 /* Emit any conditional trap before any writeback. */
1242 if (is_tc) {
1243 tmp = tcg_temp_new();
1244 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1245 gen_helper_tcond(tcg_env, tmp);
1246 }
1247
1248 /* Write back the result. */
1249 save_or_nullify(ctx, cpu_psw_cb, cb);
1250 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1251 save_gpr(ctx, rt, dest);
1252
1253 /* Install the new nullification. */
1254 cond_free(&ctx->null_cond);
1255 ctx->null_cond = cond;
1256 }
1257
1258 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1259 bool is_tsv, bool is_b, bool is_tc)
1260 {
1261 TCGv_reg tcg_r1, tcg_r2;
1262
1263 if (a->cf) {
1264 nullify_over(ctx);
1265 }
1266 tcg_r1 = load_gpr(ctx, a->r1);
1267 tcg_r2 = load_gpr(ctx, a->r2);
1268 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1269 return nullify_end(ctx);
1270 }
1271
1272 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1273 {
1274 TCGv_reg tcg_im, tcg_r2;
1275
1276 if (a->cf) {
1277 nullify_over(ctx);
1278 }
1279 tcg_im = tcg_constant_reg(a->i);
1280 tcg_r2 = load_gpr(ctx, a->r);
1281 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1282 return nullify_end(ctx);
1283 }
1284
1285 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1286 TCGv_reg in2, unsigned cf)
1287 {
1288 TCGv_reg dest, sv;
1289 DisasCond cond;
1290
1291 dest = tcg_temp_new();
1292 tcg_gen_sub_reg(dest, in1, in2);
1293
1294 /* Compute signed overflow if required. */
1295 sv = NULL;
1296 if (cond_need_sv(cf >> 1)) {
1297 sv = do_sub_sv(ctx, dest, in1, in2);
1298 }
1299
1300 /* Form the condition for the compare. */
1301 cond = do_sub_cond(cf, dest, in1, in2, sv);
1302
1303 /* Clear. */
1304 tcg_gen_movi_reg(dest, 0);
1305 save_gpr(ctx, rt, dest);
1306
1307 /* Install the new nullification. */
1308 cond_free(&ctx->null_cond);
1309 ctx->null_cond = cond;
1310 }
1311
1312 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1313 TCGv_reg in2, unsigned cf,
1314 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1315 {
1316 TCGv_reg dest = dest_gpr(ctx, rt);
1317
1318 /* Perform the operation, and writeback. */
1319 fn(dest, in1, in2);
1320 save_gpr(ctx, rt, dest);
1321
1322 /* Install the new nullification. */
1323 cond_free(&ctx->null_cond);
1324 if (cf) {
1325 ctx->null_cond = do_log_cond(cf, dest);
1326 }
1327 }
1328
1329 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1330 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1331 {
1332 TCGv_reg tcg_r1, tcg_r2;
1333
1334 if (a->cf) {
1335 nullify_over(ctx);
1336 }
1337 tcg_r1 = load_gpr(ctx, a->r1);
1338 tcg_r2 = load_gpr(ctx, a->r2);
1339 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1340 return nullify_end(ctx);
1341 }
1342
1343 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1344 TCGv_reg in2, unsigned cf, bool is_tc,
1345 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1346 {
1347 TCGv_reg dest;
1348 DisasCond cond;
1349
1350 if (cf == 0) {
1351 dest = dest_gpr(ctx, rt);
1352 fn(dest, in1, in2);
1353 save_gpr(ctx, rt, dest);
1354 cond_free(&ctx->null_cond);
1355 } else {
1356 dest = tcg_temp_new();
1357 fn(dest, in1, in2);
1358
1359 cond = do_unit_cond(cf, dest, in1, in2);
1360
1361 if (is_tc) {
1362 TCGv_reg tmp = tcg_temp_new();
1363 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1364 gen_helper_tcond(tcg_env, tmp);
1365 }
1366 save_gpr(ctx, rt, dest);
1367
1368 cond_free(&ctx->null_cond);
1369 ctx->null_cond = cond;
1370 }
1371 }
1372
1373 #ifndef CONFIG_USER_ONLY
1374 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1375 from the top 2 bits of the base register. There are a few system
1376 instructions that have a 3-bit space specifier, for which SR0 is
1377 not special. To handle this, pass ~SP. */
1378 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1379 {
1380 TCGv_ptr ptr;
1381 TCGv_reg tmp;
1382 TCGv_i64 spc;
1383
1384 if (sp != 0) {
1385 if (sp < 0) {
1386 sp = ~sp;
1387 }
1388 spc = tcg_temp_new_tl();
1389 load_spr(ctx, spc, sp);
1390 return spc;
1391 }
1392 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1393 return cpu_srH;
1394 }
1395
1396 ptr = tcg_temp_new_ptr();
1397 tmp = tcg_temp_new();
1398 spc = tcg_temp_new_tl();
1399
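/* The top two bits of the base register select sr4-sr7: shifting
   right by TARGET_REGISTER_BITS - 5 and masking with 030 leaves those
   bits scaled by 8, i.e. the byte offset of the chosen uint64_t entry
   within sr[4..7].  */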
1400 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1401 tcg_gen_andi_reg(tmp, tmp, 030);
1402 tcg_gen_trunc_reg_ptr(ptr, tmp);
1403
1404 tcg_gen_add_ptr(ptr, ptr, tcg_env);
1405 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1406
1407 return spc;
1408 }
1409 #endif
1410
1411 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1412 unsigned rb, unsigned rx, int scale, target_sreg disp,
1413 unsigned sp, int modify, bool is_phys)
1414 {
1415 TCGv_reg base = load_gpr(ctx, rb);
1416 TCGv_reg ofs;
1417
1418 /* Note that RX is mutually exclusive with DISP. */
1419 if (rx) {
1420 ofs = tcg_temp_new();
1421 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1422 tcg_gen_add_reg(ofs, ofs, base);
1423 } else if (disp || modify) {
1424 ofs = tcg_temp_new();
1425 tcg_gen_addi_reg(ofs, base, disp);
1426 } else {
1427 ofs = base;
1428 }
1429
1430 *pofs = ofs;
1431 #ifdef CONFIG_USER_ONLY
1432 *pgva = (modify <= 0 ? ofs : base);
1433 #else
1434 TCGv_tl addr = tcg_temp_new_tl();
1435 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1436 if (ctx->tb_flags & PSW_W) {
1437 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
1438 }
1439 if (!is_phys) {
1440 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1441 }
1442 *pgva = addr;
1443 #endif
1444 }
1445
1446 /* Emit a memory load. The modify parameter should be
1447 * < 0 for pre-modify,
1448 * > 0 for post-modify,
1449 * = 0 for no base register update.
1450 */
1451 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1452 unsigned rx, int scale, target_sreg disp,
1453 unsigned sp, int modify, MemOp mop)
1454 {
1455 TCGv_reg ofs;
1456 TCGv_tl addr;
1457
1458 /* Caller uses nullify_over/nullify_end. */
1459 assert(ctx->null_cond.c == TCG_COND_NEVER);
1460
1461 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1462 ctx->mmu_idx == MMU_PHYS_IDX);
1463 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1464 if (modify) {
1465 save_gpr(ctx, rb, ofs);
1466 }
1467 }
1468
1469 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1470 unsigned rx, int scale, target_sreg disp,
1471 unsigned sp, int modify, MemOp mop)
1472 {
1473 TCGv_reg ofs;
1474 TCGv_tl addr;
1475
1476 /* Caller uses nullify_over/nullify_end. */
1477 assert(ctx->null_cond.c == TCG_COND_NEVER);
1478
1479 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1480 ctx->mmu_idx == MMU_PHYS_IDX);
1481 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1482 if (modify) {
1483 save_gpr(ctx, rb, ofs);
1484 }
1485 }
1486
1487 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1488 unsigned rx, int scale, target_sreg disp,
1489 unsigned sp, int modify, MemOp mop)
1490 {
1491 TCGv_reg ofs;
1492 TCGv_tl addr;
1493
1494 /* Caller uses nullify_over/nullify_end. */
1495 assert(ctx->null_cond.c == TCG_COND_NEVER);
1496
1497 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1498 ctx->mmu_idx == MMU_PHYS_IDX);
1499 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1500 if (modify) {
1501 save_gpr(ctx, rb, ofs);
1502 }
1503 }
1504
1505 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1506 unsigned rx, int scale, target_sreg disp,
1507 unsigned sp, int modify, MemOp mop)
1508 {
1509 TCGv_reg ofs;
1510 TCGv_tl addr;
1511
1512 /* Caller uses nullify_over/nullify_end. */
1513 assert(ctx->null_cond.c == TCG_COND_NEVER);
1514
1515 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1516 ctx->mmu_idx == MMU_PHYS_IDX);
1517 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1518 if (modify) {
1519 save_gpr(ctx, rb, ofs);
1520 }
1521 }
1522
1523 #if TARGET_REGISTER_BITS == 64
1524 #define do_load_reg do_load_64
1525 #define do_store_reg do_store_64
1526 #else
1527 #define do_load_reg do_load_32
1528 #define do_store_reg do_store_32
1529 #endif
1530
1531 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1532 unsigned rx, int scale, target_sreg disp,
1533 unsigned sp, int modify, MemOp mop)
1534 {
1535 TCGv_reg dest;
1536
1537 nullify_over(ctx);
1538
1539 if (modify == 0) {
1540 /* No base register update. */
1541 dest = dest_gpr(ctx, rt);
1542 } else {
1543 /* Make sure that if RT == RB, we see the result of the load. */
1544 dest = tcg_temp_new();
1545 }
1546 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1547 save_gpr(ctx, rt, dest);
1548
1549 return nullify_end(ctx);
1550 }
1551
1552 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1553 unsigned rx, int scale, target_sreg disp,
1554 unsigned sp, int modify)
1555 {
1556 TCGv_i32 tmp;
1557
1558 nullify_over(ctx);
1559
1560 tmp = tcg_temp_new_i32();
1561 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1562 save_frw_i32(rt, tmp);
1563
1564 if (rt == 0) {
1565 gen_helper_loaded_fr0(tcg_env);
1566 }
1567
1568 return nullify_end(ctx);
1569 }
1570
1571 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1572 {
1573 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1574 a->disp, a->sp, a->m);
1575 }
1576
1577 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1578 unsigned rx, int scale, target_sreg disp,
1579 unsigned sp, int modify)
1580 {
1581 TCGv_i64 tmp;
1582
1583 nullify_over(ctx);
1584
1585 tmp = tcg_temp_new_i64();
1586 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1587 save_frd(rt, tmp);
1588
1589 if (rt == 0) {
1590 gen_helper_loaded_fr0(tcg_env);
1591 }
1592
1593 return nullify_end(ctx);
1594 }
1595
1596 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1597 {
1598 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1599 a->disp, a->sp, a->m);
1600 }
1601
1602 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1603 target_sreg disp, unsigned sp,
1604 int modify, MemOp mop)
1605 {
1606 nullify_over(ctx);
1607 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1608 return nullify_end(ctx);
1609 }
1610
1611 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1612 unsigned rx, int scale, target_sreg disp,
1613 unsigned sp, int modify)
1614 {
1615 TCGv_i32 tmp;
1616
1617 nullify_over(ctx);
1618
1619 tmp = load_frw_i32(rt);
1620 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1621
1622 return nullify_end(ctx);
1623 }
1624
1625 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1626 {
1627 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1628 a->disp, a->sp, a->m);
1629 }
1630
1631 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1632 unsigned rx, int scale, target_sreg disp,
1633 unsigned sp, int modify)
1634 {
1635 TCGv_i64 tmp;
1636
1637 nullify_over(ctx);
1638
1639 tmp = load_frd(rt);
1640 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1641
1642 return nullify_end(ctx);
1643 }
1644
1645 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1646 {
1647 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1648 a->disp, a->sp, a->m);
1649 }
1650
1651 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1652 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1653 {
1654 TCGv_i32 tmp;
1655
1656 nullify_over(ctx);
1657 tmp = load_frw0_i32(ra);
1658
1659 func(tmp, tcg_env, tmp);
1660
1661 save_frw_i32(rt, tmp);
1662 return nullify_end(ctx);
1663 }
1664
1665 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1666 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1667 {
1668 TCGv_i32 dst;
1669 TCGv_i64 src;
1670
1671 nullify_over(ctx);
1672 src = load_frd(ra);
1673 dst = tcg_temp_new_i32();
1674
1675 func(dst, tcg_env, src);
1676
1677 save_frw_i32(rt, dst);
1678 return nullify_end(ctx);
1679 }
1680
1681 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1682 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1683 {
1684 TCGv_i64 tmp;
1685
1686 nullify_over(ctx);
1687 tmp = load_frd0(ra);
1688
1689 func(tmp, tcg_env, tmp);
1690
1691 save_frd(rt, tmp);
1692 return nullify_end(ctx);
1693 }
1694
1695 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1696 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1697 {
1698 TCGv_i32 src;
1699 TCGv_i64 dst;
1700
1701 nullify_over(ctx);
1702 src = load_frw0_i32(ra);
1703 dst = tcg_temp_new_i64();
1704
1705 func(dst, tcg_env, src);
1706
1707 save_frd(rt, dst);
1708 return nullify_end(ctx);
1709 }
1710
1711 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1712 unsigned ra, unsigned rb,
1713 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1714 {
1715 TCGv_i32 a, b;
1716
1717 nullify_over(ctx);
1718 a = load_frw0_i32(ra);
1719 b = load_frw0_i32(rb);
1720
1721 func(a, tcg_env, a, b);
1722
1723 save_frw_i32(rt, a);
1724 return nullify_end(ctx);
1725 }
1726
1727 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1728 unsigned ra, unsigned rb,
1729 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1730 {
1731 TCGv_i64 a, b;
1732
1733 nullify_over(ctx);
1734 a = load_frd0(ra);
1735 b = load_frd0(rb);
1736
1737 func(a, tcg_env, a, b);
1738
1739 save_frd(rt, a);
1740 return nullify_end(ctx);
1741 }
1742
1743 /* Emit an unconditional branch to a direct target, which may or may not
1744 have already had nullification handled. */
1745 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1746 unsigned link, bool is_n)
1747 {
1748 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1749 if (link != 0) {
1750 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1751 }
1752 ctx->iaoq_n = dest;
1753 if (is_n) {
1754 ctx->null_cond.c = TCG_COND_ALWAYS;
1755 }
1756 } else {
1757 nullify_over(ctx);
1758
1759 if (link != 0) {
1760 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1761 }
1762
1763 if (is_n && use_nullify_skip(ctx)) {
1764 nullify_set(ctx, 0);
1765 gen_goto_tb(ctx, 0, dest, dest + 4);
1766 } else {
1767 nullify_set(ctx, is_n);
1768 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1769 }
1770
1771 nullify_end(ctx);
1772
1773 nullify_set(ctx, 0);
1774 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1775 ctx->base.is_jmp = DISAS_NORETURN;
1776 }
1777 return true;
1778 }
1779
1780 /* Emit a conditional branch to a direct target. If the branch itself
1781 is nullified, we should have already used nullify_over. */
1782 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1783 DisasCond *cond)
1784 {
1785 target_ureg dest = iaoq_dest(ctx, disp);
1786 TCGLabel *taken = NULL;
1787 TCGCond c = cond->c;
1788 bool n;
1789
1790 assert(ctx->null_cond.c == TCG_COND_NEVER);
1791
1792 /* Handle TRUE and NEVER as direct branches. */
1793 if (c == TCG_COND_ALWAYS) {
1794 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1795 }
1796 if (c == TCG_COND_NEVER) {
1797 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1798 }
1799
1800 taken = gen_new_label();
1801 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1802 cond_free(cond);
1803
1804 /* Not taken: Condition not satisfied; nullify on backward branches. */
1805 n = is_n && disp < 0;
1806 if (n && use_nullify_skip(ctx)) {
1807 nullify_set(ctx, 0);
1808 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1809 } else {
1810 if (!n && ctx->null_lab) {
1811 gen_set_label(ctx->null_lab);
1812 ctx->null_lab = NULL;
1813 }
1814 nullify_set(ctx, n);
1815 if (ctx->iaoq_n == -1) {
1816 /* The temporary iaoq_n_var died at the branch above.
1817 Regenerate it here instead of saving it. */
1818 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1819 }
1820 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1821 }
1822
1823 gen_set_label(taken);
1824
1825 /* Taken: Condition satisfied; nullify on forward branches. */
1826 n = is_n && disp >= 0;
1827 if (n && use_nullify_skip(ctx)) {
1828 nullify_set(ctx, 0);
1829 gen_goto_tb(ctx, 1, dest, dest + 4);
1830 } else {
1831 nullify_set(ctx, n);
1832 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1833 }
1834
1835 /* Not taken: the branch itself was nullified. */
1836 if (ctx->null_lab) {
1837 gen_set_label(ctx->null_lab);
1838 ctx->null_lab = NULL;
1839 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1840 } else {
1841 ctx->base.is_jmp = DISAS_NORETURN;
1842 }
1843 return true;
1844 }
1845
1846 /* Emit an unconditional branch to an indirect target. This handles
1847 nullification of the branch itself. */
1848 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1849 unsigned link, bool is_n)
1850 {
1851 TCGv_reg a0, a1, next, tmp;
1852 TCGCond c;
1853
1854 assert(ctx->null_lab == NULL);
1855
1856 if (ctx->null_cond.c == TCG_COND_NEVER) {
1857 if (link != 0) {
1858 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1859 }
1860 next = tcg_temp_new();
1861 tcg_gen_mov_reg(next, dest);
1862 if (is_n) {
1863 if (use_nullify_skip(ctx)) {
1864 tcg_gen_mov_reg(cpu_iaoq_f, next);
1865 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1866 nullify_set(ctx, 0);
1867 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1868 return true;
1869 }
1870 ctx->null_cond.c = TCG_COND_ALWAYS;
1871 }
1872 ctx->iaoq_n = -1;
1873 ctx->iaoq_n_var = next;
1874 } else if (is_n && use_nullify_skip(ctx)) {
1875 /* The (conditional) branch, B, nullifies the next insn, N,
1876 and we're allowed to skip execution N (no single-step or
1877 tracepoint in effect). Since the goto_ptr that we must use
1878 for the indirect branch consumes no special resources, we
1879 can (conditionally) skip B and continue execution. */
1880 /* The use_nullify_skip test implies we have a known control path. */
1881 tcg_debug_assert(ctx->iaoq_b != -1);
1882 tcg_debug_assert(ctx->iaoq_n != -1);
1883
1884 /* We do have to handle the non-local temporary, DEST, before
1885 branching. Since IAOQ_F is not really live at this point, we
1886 can simply store DEST optimistically. Similarly with IAOQ_B. */
1887 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1888 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1889
1890 nullify_over(ctx);
1891 if (link != 0) {
1892 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1893 }
1894 tcg_gen_lookup_and_goto_ptr();
1895 return nullify_end(ctx);
1896 } else {
1897 c = ctx->null_cond.c;
1898 a0 = ctx->null_cond.a0;
1899 a1 = ctx->null_cond.a1;
1900
1901 tmp = tcg_temp_new();
1902 next = tcg_temp_new();
1903
1904 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1905 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1906 ctx->iaoq_n = -1;
1907 ctx->iaoq_n_var = next;
1908
1909 if (link != 0) {
1910 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1911 }
1912
1913 if (is_n) {
1914 /* The branch nullifies the next insn, which means the state of N
1915 after the branch is the inverse of the state of N that applied
1916 to the branch. */
1917 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1918 cond_free(&ctx->null_cond);
1919 ctx->null_cond = cond_make_n();
1920 ctx->psw_n_nonzero = true;
1921 } else {
1922 cond_free(&ctx->null_cond);
1923 }
1924 }
1925 return true;
1926 }
1927
1928 /* Implement
1929 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1930 * IAOQ_Next{30..31} ← GR[b]{30..31};
1931 * else
1932 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1933 * which keeps the privilege level from being increased.
1934 */
1935 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1936 {
1937 TCGv_reg dest;
1938 switch (ctx->privilege) {
1939 case 0:
1940 /* Privilege 0 is maximum and is allowed to decrease. */
1941 return offset;
1942 case 3:
1943 /* Privilege 3 is minimum and is never allowed to increase. */
1944 dest = tcg_temp_new();
1945 tcg_gen_ori_reg(dest, offset, 3);
1946 break;
1947 default:
1948 dest = tcg_temp_new();
1949 tcg_gen_andi_reg(dest, offset, -4);
1950 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1951 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1952 break;
1953 }
1954 return dest;
1955 }
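/*
 * Illustrative walk-through (added commentary; addresses assumed):
 * at privilege 1, a target offset of 0x1000 (requesting privilege 0)
 * becomes (0x1000 & -4) | 1 = 0x1001, and the GTU movcond keeps the
 * numerically larger value, 0x1001 -- the promotion is denied.  A
 * target of 0x1003 (privilege 3) loses the movcond instead, so
 * dropping to a less privileged level is still allowed.
 */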
1956
1957 #ifdef CONFIG_USER_ONLY
1958 /* On Linux, page zero is normally marked execute only + gateway.
1959 Therefore normal read or write is supposed to fail, but specific
1960 offsets have kernel code mapped to raise permissions to implement
1961 system calls. Handling this via an explicit check here, rather
1962 than in the "be disp(sr2,r0)" instruction that probably sent us
1963 here, is the easiest way to handle the branch delay slot on the
1964 aforementioned BE. */
1965 static void do_page_zero(DisasContext *ctx)
1966 {
1967 /* If by some means we get here with PSW[N]=1, that implies that
1968 the B,GATE instruction would be skipped, and we'd fault on the
1969 next insn within the privileged page. */
1970 switch (ctx->null_cond.c) {
1971 case TCG_COND_NEVER:
1972 break;
1973 case TCG_COND_ALWAYS:
1974 tcg_gen_movi_reg(cpu_psw_n, 0);
1975 goto do_sigill;
1976 default:
1977 /* Since this is always the first (and only) insn within the
1978 TB, we should know the state of PSW[N] from TB->FLAGS. */
1979 g_assert_not_reached();
1980 }
1981
1982 /* Check that we didn't arrive here via some means that allowed
1983 non-sequential instruction execution. Normally the PSW[B] bit
1984 detects this by disallowing the B,GATE instruction to execute
1985 under such conditions. */
1986 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1987 goto do_sigill;
1988 }
1989
1990 switch (ctx->iaoq_f & -4) {
1991 case 0x00: /* Null pointer call */
1992 gen_excp_1(EXCP_IMP);
1993 ctx->base.is_jmp = DISAS_NORETURN;
1994 break;
1995
1996 case 0xb0: /* LWS */
1997 gen_excp_1(EXCP_SYSCALL_LWS);
1998 ctx->base.is_jmp = DISAS_NORETURN;
1999 break;
2000
2001 case 0xe0: /* SET_THREAD_POINTER */
2002 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
2003 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2004 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2005 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2006 break;
2007
2008 case 0x100: /* SYSCALL */
2009 gen_excp_1(EXCP_SYSCALL);
2010 ctx->base.is_jmp = DISAS_NORETURN;
2011 break;
2012
2013 default:
2014 do_sigill:
2015 gen_excp_1(EXCP_ILL);
2016 ctx->base.is_jmp = DISAS_NORETURN;
2017 break;
2018 }
2019 }
2020 #endif
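/*
 * For reference (added commentary; a sketch of the guest convention,
 * register choices per the Linux parisc ABI): user code reaches the
 * 0x100 entry above with
 *
 *     ble  0x100(%sr2, %r0)    ; branch into the gateway page
 *     ldi  __NR_write, %r20    ; syscall number, in the delay slot
 *
 * The explicit iaoq checks above stand in for the PSW[B]/gateway
 * protection that real hardware applies to such a BE/BLE.
 */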
2021
2022 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2023 {
2024 cond_free(&ctx->null_cond);
2025 return true;
2026 }
2027
2028 static bool trans_break(DisasContext *ctx, arg_break *a)
2029 {
2030 return gen_excp_iir(ctx, EXCP_BREAK);
2031 }
2032
2033 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2034 {
2035 /* No point in nullifying the memory barrier. */
2036 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2037
2038 cond_free(&ctx->null_cond);
2039 return true;
2040 }
2041
2042 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2043 {
2044 unsigned rt = a->t;
2045 TCGv_reg tmp = dest_gpr(ctx, rt);
2046 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2047 save_gpr(ctx, rt, tmp);
2048
2049 cond_free(&ctx->null_cond);
2050 return true;
2051 }
2052
2053 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2054 {
2055 unsigned rt = a->t;
2056 unsigned rs = a->sp;
2057 TCGv_i64 t0 = tcg_temp_new_i64();
2058 TCGv_reg t1 = tcg_temp_new();
2059
2060 load_spr(ctx, t0, rs);
2061 tcg_gen_shri_i64(t0, t0, 32);
2062 tcg_gen_trunc_i64_reg(t1, t0);
2063
2064 save_gpr(ctx, rt, t1);
2065
2066 cond_free(&ctx->null_cond);
2067 return true;
2068 }
2069
2070 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2071 {
2072 unsigned rt = a->t;
2073 unsigned ctl = a->r;
2074 TCGv_reg tmp;
2075
2076 switch (ctl) {
2077 case CR_SAR:
2078 #ifdef TARGET_HPPA64
2079 if (a->e == 0) {
2080 /* MFSAR without ,W masks low 5 bits. */
2081 tmp = dest_gpr(ctx, rt);
2082 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2083 save_gpr(ctx, rt, tmp);
2084 goto done;
2085 }
2086 #endif
2087 save_gpr(ctx, rt, cpu_sar);
2088 goto done;
2089 case CR_IT: /* Interval Timer */
2090 /* FIXME: Respect PSW_S bit. */
2091 nullify_over(ctx);
2092 tmp = dest_gpr(ctx, rt);
2093 if (translator_io_start(&ctx->base)) {
2094 gen_helper_read_interval_timer(tmp);
2095 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2096 } else {
2097 gen_helper_read_interval_timer(tmp);
2098 }
2099 save_gpr(ctx, rt, tmp);
2100 return nullify_end(ctx);
2101 case 26: /* cr26 and cr27 are readable without privilege */
2102 case 27:
2103 break;
2104 default:
2105 /* All other control registers are privileged. */
2106 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2107 break;
2108 }
2109
2110 tmp = tcg_temp_new();
2111 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2112 save_gpr(ctx, rt, tmp);
2113
2114 done:
2115 cond_free(&ctx->null_cond);
2116 return true;
2117 }
2118
2119 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2120 {
2121 unsigned rr = a->r;
2122 unsigned rs = a->sp;
2123 TCGv_i64 t64;
2124
2125 if (rs >= 5) {
2126 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2127 }
2128 nullify_over(ctx);
2129
2130 t64 = tcg_temp_new_i64();
2131 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2132 tcg_gen_shli_i64(t64, t64, 32);
2133
2134 if (rs >= 4) {
2135 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2136 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2137 } else {
2138 tcg_gen_mov_i64(cpu_sr[rs], t64);
2139 }
2140
2141 return nullify_end(ctx);
2142 }
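/*
 * Added note: space registers keep the space id in the high half of
 * the 64-bit value, hence the shift left by 32 above; trans_mfsp
 * mirrors it with a shift right by 32.  E.g. writing a gpr holding
 * 0x1234 stores 0x0000123400000000 into the space register.
 */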
2143
2144 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2145 {
2146 unsigned ctl = a->t;
2147 TCGv_reg reg;
2148 TCGv_reg tmp;
2149
2150 if (ctl == CR_SAR) {
2151 reg = load_gpr(ctx, a->r);
2152 tmp = tcg_temp_new();
2153 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2154 save_or_nullify(ctx, cpu_sar, tmp);
2155
2156 cond_free(&ctx->null_cond);
2157 return true;
2158 }
2159
2160 /* All other control registers are privileged or read-only. */
2161 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2162
2163 #ifndef CONFIG_USER_ONLY
2164 nullify_over(ctx);
2165 reg = load_gpr(ctx, a->r);
2166
2167 switch (ctl) {
2168 case CR_IT:
2169 gen_helper_write_interval_timer(tcg_env, reg);
2170 break;
2171 case CR_EIRR:
2172 gen_helper_write_eirr(tcg_env, reg);
2173 break;
2174 case CR_EIEM:
2175 gen_helper_write_eiem(tcg_env, reg);
2176 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2177 break;
2178
2179 case CR_IIASQ:
2180 case CR_IIAOQ:
2181 /* FIXME: Respect PSW_Q bit */
2182 /* The write advances the queue and stores to the back element. */
2183 tmp = tcg_temp_new();
2184 tcg_gen_ld_reg(tmp, tcg_env,
2185 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2186 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2187 tcg_gen_st_reg(reg, tcg_env,
2188 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2189 break;
2190
2191 case CR_PID1:
2192 case CR_PID2:
2193 case CR_PID3:
2194 case CR_PID4:
2195 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2196 #ifndef CONFIG_USER_ONLY
2197 gen_helper_change_prot_id(tcg_env);
2198 #endif
2199 break;
2200
2201 default:
2202 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2203 break;
2204 }
2205 return nullify_end(ctx);
2206 #endif
2207 }
2208
2209 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2210 {
2211 TCGv_reg tmp = tcg_temp_new();
2212
2213 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2214 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2215 save_or_nullify(ctx, cpu_sar, tmp);
2216
2217 cond_free(&ctx->null_cond);
2218 return true;
2219 }
2220
2221 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2222 {
2223 TCGv_reg dest = dest_gpr(ctx, a->t);
2224
2225 #ifdef CONFIG_USER_ONLY
2226 /* We don't implement space registers in user mode. */
2227 tcg_gen_movi_reg(dest, 0);
2228 #else
2229 TCGv_i64 t0 = tcg_temp_new_i64();
2230
2231 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2232 tcg_gen_shri_i64(t0, t0, 32);
2233 tcg_gen_trunc_i64_reg(dest, t0);
2234 #endif
2235 save_gpr(ctx, a->t, dest);
2236
2237 cond_free(&ctx->null_cond);
2238 return true;
2239 }
2240
2241 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2242 {
2243 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2244 #ifndef CONFIG_USER_ONLY
2245 TCGv_reg tmp;
2246
2247 nullify_over(ctx);
2248
2249 tmp = tcg_temp_new();
2250 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2251 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2252 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2253 save_gpr(ctx, a->t, tmp);
2254
2255 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2256 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2257 return nullify_end(ctx);
2258 #endif
2259 }
2260
2261 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2262 {
2263 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2264 #ifndef CONFIG_USER_ONLY
2265 TCGv_reg tmp;
2266
2267 nullify_over(ctx);
2268
2269 tmp = tcg_temp_new();
2270 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2271 tcg_gen_ori_reg(tmp, tmp, a->i);
2272 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2273 save_gpr(ctx, a->t, tmp);
2274
2275 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2276 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2277 return nullify_end(ctx);
2278 #endif
2279 }
2280
2281 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2282 {
2283 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2284 #ifndef CONFIG_USER_ONLY
2285 TCGv_reg tmp, reg;
2286 nullify_over(ctx);
2287
2288 reg = load_gpr(ctx, a->r);
2289 tmp = tcg_temp_new();
2290 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2291
2292 /* Exit the TB to recognize new interrupts. */
2293 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2294 return nullify_end(ctx);
2295 #endif
2296 }
2297
2298 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2299 {
2300 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2301 #ifndef CONFIG_USER_ONLY
2302 nullify_over(ctx);
2303
2304 if (rfi_r) {
2305 gen_helper_rfi_r(tcg_env);
2306 } else {
2307 gen_helper_rfi(tcg_env);
2308 }
2309 /* Exit the TB to recognize new interrupts. */
2310 tcg_gen_exit_tb(NULL, 0);
2311 ctx->base.is_jmp = DISAS_NORETURN;
2312
2313 return nullify_end(ctx);
2314 #endif
2315 }
2316
2317 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2318 {
2319 return do_rfi(ctx, false);
2320 }
2321
2322 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2323 {
2324 return do_rfi(ctx, true);
2325 }
2326
2327 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2328 {
2329 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2330 #ifndef CONFIG_USER_ONLY
2331 nullify_over(ctx);
2332 gen_helper_halt(tcg_env);
2333 ctx->base.is_jmp = DISAS_NORETURN;
2334 return nullify_end(ctx);
2335 #endif
2336 }
2337
2338 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2339 {
2340 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2341 #ifndef CONFIG_USER_ONLY
2342 nullify_over(ctx);
2343 gen_helper_reset(tcg_env);
2344 ctx->base.is_jmp = DISAS_NORETURN;
2345 return nullify_end(ctx);
2346 #endif
2347 }
2348
2349 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2350 {
2351 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2352 #ifndef CONFIG_USER_ONLY
2353 nullify_over(ctx);
2354 gen_helper_getshadowregs(tcg_env);
2355 return nullify_end(ctx);
2356 #endif
2357 }
2358
2359 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2360 {
2361 if (a->m) {
2362 TCGv_reg dest = dest_gpr(ctx, a->b);
2363 TCGv_reg src1 = load_gpr(ctx, a->b);
2364 TCGv_reg src2 = load_gpr(ctx, a->x);
2365
2366 /* The only thing we need to do is the base register modification. */
2367 tcg_gen_add_reg(dest, src1, src2);
2368 save_gpr(ctx, a->b, dest);
2369 }
2370 cond_free(&ctx->null_cond);
2371 return true;
2372 }
2373
2374 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2375 {
2376 TCGv_reg dest, ofs;
2377 TCGv_i32 level, want;
2378 TCGv_tl addr;
2379
2380 nullify_over(ctx);
2381
2382 dest = dest_gpr(ctx, a->t);
2383 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2384
2385 if (a->imm) {
2386 level = tcg_constant_i32(a->ri);
2387 } else {
2388 level = tcg_temp_new_i32();
2389 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2390 tcg_gen_andi_i32(level, level, 3);
2391 }
2392 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2393
2394 gen_helper_probe(dest, tcg_env, addr, level, want);
2395
2396 save_gpr(ctx, a->t, dest);
2397 return nullify_end(ctx);
2398 }
2399
2400 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2401 {
2402 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2403 #ifndef CONFIG_USER_ONLY
2404 TCGv_tl addr;
2405 TCGv_reg ofs, reg;
2406
2407 nullify_over(ctx);
2408
2409 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2410 reg = load_gpr(ctx, a->r);
2411 if (a->addr) {
2412 gen_helper_itlba(tcg_env, addr, reg);
2413 } else {
2414 gen_helper_itlbp(tcg_env, addr, reg);
2415 }
2416
2417 /* Exit TB for TLB change if mmu is enabled. */
2418 if (ctx->tb_flags & PSW_C) {
2419 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2420 }
2421 return nullify_end(ctx);
2422 #endif
2423 }
2424
2425 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2426 {
2427 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2428 #ifndef CONFIG_USER_ONLY
2429 TCGv_tl addr;
2430 TCGv_reg ofs;
2431
2432 nullify_over(ctx);
2433
2434 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2435 if (a->m) {
2436 save_gpr(ctx, a->b, ofs);
2437 }
2438 if (a->local) {
2439 gen_helper_ptlbe(tcg_env);
2440 } else {
2441 gen_helper_ptlb(tcg_env, addr);
2442 }
2443
2444 /* Exit TB for TLB change if mmu is enabled. */
2445 if (ctx->tb_flags & PSW_C) {
2446 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2447 }
2448 return nullify_end(ctx);
2449 #endif
2450 }
2451
2452 /*
2453 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2454 * See
2455 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2456 * page 13-9 (195/206)
2457 */
2458 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2459 {
2460 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2461 #ifndef CONFIG_USER_ONLY
2462 TCGv_tl addr, atl, stl;
2463 TCGv_reg reg;
2464
2465 nullify_over(ctx);
2466
2467 /*
2468 * FIXME:
2469 * if (not (pcxl or pcxl2))
2470 * return gen_illegal(ctx);
2471 *
2472 * Note for future: these are 32-bit systems; no hppa64.
2473 */
2474
2475 atl = tcg_temp_new_tl();
2476 stl = tcg_temp_new_tl();
2477 addr = tcg_temp_new_tl();
2478
2479 tcg_gen_ld32u_i64(stl, tcg_env,
2480 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2481 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2482 tcg_gen_ld32u_i64(atl, tcg_env,
2483 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2484 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2485 tcg_gen_shli_i64(stl, stl, 32);
2486 tcg_gen_or_tl(addr, atl, stl);
2487
2488 reg = load_gpr(ctx, a->r);
2489 if (a->addr) {
2490 gen_helper_itlba(tcg_env, addr, reg);
2491 } else {
2492 gen_helper_itlbp(tcg_env, addr, reg);
2493 }
2494
2495 /* Exit TB for TLB change if mmu is enabled. */
2496 if (ctx->tb_flags & PSW_C) {
2497 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2498 }
2499 return nullify_end(ctx);
2500 #endif
2501 }
2502
2503 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2504 {
2505 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2506 #ifndef CONFIG_USER_ONLY
2507 TCGv_tl vaddr;
2508 TCGv_reg ofs, paddr;
2509
2510 nullify_over(ctx);
2511
2512 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2513
2514 paddr = tcg_temp_new();
2515 gen_helper_lpa(paddr, tcg_env, vaddr);
2516
2517 /* Note that physical address result overrides base modification. */
2518 if (a->m) {
2519 save_gpr(ctx, a->b, ofs);
2520 }
2521 save_gpr(ctx, a->t, paddr);
2522
2523 return nullify_end(ctx);
2524 #endif
2525 }
2526
2527 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2528 {
2529 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2530
2531 /* The Coherence Index is an implementation-defined function of the
2532 physical address. Two addresses with the same CI have a coherent
2533 view of the cache. Our implementation is to return 0 for all,
2534 since the entire address space is coherent. */
2535 save_gpr(ctx, a->t, tcg_constant_reg(0));
2536
2537 cond_free(&ctx->null_cond);
2538 return true;
2539 }
2540
2541 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2542 {
2543 return do_add_reg(ctx, a, false, false, false, false);
2544 }
2545
2546 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2547 {
2548 return do_add_reg(ctx, a, true, false, false, false);
2549 }
2550
2551 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2552 {
2553 return do_add_reg(ctx, a, false, true, false, false);
2554 }
2555
2556 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2557 {
2558 return do_add_reg(ctx, a, false, false, false, true);
2559 }
2560
2561 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2562 {
2563 return do_add_reg(ctx, a, false, true, false, true);
2564 }
2565
2566 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2567 {
2568 return do_sub_reg(ctx, a, false, false, false);
2569 }
2570
2571 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2572 {
2573 return do_sub_reg(ctx, a, true, false, false);
2574 }
2575
2576 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2577 {
2578 return do_sub_reg(ctx, a, false, false, true);
2579 }
2580
2581 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2582 {
2583 return do_sub_reg(ctx, a, true, false, true);
2584 }
2585
2586 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2587 {
2588 return do_sub_reg(ctx, a, false, true, false);
2589 }
2590
2591 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2592 {
2593 return do_sub_reg(ctx, a, true, true, false);
2594 }
2595
2596 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2597 {
2598 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2599 }
2600
2601 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2602 {
2603 return do_log_reg(ctx, a, tcg_gen_and_reg);
2604 }
2605
2606 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2607 {
2608 if (a->cf == 0) {
2609 unsigned r2 = a->r2;
2610 unsigned r1 = a->r1;
2611 unsigned rt = a->t;
2612
2613 if (rt == 0) { /* NOP */
2614 cond_free(&ctx->null_cond);
2615 return true;
2616 }
2617 if (r2 == 0) { /* COPY */
2618 if (r1 == 0) {
2619 TCGv_reg dest = dest_gpr(ctx, rt);
2620 tcg_gen_movi_reg(dest, 0);
2621 save_gpr(ctx, rt, dest);
2622 } else {
2623 save_gpr(ctx, rt, cpu_gr[r1]);
2624 }
2625 cond_free(&ctx->null_cond);
2626 return true;
2627 }
2628 #ifndef CONFIG_USER_ONLY
2629 /* These are QEMU extensions and are nops in the real architecture:
2630 *
2631 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2632 * or %r31,%r31,%r31 -- death loop; offline the cpu
2633 * (currently implemented as idle).
2634 */
2635 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2636 /* No need to check for supervisor, as userland can only pause
2637 until the next timer interrupt. */
2638 nullify_over(ctx);
2639
2640 /* Advance the instruction queue. */
2641 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2642 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2643 nullify_set(ctx, 0);
2644
2645 /* Tell the qemu main loop to halt until this cpu has work. */
2646 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2647 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2648 gen_excp_1(EXCP_HALTED);
2649 ctx->base.is_jmp = DISAS_NORETURN;
2650
2651 return nullify_end(ctx);
2652 }
2653 #endif
2654 }
2655 return do_log_reg(ctx, a, tcg_gen_or_reg);
2656 }
2657
2658 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2659 {
2660 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2661 }
2662
2663 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2664 {
2665 TCGv_reg tcg_r1, tcg_r2;
2666
2667 if (a->cf) {
2668 nullify_over(ctx);
2669 }
2670 tcg_r1 = load_gpr(ctx, a->r1);
2671 tcg_r2 = load_gpr(ctx, a->r2);
2672 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2673 return nullify_end(ctx);
2674 }
2675
2676 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2677 {
2678 TCGv_reg tcg_r1, tcg_r2;
2679
2680 if (a->cf) {
2681 nullify_over(ctx);
2682 }
2683 tcg_r1 = load_gpr(ctx, a->r1);
2684 tcg_r2 = load_gpr(ctx, a->r2);
2685 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2686 return nullify_end(ctx);
2687 }
2688
2689 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2690 {
2691 TCGv_reg tcg_r1, tcg_r2, tmp;
2692
2693 if (a->cf) {
2694 nullify_over(ctx);
2695 }
2696 tcg_r1 = load_gpr(ctx, a->r1);
2697 tcg_r2 = load_gpr(ctx, a->r2);
2698 tmp = tcg_temp_new();
2699 tcg_gen_not_reg(tmp, tcg_r2);
2700 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2701 return nullify_end(ctx);
2702 }
2703
2704 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2705 {
2706 return do_uaddcm(ctx, a, false);
2707 }
2708
2709 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2710 {
2711 return do_uaddcm(ctx, a, true);
2712 }
2713
2714 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2715 {
2716 TCGv_reg tmp;
2717
2718 nullify_over(ctx);
2719
2720 tmp = tcg_temp_new();
2721 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2722 if (!is_i) {
2723 tcg_gen_not_reg(tmp, tmp);
2724 }
2725 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2726 tcg_gen_muli_reg(tmp, tmp, 6);
2727 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2728 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2729 return nullify_end(ctx);
2730 }
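/*
 * Worked example for do_dcor (added commentary; values assumed):
 * the binary SUB 0x42 - 0x17 = 0x2b borrows in the low BCD digit,
 * clearing that digit's carry bit in PSW[CB].  The code above
 * gathers one carry bit per nibble (mask 0x11111111), inverts it
 * for DCOR, and scales by 6, so 6 is subtracted from exactly the
 * digits that borrowed: 0x2b - 0x06 = 0x25, the correct decimal
 * result of 42 - 17.
 */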
2731
2732 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2733 {
2734 return do_dcor(ctx, a, false);
2735 }
2736
2737 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2738 {
2739 return do_dcor(ctx, a, true);
2740 }
2741
2742 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2743 {
2744 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2745 TCGv_reg cout;
2746
2747 nullify_over(ctx);
2748
2749 in1 = load_gpr(ctx, a->r1);
2750 in2 = load_gpr(ctx, a->r2);
2751
2752 add1 = tcg_temp_new();
2753 add2 = tcg_temp_new();
2754 addc = tcg_temp_new();
2755 dest = tcg_temp_new();
2756 zero = tcg_constant_reg(0);
2757
2758 /* Form R1 << 1 | PSW[CB]{8}. */
2759 tcg_gen_add_reg(add1, in1, in1);
2760 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2761
2762 /*
2763 * Add or subtract R2, depending on PSW[V]. Proper computation of
2764 * carry requires that we subtract via + ~R2 + 1, as described in
2765 * the manual. By extracting and masking V, we can produce the
2766 * proper inputs to the addition without movcond.
2767 */
2768 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2769 tcg_gen_xor_reg(add2, in2, addc);
2770 tcg_gen_andi_reg(addc, addc, 1);
2771
2772 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2773 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2774
2775 /* Write back the result register. */
2776 save_gpr(ctx, a->t, dest);
2777
2778 /* Write back PSW[CB]. */
2779 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2780 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2781
2782 /* Write back PSW[V] for the division step. */
2783 cout = get_psw_carry(ctx, false);
2784 tcg_gen_neg_reg(cpu_psw_v, cout);
2785 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2786
2787 /* Install the new nullification. */
2788 if (a->cf) {
2789 TCGv_reg sv = NULL;
2790 if (cond_need_sv(a->cf >> 1)) {
2791 /* ??? The lshift is supposed to contribute to overflow. */
2792 sv = do_add_sv(ctx, dest, add1, add2);
2793 }
2794 ctx->null_cond = do_cond(a->cf, dest, cout, sv);
2795 }
2796
2797 return nullify_end(ctx);
2798 }
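/*
 * Context (added commentary, a sketch rather than exact milli-code):
 * DS is the PA-RISC non-restoring divide step.  Guest division
 * routines prime PSW[V]/PSW[CB] with an initial add or subtract and
 * then issue DS once per quotient bit, e.g. 32 times for a 32-bit
 * divide, which is why PSW[V] is written back above for the next step.
 */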
2799
2800 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2801 {
2802 return do_add_imm(ctx, a, false, false);
2803 }
2804
2805 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2806 {
2807 return do_add_imm(ctx, a, true, false);
2808 }
2809
2810 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2811 {
2812 return do_add_imm(ctx, a, false, true);
2813 }
2814
2815 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2816 {
2817 return do_add_imm(ctx, a, true, true);
2818 }
2819
2820 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2821 {
2822 return do_sub_imm(ctx, a, false);
2823 }
2824
2825 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2826 {
2827 return do_sub_imm(ctx, a, true);
2828 }
2829
2830 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2831 {
2832 TCGv_reg tcg_im, tcg_r2;
2833
2834 if (a->cf) {
2835 nullify_over(ctx);
2836 }
2837
2838 tcg_im = tcg_constant_reg(a->i);
2839 tcg_r2 = load_gpr(ctx, a->r);
2840 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2841
2842 return nullify_end(ctx);
2843 }
2844
2845 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2846 {
2847 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2848 return gen_illegal(ctx);
2849 } else {
2850 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2851 a->disp, a->sp, a->m, a->size | MO_TE);
2852 }
2853 }
2854
2855 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2856 {
2857 assert(a->x == 0 && a->scale == 0);
2858 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2859 return gen_illegal(ctx);
2860 } else {
2861 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2862 }
2863 }
2864
2865 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2866 {
2867 MemOp mop = MO_TE | MO_ALIGN | a->size;
2868 TCGv_reg zero, dest, ofs;
2869 TCGv_tl addr;
2870
2871 nullify_over(ctx);
2872
2873 if (a->m) {
2874 /* Base register modification. Make sure that if RT == RB,
2875 we see the result of the load. */
2876 dest = tcg_temp_new();
2877 } else {
2878 dest = dest_gpr(ctx, a->t);
2879 }
2880
2881 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2882 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2883
2884 /*
2885 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2886 * However, actual hardware succeeds if merely aligned mod 4.
2887 * Detect this case and log a GUEST_ERROR.
2888 *
2889 * TODO: HPPA64 relaxes the over-alignment requirement
2890 * with the ,co completer.
2891 */
2892 gen_helper_ldc_check(addr);
2893
2894 zero = tcg_constant_reg(0);
2895 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2896
2897 if (a->m) {
2898 save_gpr(ctx, a->b, ofs);
2899 }
2900 save_gpr(ctx, a->t, dest);
2901
2902 return nullify_end(ctx);
2903 }
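/*
 * Guest idiom sketch (added commentary; parisc locks mark 1 as free
 * and 0 as held):
 *
 *     ldcw     0(%r26), %r28   ; atomically fetch old value, store 0
 *     comib,=  0, %r28, spin   ; got 0 -> lock was already held
 *
 * which is exactly the xchg-with-zero emitted above.
 */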
2904
2905 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2906 {
2907 TCGv_reg ofs, val;
2908 TCGv_tl addr;
2909
2910 nullify_over(ctx);
2911
2912 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2913 ctx->mmu_idx == MMU_PHYS_IDX);
2914 val = load_gpr(ctx, a->r);
2915 if (a->a) {
2916 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2917 gen_helper_stby_e_parallel(tcg_env, addr, val);
2918 } else {
2919 gen_helper_stby_e(tcg_env, addr, val);
2920 }
2921 } else {
2922 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2923 gen_helper_stby_b_parallel(tcg_env, addr, val);
2924 } else {
2925 gen_helper_stby_b(tcg_env, addr, val);
2926 }
2927 }
2928 if (a->m) {
2929 tcg_gen_andi_reg(ofs, ofs, ~3);
2930 save_gpr(ctx, a->b, ofs);
2931 }
2932
2933 return nullify_end(ctx);
2934 }
2935
2936 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2937 {
2938 int hold_mmu_idx = ctx->mmu_idx;
2939
2940 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2941 ctx->mmu_idx = MMU_PHYS_IDX;
2942 trans_ld(ctx, a);
2943 ctx->mmu_idx = hold_mmu_idx;
2944 return true;
2945 }
2946
2947 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2948 {
2949 int hold_mmu_idx = ctx->mmu_idx;
2950
2951 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2952 ctx->mmu_idx = MMU_PHYS_IDX;
2953 trans_st(ctx, a);
2954 ctx->mmu_idx = hold_mmu_idx;
2955 return true;
2956 }
2957
2958 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2959 {
2960 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2961
2962 tcg_gen_movi_reg(tcg_rt, a->i);
2963 save_gpr(ctx, a->t, tcg_rt);
2964 cond_free(&ctx->null_cond);
2965 return true;
2966 }
2967
2968 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2969 {
2970 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2971 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2972
2973 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2974 save_gpr(ctx, 1, tcg_r1);
2975 cond_free(&ctx->null_cond);
2976 return true;
2977 }
2978
2979 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2980 {
2981 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2982
2983 /* Special case rb == 0, for the LDI pseudo-op.
2984 The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
2985 if (a->b == 0) {
2986 tcg_gen_movi_reg(tcg_rt, a->i);
2987 } else {
2988 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2989 }
2990 save_gpr(ctx, a->t, tcg_rt);
2991 cond_free(&ctx->null_cond);
2992 return true;
2993 }
2994
2995 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2996 unsigned c, unsigned f, unsigned n, int disp)
2997 {
2998 TCGv_reg dest, in2, sv;
2999 DisasCond cond;
3000
3001 in2 = load_gpr(ctx, r);
3002 dest = tcg_temp_new();
3003
3004 tcg_gen_sub_reg(dest, in1, in2);
3005
3006 sv = NULL;
3007 if (cond_need_sv(c)) {
3008 sv = do_sub_sv(ctx, dest, in1, in2);
3009 }
3010
3011 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3012 return do_cbranch(ctx, disp, n, &cond);
3013 }
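/*
 * Added note on the c * 2 + f packing above: the completer's "false"
 * bit becomes the low bit of the condition index, so e.g. c = 1 (=)
 * with f = 1 selects the negated condition (<>), letting do_sub_cond
 * serve both polarities from one table.
 */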
3014
3015 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3016 {
3017 nullify_over(ctx);
3018 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3019 }
3020
3021 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3022 {
3023 nullify_over(ctx);
3024 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3025 }
3026
3027 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3028 unsigned c, unsigned f, unsigned n, int disp)
3029 {
3030 TCGv_reg dest, in2, sv, cb_cond;
3031 DisasCond cond;
3032 bool d = false;
3033
3034 in2 = load_gpr(ctx, r);
3035 dest = tcg_temp_new();
3036 sv = NULL;
3037 cb_cond = NULL;
3038
3039 if (cond_need_cb(c)) {
3040 TCGv_reg cb = tcg_temp_new();
3041 TCGv_reg cb_msb = tcg_temp_new();
3042
3043 tcg_gen_movi_reg(cb_msb, 0);
3044 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3045 tcg_gen_xor_reg(cb, in1, in2);
3046 tcg_gen_xor_reg(cb, cb, dest);
3047 cb_cond = get_carry(ctx, d, cb, cb_msb);
3048 } else {
3049 tcg_gen_add_reg(dest, in1, in2);
3050 }
3051 if (cond_need_sv(c)) {
3052 sv = do_add_sv(ctx, dest, in1, in2);
3053 }
3054
3055 cond = do_cond(c * 2 + f, dest, cb_cond, sv);
3056 save_gpr(ctx, r, dest);
3057 return do_cbranch(ctx, disp, n, &cond);
3058 }
3059
3060 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3061 {
3062 nullify_over(ctx);
3063 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3064 }
3065
3066 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3067 {
3068 nullify_over(ctx);
3069 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3070 }
3071
3072 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3073 {
3074 TCGv_reg tmp, tcg_r;
3075 DisasCond cond;
3076
3077 nullify_over(ctx);
3078
3079 tmp = tcg_temp_new();
3080 tcg_r = load_gpr(ctx, a->r);
3081 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3082
3083 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3084 return do_cbranch(ctx, a->disp, a->n, &cond);
3085 }
3086
3087 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3088 {
3089 TCGv_reg tmp, tcg_r;
3090 DisasCond cond;
3091
3092 nullify_over(ctx);
3093
3094 tmp = tcg_temp_new();
3095 tcg_r = load_gpr(ctx, a->r);
3096 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3097
3098 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3099 return do_cbranch(ctx, a->disp, a->n, &cond);
3100 }
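/*
 * Added note: both BB forms rely on sign testing.  PA-RISC numbers
 * bits big-endian (bit 0 is the MSB), so shifting left by the bit
 * position parks the tested bit in the sign bit, where LT/GE against
 * zero reads it out.  E.g. for bit 2 of 0x20000000 (32-bit case):
 * 0x20000000 << 2 = 0x80000000, negative, so the bit is set.
 */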
3101
3102 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3103 {
3104 TCGv_reg dest;
3105 DisasCond cond;
3106
3107 nullify_over(ctx);
3108
3109 dest = dest_gpr(ctx, a->r2);
3110 if (a->r1 == 0) {
3111 tcg_gen_movi_reg(dest, 0);
3112 } else {
3113 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3114 }
3115
3116 cond = do_sed_cond(a->c, dest);
3117 return do_cbranch(ctx, a->disp, a->n, &cond);
3118 }
3119
3120 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3121 {
3122 TCGv_reg dest;
3123 DisasCond cond;
3124
3125 nullify_over(ctx);
3126
3127 dest = dest_gpr(ctx, a->r);
3128 tcg_gen_movi_reg(dest, a->i);
3129
3130 cond = do_sed_cond(a->c, dest);
3131 return do_cbranch(ctx, a->disp, a->n, &cond);
3132 }
3133
3134 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3135 {
3136 TCGv_reg dest;
3137
3138 if (a->c) {
3139 nullify_over(ctx);
3140 }
3141
3142 dest = dest_gpr(ctx, a->t);
3143 if (a->r1 == 0) {
3144 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3145 tcg_gen_shr_reg(dest, dest, cpu_sar);
3146 } else if (a->r1 == a->r2) {
3147 TCGv_i32 t32 = tcg_temp_new_i32();
3148 TCGv_i32 s32 = tcg_temp_new_i32();
3149
3150 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3151 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3152 tcg_gen_rotr_i32(t32, t32, s32);
3153 tcg_gen_extu_i32_reg(dest, t32);
3154 } else {
3155 TCGv_i64 t = tcg_temp_new_i64();
3156 TCGv_i64 s = tcg_temp_new_i64();
3157
3158 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3159 tcg_gen_extu_reg_i64(s, cpu_sar);
3160 tcg_gen_shr_i64(t, t, s);
3161 tcg_gen_trunc_i64_reg(dest, t);
3162 }
3163 save_gpr(ctx, a->t, dest);
3164
3165 /* Install the new nullification. */
3166 cond_free(&ctx->null_cond);
3167 if (a->c) {
3168 ctx->null_cond = do_sed_cond(a->c, dest);
3169 }
3170 return nullify_end(ctx);
3171 }
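/*
 * Added note: the three cases above are one operation viewed three
 * ways.  SHRPW shifts the 64-bit pair r1:r2 right by SAR; r1 == 0
 * degenerates to a plain shift, and r1 == r2 makes both halves equal,
 * i.e. a 32-bit rotate: r1 = r2 = 0x12345678 with SAR = 8 yields
 * 0x78123456.
 */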
3172
3173 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3174 {
3175 unsigned sa = 31 - a->cpos;
3176 TCGv_reg dest, t2;
3177
3178 if (a->c) {
3179 nullify_over(ctx);
3180 }
3181
3182 dest = dest_gpr(ctx, a->t);
3183 t2 = load_gpr(ctx, a->r2);
3184 if (a->r1 == 0) {
3185 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3186 } else if (TARGET_REGISTER_BITS == 32) {
3187 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3188 } else if (a->r1 == a->r2) {
3189 TCGv_i32 t32 = tcg_temp_new_i32();
3190 tcg_gen_trunc_reg_i32(t32, t2);
3191 tcg_gen_rotri_i32(t32, t32, sa);
3192 tcg_gen_extu_i32_reg(dest, t32);
3193 } else {
3194 TCGv_i64 t64 = tcg_temp_new_i64();
3195 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3196 tcg_gen_shri_i64(t64, t64, sa);
3197 tcg_gen_trunc_i64_reg(dest, t64);
3198 }
3199 save_gpr(ctx, a->t, dest);
3200
3201 /* Install the new nullification. */
3202 cond_free(&ctx->null_cond);
3203 if (a->c) {
3204 ctx->null_cond = do_sed_cond(a->c, dest);
3205 }
3206 return nullify_end(ctx);
3207 }
3208
3209 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3210 {
3211 unsigned len = 32 - a->clen;
3212 TCGv_reg dest, src, tmp;
3213
3214 if (a->c) {
3215 nullify_over(ctx);
3216 }
3217
3218 dest = dest_gpr(ctx, a->t);
3219 src = load_gpr(ctx, a->r);
3220 tmp = tcg_temp_new();
3221
3222 /* Recall that SAR uses big-endian bit numbering. */
3223 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3224 if (a->se) {
3225 tcg_gen_sar_reg(dest, src, tmp);
3226 tcg_gen_sextract_reg(dest, dest, 0, len);
3227 } else {
3228 tcg_gen_shr_reg(dest, src, tmp);
3229 tcg_gen_extract_reg(dest, dest, 0, len);
3230 }
3231 save_gpr(ctx, a->t, dest);
3232
3233 /* Install the new nullification. */
3234 cond_free(&ctx->null_cond);
3235 if (a->c) {
3236 ctx->null_cond = do_sed_cond(a->c, dest);
3237 }
3238 return nullify_end(ctx);
3239 }
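/*
 * Worked example (added commentary): SAR holds the big-endian
 * position of the field's rightmost bit, so for SAR = 26 in the
 * 32-bit case the xor with 31 gives a machine shift of 5; the
 * [s]extract then keeps the low len bits of the shifted value.
 */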
3240
3241 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3242 {
3243 unsigned len = 32 - a->clen;
3244 unsigned cpos = 31 - a->pos;
3245 TCGv_reg dest, src;
3246
3247 if (a->c) {
3248 nullify_over(ctx);
3249 }
3250
3251 dest = dest_gpr(ctx, a->t);
3252 src = load_gpr(ctx, a->r);
3253 if (a->se) {
3254 tcg_gen_sextract_reg(dest, src, cpos, len);
3255 } else {
3256 tcg_gen_extract_reg(dest, src, cpos, len);
3257 }
3258 save_gpr(ctx, a->t, dest);
3259
3260 /* Install the new nullification. */
3261 cond_free(&ctx->null_cond);
3262 if (a->c) {
3263 ctx->null_cond = do_sed_cond(a->c, dest);
3264 }
3265 return nullify_end(ctx);
3266 }
3267
3268 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3269 {
3270 unsigned len = 32 - a->clen;
3271 target_sreg mask0, mask1;
3272 TCGv_reg dest;
3273
3274 if (a->c) {
3275 nullify_over(ctx);
3276 }
3277 if (a->cpos + len > 32) {
3278 len = 32 - a->cpos;
3279 }
3280
3281 dest = dest_gpr(ctx, a->t);
3282 mask0 = deposit64(0, a->cpos, len, a->i);
3283 mask1 = deposit64(-1, a->cpos, len, a->i);
3284
3285 if (a->nz) {
3286 TCGv_reg src = load_gpr(ctx, a->t);
3287 if (mask1 != -1) {
3288 tcg_gen_andi_reg(dest, src, mask1);
3289 src = dest;
3290 }
3291 tcg_gen_ori_reg(dest, src, mask0);
3292 } else {
3293 tcg_gen_movi_reg(dest, mask0);
3294 }
3295 save_gpr(ctx, a->t, dest);
3296
3297 /* Install the new nullification. */
3298 cond_free(&ctx->null_cond);
3299 if (a->c) {
3300 ctx->null_cond = do_sed_cond(a->c, dest);
3301 }
3302 return nullify_end(ctx);
3303 }
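/*
 * Mask example (added commentary; field values assumed): depositing
 * i = 5 at cpos = 8 with len = 4 gives mask0 = 0x500 (the new field
 * bits alone) and mask1 = 0x...fffff5ff (all ones outside the field).
 * The nz case thus computes dest = (src & mask1) | mask0: the field
 * is replaced by i and every other bit of src survives.
 */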
3304
3305 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3306 {
3307 unsigned rs = a->nz ? a->t : 0;
3308 unsigned len = 32 - a->clen;
3309 TCGv_reg dest, val;
3310
3311 if (a->c) {
3312 nullify_over(ctx);
3313 }
3314 if (a->cpos + len > 32) {
3315 len = 32 - a->cpos;
3316 }
3317
3318 dest = dest_gpr(ctx, a->t);
3319 val = load_gpr(ctx, a->r);
3320 if (rs == 0) {
3321 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3322 } else {
3323 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3324 }
3325 save_gpr(ctx, a->t, dest);
3326
3327 /* Install the new nullification. */
3328 cond_free(&ctx->null_cond);
3329 if (a->c) {
3330 ctx->null_cond = do_sed_cond(a->c, dest);
3331 }
3332 return nullify_end(ctx);
3333 }
3334
3335 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3336 unsigned nz, unsigned clen, TCGv_reg val)
3337 {
3338 unsigned rs = nz ? rt : 0;
3339 unsigned len = 32 - clen;
3340 TCGv_reg mask, tmp, shift, dest;
3341 unsigned msb = 1U << (len - 1);
3342
3343 dest = dest_gpr(ctx, rt);
3344 shift = tcg_temp_new();
3345 tmp = tcg_temp_new();
3346
3347 /* Convert big-endian bit numbering in SAR to left-shift. */
3348 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3349
3350 mask = tcg_temp_new();
3351 tcg_gen_movi_reg(mask, msb + (msb - 1));
3352 tcg_gen_and_reg(tmp, val, mask);
3353 if (rs) {
3354 tcg_gen_shl_reg(mask, mask, shift);
3355 tcg_gen_shl_reg(tmp, tmp, shift);
3356 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3357 tcg_gen_or_reg(dest, dest, tmp);
3358 } else {
3359 tcg_gen_shl_reg(dest, tmp, shift);
3360 }
3361 save_gpr(ctx, rt, dest);
3362
3363 /* Install the new nullification. */
3364 cond_free(&ctx->null_cond);
3365 if (c) {
3366 ctx->null_cond = do_sed_cond(c, dest);
3367 }
3368 return nullify_end(ctx);
3369 }
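/*
 * Added note on the mask arithmetic above: msb + (msb - 1) is a
 * solid field of len ones (len = 8 gives 0x80 + 0x7f = 0xff).
 * Shifting both mask and masked value left by the converted SAR
 * amount positions the field, and the andc/or pair splices it into
 * the old register -- a deposit at a variable position.
 */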
3370
3371 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3372 {
3373 if (a->c) {
3374 nullify_over(ctx);
3375 }
3376 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3377 }
3378
3379 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3380 {
3381 if (a->c) {
3382 nullify_over(ctx);
3383 }
3384 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
3385 }
3386
3387 static bool trans_be(DisasContext *ctx, arg_be *a)
3388 {
3389 TCGv_reg tmp;
3390
3391 #ifdef CONFIG_USER_ONLY
3392 /* ??? It seems like there should be a good way of using
3393 "be disp(sr2, r0)", the canonical gateway entry mechanism
3394 to our advantage. But that appears to be inconvenient to
3395 manage along side branch delay slots. Therefore we handle
3396 entry into the gateway page via absolute address. */
3397 /* Since we don't implement spaces, just branch. Do notice the special
3398 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3399 goto_tb to the TB containing the syscall. */
3400 if (a->b == 0) {
3401 return do_dbranch(ctx, a->disp, a->l, a->n);
3402 }
3403 #else
3404 nullify_over(ctx);
3405 #endif
3406
3407 tmp = tcg_temp_new();
3408 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3409 tmp = do_ibranch_priv(ctx, tmp);
3410
3411 #ifdef CONFIG_USER_ONLY
3412 return do_ibranch(ctx, tmp, a->l, a->n);
3413 #else
3414 TCGv_i64 new_spc = tcg_temp_new_i64();
3415
3416 load_spr(ctx, new_spc, a->sp);
3417 if (a->l) {
3418 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3419 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3420 }
3421 if (a->n && use_nullify_skip(ctx)) {
3422 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3423 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3424 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3425 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3426 } else {
3427 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3428 if (ctx->iaoq_b == -1) {
3429 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3430 }
3431 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3432 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3433 nullify_set(ctx, a->n);
3434 }
3435 tcg_gen_lookup_and_goto_ptr();
3436 ctx->base.is_jmp = DISAS_NORETURN;
3437 return nullify_end(ctx);
3438 #endif
3439 }
3440
3441 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3442 {
3443 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3444 }
3445
3446 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3447 {
3448 target_ureg dest = iaoq_dest(ctx, a->disp);
3449
3450 nullify_over(ctx);
3451
3452 /* Make sure the caller hasn't done something weird with the queue.
3453 * ??? This is not quite the same as the PSW[B] bit, which would be
3454 * expensive to track. Real hardware will trap for
3455 * b gateway
3456 * b gateway+4 (in delay slot of first branch)
3457 * However, checking for a non-sequential instruction queue *will*
3458 * diagnose the security hole
3459 * b gateway
3460 * b evil
3461 * in which instructions at evil would run with increased privs.
3462 */
3463 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3464 return gen_illegal(ctx);
3465 }
3466
3467 #ifndef CONFIG_USER_ONLY
3468 if (ctx->tb_flags & PSW_C) {
3469 CPUHPPAState *env = cpu_env(ctx->cs);
3470 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3471 /* If we could not find a TLB entry, then we need to generate an
3472 ITLB miss exception so the kernel will provide it.
3473 The resulting TLB fill operation will invalidate this TB and
3474 we will re-translate, at which point we *will* be able to find
3475 the TLB entry and determine if this is in fact a gateway page. */
3476 if (type < 0) {
3477 gen_excp(ctx, EXCP_ITLB_MISS);
3478 return true;
3479 }
3480 /* No change for non-gateway pages or for priv decrease. */
3481 if (type >= 4 && type - 4 < ctx->privilege) {
3482 dest = deposit32(dest, 0, 2, type - 4);
3483 }
3484 } else {
3485 dest &= -4; /* priv = 0 */
3486 }
3487 #endif
3488
3489 if (a->l) {
3490 TCGv_reg tmp = dest_gpr(ctx, a->l);
3491 if (ctx->privilege < 3) {
3492 tcg_gen_andi_reg(tmp, tmp, -4);
3493 }
3494 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3495 save_gpr(ctx, a->l, tmp);
3496 }
3497
3498 return do_dbranch(ctx, dest, 0, a->n);
3499 }
3500
3501 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3502 {
3503 if (a->x) {
3504 TCGv_reg tmp = tcg_temp_new();
3505 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3506 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3507 /* The computation here never changes privilege level. */
3508 return do_ibranch(ctx, tmp, a->l, a->n);
3509 } else {
3510 /* BLR R0,RX is a good way to load PC+8 into RX. */
3511 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3512 }
3513 }
3514
3515 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3516 {
3517 TCGv_reg dest;
3518
3519 if (a->x == 0) {
3520 dest = load_gpr(ctx, a->b);
3521 } else {
3522 dest = tcg_temp_new();
3523 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3524 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3525 }
3526 dest = do_ibranch_priv(ctx, dest);
3527 return do_ibranch(ctx, dest, 0, a->n);
3528 }
3529
3530 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3531 {
3532 TCGv_reg dest;
3533
3534 #ifdef CONFIG_USER_ONLY
3535 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3536 return do_ibranch(ctx, dest, a->l, a->n);
3537 #else
3538 nullify_over(ctx);
3539 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3540
3541 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3542 if (ctx->iaoq_b == -1) {
3543 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3544 }
3545 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3546 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3547 if (a->l) {
3548 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3549 }
3550 nullify_set(ctx, a->n);
3551 tcg_gen_lookup_and_goto_ptr();
3552 ctx->base.is_jmp = DISAS_NORETURN;
3553 return nullify_end(ctx);
3554 #endif
3555 }
3556
3557 /*
3558 * Float class 0
3559 */
3560
3561 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3562 {
3563 tcg_gen_mov_i32(dst, src);
3564 }
3565
3566 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3567 {
3568 uint64_t ret;
3569
3570 if (TARGET_REGISTER_BITS == 64) {
3571 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3572 } else {
3573 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3574 }
3575
3576 nullify_over(ctx);
3577 save_frd(0, tcg_constant_i64(ret));
3578 return nullify_end(ctx);
3579 }
3580
3581 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3582 {
3583 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3584 }
3585
3586 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3587 {
3588 tcg_gen_mov_i64(dst, src);
3589 }
3590
3591 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3592 {
3593 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3594 }
3595
3596 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3597 {
3598 tcg_gen_andi_i32(dst, src, INT32_MAX);
3599 }
3600
3601 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3602 {
3603 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3604 }
3605
3606 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3607 {
3608 tcg_gen_andi_i64(dst, src, INT64_MAX);
3609 }
3610
3611 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3612 {
3613 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3614 }
3615
3616 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3617 {
3618 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3619 }
3620
3621 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3622 {
3623 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3624 }
3625
3626 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3627 {
3628 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3629 }
3630
3631 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3632 {
3633 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3634 }
3635
3636 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3637 {
3638 tcg_gen_xori_i32(dst, src, INT32_MIN);
3639 }
3640
3641 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3642 {
3643 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3644 }
3645
3646 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3647 {
3648 tcg_gen_xori_i64(dst, src, INT64_MIN);
3649 }
3650
3651 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3652 {
3653 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3654 }
3655
3656 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3657 {
3658 tcg_gen_ori_i32(dst, src, INT32_MIN);
3659 }
3660
3661 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3662 {
3663 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3664 }
3665
3666 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3667 {
3668 tcg_gen_ori_i64(dst, src, INT64_MIN);
3669 }
3670
3671 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3672 {
3673 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3674 }
3675
3676 /*
3677 * Float class 1
3678 */
3679
3680 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3681 {
3682 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3683 }
3684
3685 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3686 {
3687 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3688 }
3689
3690 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3691 {
3692 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3693 }
3694
3695 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3696 {
3697 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3698 }
3699
3700 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3701 {
3702 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3703 }
3704
3705 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3706 {
3707 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3708 }
3709
3710 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3711 {
3712 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3713 }
3714
3715 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3716 {
3717 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3718 }
3719
3720 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3721 {
3722 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3723 }
3724
3725 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3726 {
3727 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3728 }
3729
3730 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3731 {
3732 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3733 }
3734
3735 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3736 {
3737 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3738 }
3739
3740 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3741 {
3742 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3743 }
3744
3745 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3746 {
3747 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3748 }
3749
3750 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3751 {
3752 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3753 }
3754
3755 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3756 {
3757 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3758 }
3759
3760 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3761 {
3762 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3763 }
3764
3765 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3766 {
3767 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3768 }
3769
3770 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3771 {
3772 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3773 }
3774
3775 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3776 {
3777 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3778 }
3779
3780 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3781 {
3782 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3783 }
3784
3785 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3786 {
3787 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3788 }
3789
3790 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3791 {
3792 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3793 }
3794
3795 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3796 {
3797 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3798 }
3799
3800 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3801 {
3802 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3803 }
3804
3805 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3806 {
3807 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3808 }
3809
3810 /*
3811 * Float class 2
3812 */
3813
3814 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3815 {
3816 TCGv_i32 ta, tb, tc, ty;
3817
3818 nullify_over(ctx);
3819
3820 ta = load_frw0_i32(a->r1);
3821 tb = load_frw0_i32(a->r2);
3822 ty = tcg_constant_i32(a->y);
3823 tc = tcg_constant_i32(a->c);
3824
3825 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3826
3827 return nullify_end(ctx);
3828 }
3829
3830 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3831 {
3832 TCGv_i64 ta, tb;
3833 TCGv_i32 tc, ty;
3834
3835 nullify_over(ctx);
3836
3837 ta = load_frd0(a->r1);
3838 tb = load_frd0(a->r2);
3839 ty = tcg_constant_i32(a->y);
3840 tc = tcg_constant_i32(a->c);
3841
3842 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3843
3844 return nullify_end(ctx);
3845 }
3846
3847 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3848 {
3849 TCGv_reg t;
3850
3851 nullify_over(ctx);
3852
3853 t = tcg_temp_new();
3854 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3855
3856 if (a->y == 1) {
3857 int mask;
3858 bool inv = false;
3859
3860 switch (a->c) {
3861 case 0: /* simple */
3862 tcg_gen_andi_reg(t, t, 0x4000000);
3863 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3864 goto done;
3865 case 2: /* rej */
3866 inv = true;
3867 /* fallthru */
3868 case 1: /* acc */
3869 mask = 0x43ff800;
3870 break;
3871 case 6: /* rej8 */
3872 inv = true;
3873 /* fallthru */
3874 case 5: /* acc8 */
3875 mask = 0x43f8000;
3876 break;
3877 case 9: /* acc6 */
3878 mask = 0x43e0000;
3879 break;
3880 case 13: /* acc4 */
3881 mask = 0x4380000;
3882 break;
3883 case 17: /* acc2 */
3884 mask = 0x4200000;
3885 break;
3886 default:
3887 gen_illegal(ctx);
3888 return true;
3889 }
3890 if (inv) {
3891 TCGv_reg c = tcg_constant_reg(mask);
3892 tcg_gen_or_reg(t, t, c);
3893 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3894 } else {
3895 tcg_gen_andi_reg(t, t, mask);
3896 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3897 }
3898 } else {
3899 unsigned cbit = (a->y ^ 1) - 1;
3900
3901 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3902 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3903 }
3904
3905 done:
3906 return nullify_end(ctx);
3907 }
3908
3909 /*
3910 * Float class 3
3911 */
3912
3913 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3914 {
3915 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3916 }
3917
3918 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3919 {
3920 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3921 }
3922
3923 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3924 {
3925 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3926 }
3927
3928 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3929 {
3930 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3931 }
3932
3933 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3934 {
3935 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3936 }
3937
3938 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3939 {
3940 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3941 }
3942
3943 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3944 {
3945 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3946 }
3947
3948 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3949 {
3950 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3951 }
3952
3953 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3954 {
3955 TCGv_i64 x, y;
3956
3957 nullify_over(ctx);
3958
3959 x = load_frw0_i64(a->r1);
3960 y = load_frw0_i64(a->r2);
3961 tcg_gen_mul_i64(x, x, y);
3962 save_frd(a->t, x);
3963
3964 return nullify_end(ctx);
3965 }
3966
3967 /* Convert the fmpyadd single-precision register encodings to standard. */
3968 static inline int fmpyadd_s_reg(unsigned r)
3969 {
3970 return (r & 16) * 2 + 16 + (r & 15);
3971 }
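/*
 * Added note: the 5-bit encoding covers only the upper single
 * registers; values 0-15 map to 16-31 and 16-31 map to 48-63, e.g.
 * fmpyadd_s_reg(5) == 21 and fmpyadd_s_reg(21) == 53, in the 0-63
 * half-register numbering used by the frw load/store helpers here.
 */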
3972
3973 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3974 {
3975 int tm = fmpyadd_s_reg(a->tm);
3976 int ra = fmpyadd_s_reg(a->ra);
3977 int ta = fmpyadd_s_reg(a->ta);
3978 int rm2 = fmpyadd_s_reg(a->rm2);
3979 int rm1 = fmpyadd_s_reg(a->rm1);
3980
3981 nullify_over(ctx);
3982
3983 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3984 do_fop_weww(ctx, ta, ta, ra,
3985 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3986
3987 return nullify_end(ctx);
3988 }
3989
3990 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3991 {
3992 return do_fmpyadd_s(ctx, a, false);
3993 }
3994
3995 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3996 {
3997 return do_fmpyadd_s(ctx, a, true);
3998 }
3999
4000 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4001 {
4002 nullify_over(ctx);
4003
4004 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4005 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4006 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4007
4008 return nullify_end(ctx);
4009 }
4010
4011 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4012 {
4013 return do_fmpyadd_d(ctx, a, false);
4014 }
4015
4016 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4017 {
4018 return do_fmpyadd_d(ctx, a, true);
4019 }
4020
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

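/*
 * DIAG is an implementation-dependent, most-privileged instruction.
 * Only the 0x100 code used by SeaBIOS-hppa for BTLB setup is handled;
 * all other codes are logged as unimplemented and behave as nops.
 */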
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV. */
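    /*
     * tb->cs_base packs the front space into the high 32 bits and the
     * signed offset iaoq_b - iaoq_f into the low 32 bits; a zero offset
     * means the back of the queue was not known at translation time.
     */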
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page. */
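    /*
     * (pc_first | TARGET_PAGE_MASK) is pc_first with all bits above the
     * page offset set, so its negation is the byte count from pc_first
     * to the end of the page; dividing by 4 converts that to instruction
     * slots.  E.g. with 4 KiB pages and page offset 0xffc, bound is 1.
     */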
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

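    /*
     * Record both queue pointers in the insn start data so that
     * exception unwinding can restore IAOQ_Front and IAOQ_Back.
     */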
    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch. */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new();
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

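        /* If the insn is statically known to be nullified, consume the
           nullification and skip decode entirely; otherwise translate. */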
        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue. Note that this check also detects
       a priority change within the instruction queue. */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

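/*
 * At TB end: DISAS_IAQ_N_UPDATED means the cpu_iaoq_* globals already
 * hold the next queue values; the STALE variants mean they must still
 * be copied out of ctx here, with STALE_EXIT additionally forcing a
 * return to the main loop instead of chaining via the goto_ptr lookup.
 */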
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

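/*
 * For user-only emulation, page zero is a magic gateway page; the stub
 * addresses below match the cases handled by do_page_zero, so the log
 * shows a symbolic name instead of disassembling unmapped memory.
 */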
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

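/*
 * Entry point from the common TCG accelerator code: drive the generic
 * translator loop with the hooks above to build one TranslationBlock.
 */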
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}