1 /*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/log.h"
30
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef HELPER_H
34
35
36 /* Since we have a distinction between register size and address size,
37 we need to redefine all of these. */
38
39 #undef TCGv
40 #undef tcg_temp_new
41 #undef tcg_global_mem_new
42
43 #if TARGET_LONG_BITS == 64
44 #define TCGv_tl TCGv_i64
45 #define tcg_temp_new_tl tcg_temp_new_i64
46 #if TARGET_REGISTER_BITS == 64
47 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64
48 #else
49 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
50 #endif
51 #else
52 #define TCGv_tl TCGv_i32
53 #define tcg_temp_new_tl tcg_temp_new_i32
54 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32
55 #endif
56
57 #if TARGET_REGISTER_BITS == 64
58 #define TCGv_reg TCGv_i64
59
60 #define tcg_temp_new tcg_temp_new_i64
61 #define tcg_global_mem_new tcg_global_mem_new_i64
62
63 #define tcg_gen_movi_reg tcg_gen_movi_i64
64 #define tcg_gen_mov_reg tcg_gen_mov_i64
65 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
66 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
67 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
68 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
69 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
70 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
71 #define tcg_gen_ld_reg tcg_gen_ld_i64
72 #define tcg_gen_st8_reg tcg_gen_st8_i64
73 #define tcg_gen_st16_reg tcg_gen_st16_i64
74 #define tcg_gen_st32_reg tcg_gen_st32_i64
75 #define tcg_gen_st_reg tcg_gen_st_i64
76 #define tcg_gen_add_reg tcg_gen_add_i64
77 #define tcg_gen_addi_reg tcg_gen_addi_i64
78 #define tcg_gen_sub_reg tcg_gen_sub_i64
79 #define tcg_gen_neg_reg tcg_gen_neg_i64
80 #define tcg_gen_subfi_reg tcg_gen_subfi_i64
81 #define tcg_gen_subi_reg tcg_gen_subi_i64
82 #define tcg_gen_and_reg tcg_gen_and_i64
83 #define tcg_gen_andi_reg tcg_gen_andi_i64
84 #define tcg_gen_or_reg tcg_gen_or_i64
85 #define tcg_gen_ori_reg tcg_gen_ori_i64
86 #define tcg_gen_xor_reg tcg_gen_xor_i64
87 #define tcg_gen_xori_reg tcg_gen_xori_i64
88 #define tcg_gen_not_reg tcg_gen_not_i64
89 #define tcg_gen_shl_reg tcg_gen_shl_i64
90 #define tcg_gen_shli_reg tcg_gen_shli_i64
91 #define tcg_gen_shr_reg tcg_gen_shr_i64
92 #define tcg_gen_shri_reg tcg_gen_shri_i64
93 #define tcg_gen_sar_reg tcg_gen_sar_i64
94 #define tcg_gen_sari_reg tcg_gen_sari_i64
95 #define tcg_gen_brcond_reg tcg_gen_brcond_i64
96 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
97 #define tcg_gen_setcond_reg tcg_gen_setcond_i64
98 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
99 #define tcg_gen_mul_reg tcg_gen_mul_i64
100 #define tcg_gen_muli_reg tcg_gen_muli_i64
101 #define tcg_gen_div_reg tcg_gen_div_i64
102 #define tcg_gen_rem_reg tcg_gen_rem_i64
103 #define tcg_gen_divu_reg tcg_gen_divu_i64
104 #define tcg_gen_remu_reg tcg_gen_remu_i64
105 #define tcg_gen_discard_reg tcg_gen_discard_i64
106 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
107 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
108 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
109 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
110 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
111 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
112 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
113 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
114 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
115 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
116 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
117 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
118 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
119 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
120 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
121 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
122 #define tcg_gen_andc_reg tcg_gen_andc_i64
123 #define tcg_gen_eqv_reg tcg_gen_eqv_i64
124 #define tcg_gen_nand_reg tcg_gen_nand_i64
125 #define tcg_gen_nor_reg tcg_gen_nor_i64
126 #define tcg_gen_orc_reg tcg_gen_orc_i64
127 #define tcg_gen_clz_reg tcg_gen_clz_i64
128 #define tcg_gen_ctz_reg tcg_gen_ctz_i64
129 #define tcg_gen_clzi_reg tcg_gen_clzi_i64
130 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
131 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
132 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
133 #define tcg_gen_rotl_reg tcg_gen_rotl_i64
134 #define tcg_gen_rotli_reg tcg_gen_rotli_i64
135 #define tcg_gen_rotr_reg tcg_gen_rotr_i64
136 #define tcg_gen_rotri_reg tcg_gen_rotri_i64
137 #define tcg_gen_deposit_reg tcg_gen_deposit_i64
138 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
139 #define tcg_gen_extract_reg tcg_gen_extract_i64
140 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
141 #define tcg_gen_extract2_reg tcg_gen_extract2_i64
142 #define tcg_constant_reg tcg_constant_i64
143 #define tcg_gen_movcond_reg tcg_gen_movcond_i64
144 #define tcg_gen_add2_reg tcg_gen_add2_i64
145 #define tcg_gen_sub2_reg tcg_gen_sub2_i64
146 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
147 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
148 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
149 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
150 #else
151 #define TCGv_reg TCGv_i32
152 #define tcg_temp_new tcg_temp_new_i32
153 #define tcg_global_mem_new tcg_global_mem_new_i32
154
155 #define tcg_gen_movi_reg tcg_gen_movi_i32
156 #define tcg_gen_mov_reg tcg_gen_mov_i32
157 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
158 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
159 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
160 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
161 #define tcg_gen_ld32u_reg tcg_gen_ld_i32
162 #define tcg_gen_ld32s_reg tcg_gen_ld_i32
163 #define tcg_gen_ld_reg tcg_gen_ld_i32
164 #define tcg_gen_st8_reg tcg_gen_st8_i32
165 #define tcg_gen_st16_reg tcg_gen_st16_i32
166 #define tcg_gen_st32_reg tcg_gen_st32_i32
167 #define tcg_gen_st_reg tcg_gen_st_i32
168 #define tcg_gen_add_reg tcg_gen_add_i32
169 #define tcg_gen_addi_reg tcg_gen_addi_i32
170 #define tcg_gen_sub_reg tcg_gen_sub_i32
171 #define tcg_gen_neg_reg tcg_gen_neg_i32
172 #define tcg_gen_subfi_reg tcg_gen_subfi_i32
173 #define tcg_gen_subi_reg tcg_gen_subi_i32
174 #define tcg_gen_and_reg tcg_gen_and_i32
175 #define tcg_gen_andi_reg tcg_gen_andi_i32
176 #define tcg_gen_or_reg tcg_gen_or_i32
177 #define tcg_gen_ori_reg tcg_gen_ori_i32
178 #define tcg_gen_xor_reg tcg_gen_xor_i32
179 #define tcg_gen_xori_reg tcg_gen_xori_i32
180 #define tcg_gen_not_reg tcg_gen_not_i32
181 #define tcg_gen_shl_reg tcg_gen_shl_i32
182 #define tcg_gen_shli_reg tcg_gen_shli_i32
183 #define tcg_gen_shr_reg tcg_gen_shr_i32
184 #define tcg_gen_shri_reg tcg_gen_shri_i32
185 #define tcg_gen_sar_reg tcg_gen_sar_i32
186 #define tcg_gen_sari_reg tcg_gen_sari_i32
187 #define tcg_gen_brcond_reg tcg_gen_brcond_i32
188 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
189 #define tcg_gen_setcond_reg tcg_gen_setcond_i32
190 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
191 #define tcg_gen_mul_reg tcg_gen_mul_i32
192 #define tcg_gen_muli_reg tcg_gen_muli_i32
193 #define tcg_gen_div_reg tcg_gen_div_i32
194 #define tcg_gen_rem_reg tcg_gen_rem_i32
195 #define tcg_gen_divu_reg tcg_gen_divu_i32
196 #define tcg_gen_remu_reg tcg_gen_remu_i32
197 #define tcg_gen_discard_reg tcg_gen_discard_i32
198 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
199 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
200 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
201 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32
202 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
203 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
204 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
205 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
206 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
207 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
208 #define tcg_gen_ext32u_reg tcg_gen_mov_i32
209 #define tcg_gen_ext32s_reg tcg_gen_mov_i32
210 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
211 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
212 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
213 #define tcg_gen_andc_reg tcg_gen_andc_i32
214 #define tcg_gen_eqv_reg tcg_gen_eqv_i32
215 #define tcg_gen_nand_reg tcg_gen_nand_i32
216 #define tcg_gen_nor_reg tcg_gen_nor_i32
217 #define tcg_gen_orc_reg tcg_gen_orc_i32
218 #define tcg_gen_clz_reg tcg_gen_clz_i32
219 #define tcg_gen_ctz_reg tcg_gen_ctz_i32
220 #define tcg_gen_clzi_reg tcg_gen_clzi_i32
221 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
222 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
223 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
224 #define tcg_gen_rotl_reg tcg_gen_rotl_i32
225 #define tcg_gen_rotli_reg tcg_gen_rotli_i32
226 #define tcg_gen_rotr_reg tcg_gen_rotr_i32
227 #define tcg_gen_rotri_reg tcg_gen_rotri_i32
228 #define tcg_gen_deposit_reg tcg_gen_deposit_i32
229 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
230 #define tcg_gen_extract_reg tcg_gen_extract_i32
231 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
232 #define tcg_gen_extract2_reg tcg_gen_extract2_i32
233 #define tcg_constant_reg tcg_constant_i32
234 #define tcg_gen_movcond_reg tcg_gen_movcond_i32
235 #define tcg_gen_add2_reg tcg_gen_add2_i32
236 #define tcg_gen_sub2_reg tcg_gen_sub2_i32
237 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
238 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
239 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
240 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
241 #endif /* TARGET_REGISTER_BITS */
242
243 typedef struct DisasCond {
244 TCGCond c;
245 TCGv_reg a0, a1;
246 } DisasCond;
247
248 typedef struct DisasContext {
249 DisasContextBase base;
250 CPUState *cs;
251
252 target_ureg iaoq_f;
253 target_ureg iaoq_b;
254 target_ureg iaoq_n;
255 TCGv_reg iaoq_n_var;
256
257 DisasCond null_cond;
258 TCGLabel *null_lab;
259
260 uint32_t insn;
261 uint32_t tb_flags;
262 int mmu_idx;
263 int privilege;
264 bool psw_n_nonzero;
265
266 #ifdef CONFIG_USER_ONLY
267 MemOp unalign;
268 #endif
269 } DisasContext;
270
271 #ifdef CONFIG_USER_ONLY
272 #define UNALIGN(C) (C)->unalign
273 #else
274 #define UNALIGN(C) MO_ALIGN
275 #endif
276
277 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
278 static int expand_sm_imm(DisasContext *ctx, int val)
279 {
280 if (val & PSW_SM_E) {
281 val = (val & ~PSW_SM_E) | PSW_E;
282 }
283 if (val & PSW_SM_W) {
284 val = (val & ~PSW_SM_W) | PSW_W;
285 }
286 return val;
287 }
288
289 /* An inverted space register field: a value of 0 selects sr0 explicitly, not a space inferred from the base register. */
290 static int expand_sr3x(DisasContext *ctx, int val)
291 {
292 return ~val;
293 }
294
295 /* Convert the M:A bits within a memory insn to the tri-state value
296 we use for the final M. */
297 static int ma_to_m(DisasContext *ctx, int val)
298 {
299 return val & 2 ? (val & 1 ? -1 : 1) : 0;
300 }
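/*
 * For illustration, the mapping above produces the tri-state convention
 * used by the do_load and do_store helpers below (< 0 pre-modify,
 * > 0 post-modify, 0 no base update):
 *   M=0        ->  0  (no base register update)
 *   M=1, A=0   ->  1  (post-modify)
 *   M=1, A=1   -> -1  (pre-modify)
 */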
301
302 /* Convert the sign of the displacement to a pre or post-modify. */
303 static int pos_to_m(DisasContext *ctx, int val)
304 {
305 return val ? 1 : -1;
306 }
307
308 static int neg_to_m(DisasContext *ctx, int val)
309 {
310 return val ? -1 : 1;
311 }
312
313 /* Used for branch targets and fp memory ops. */
314 static int expand_shl2(DisasContext *ctx, int val)
315 {
316 return val << 2;
317 }
318
319 /* Used for fp memory ops. */
320 static int expand_shl3(DisasContext *ctx, int val)
321 {
322 return val << 3;
323 }
324
325 /* Used for assemble_21. */
326 static int expand_shl11(DisasContext *ctx, int val)
327 {
328 return val << 11;
329 }
330
331
332 /* Include the auto-generated decoder. */
333 #include "decode-insns.c.inc"
334
335 /* We are not using a goto_tb (for whatever reason), but have updated
336 the iaq (for whatever reason), so don't do it again on exit. */
337 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
338
339 /* We are exiting the TB, but have neither emitted a goto_tb, nor
340 updated the iaq for the next instruction to be executed. */
341 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
342
343 /* Similarly, but we want to return to the main loop immediately
344 to recognize unmasked interrupts. */
345 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
346 #define DISAS_EXIT DISAS_TARGET_3
347
348 /* global register indexes */
349 static TCGv_reg cpu_gr[32];
350 static TCGv_i64 cpu_sr[4];
351 static TCGv_i64 cpu_srH;
352 static TCGv_reg cpu_iaoq_f;
353 static TCGv_reg cpu_iaoq_b;
354 static TCGv_i64 cpu_iasq_f;
355 static TCGv_i64 cpu_iasq_b;
356 static TCGv_reg cpu_sar;
357 static TCGv_reg cpu_psw_n;
358 static TCGv_reg cpu_psw_v;
359 static TCGv_reg cpu_psw_cb;
360 static TCGv_reg cpu_psw_cb_msb;
361
362 void hppa_translate_init(void)
363 {
364 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
365
366 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
367 static const GlobalVar vars[] = {
368 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
369 DEF_VAR(psw_n),
370 DEF_VAR(psw_v),
371 DEF_VAR(psw_cb),
372 DEF_VAR(psw_cb_msb),
373 DEF_VAR(iaoq_f),
374 DEF_VAR(iaoq_b),
375 };
376
377 #undef DEF_VAR
378
379 /* Use the symbolic register names that match the disassembler. */
380 static const char gr_names[32][4] = {
381 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
382 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
383 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
384 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
385 };
386 /* SR[4-7] are not TCG globals, so that they can be indexed at runtime (see space_select). */
387 static const char sr_names[5][4] = {
388 "sr0", "sr1", "sr2", "sr3", "srH"
389 };
390
391 int i;
392
393 cpu_gr[0] = NULL;
394 for (i = 1; i < 32; i++) {
395 cpu_gr[i] = tcg_global_mem_new(tcg_env,
396 offsetof(CPUHPPAState, gr[i]),
397 gr_names[i]);
398 }
399 for (i = 0; i < 4; i++) {
400 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
401 offsetof(CPUHPPAState, sr[i]),
402 sr_names[i]);
403 }
404 cpu_srH = tcg_global_mem_new_i64(tcg_env,
405 offsetof(CPUHPPAState, sr[4]),
406 sr_names[4]);
407
408 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
409 const GlobalVar *v = &vars[i];
410 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
411 }
412
413 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
414 offsetof(CPUHPPAState, iasq_f),
415 "iasq_f");
416 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
417 offsetof(CPUHPPAState, iasq_b),
418 "iasq_b");
419 }
420
421 static DisasCond cond_make_f(void)
422 {
423 return (DisasCond){
424 .c = TCG_COND_NEVER,
425 .a0 = NULL,
426 .a1 = NULL,
427 };
428 }
429
430 static DisasCond cond_make_t(void)
431 {
432 return (DisasCond){
433 .c = TCG_COND_ALWAYS,
434 .a0 = NULL,
435 .a1 = NULL,
436 };
437 }
438
439 static DisasCond cond_make_n(void)
440 {
441 return (DisasCond){
442 .c = TCG_COND_NE,
443 .a0 = cpu_psw_n,
444 .a1 = tcg_constant_reg(0)
445 };
446 }
447
448 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
449 {
450 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
451 return (DisasCond){
452 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
453 };
454 }
455
456 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
457 {
458 TCGv_reg tmp = tcg_temp_new();
459 tcg_gen_mov_reg(tmp, a0);
460 return cond_make_0_tmp(c, tmp);
461 }
462
463 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
464 {
465 DisasCond r = { .c = c };
466
467 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
468 r.a0 = tcg_temp_new();
469 tcg_gen_mov_reg(r.a0, a0);
470 r.a1 = tcg_temp_new();
471 tcg_gen_mov_reg(r.a1, a1);
472
473 return r;
474 }
475
476 static void cond_free(DisasCond *cond)
477 {
478 switch (cond->c) {
479 default:
480 cond->a0 = NULL;
481 cond->a1 = NULL;
482 /* fallthru */
483 case TCG_COND_ALWAYS:
484 cond->c = TCG_COND_NEVER;
485 break;
486 case TCG_COND_NEVER:
487 break;
488 }
489 }
490
491 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
492 {
493 if (reg == 0) {
494 TCGv_reg t = tcg_temp_new();
495 tcg_gen_movi_reg(t, 0);
496 return t;
497 } else {
498 return cpu_gr[reg];
499 }
500 }
501
502 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
503 {
504 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
505 return tcg_temp_new();
506 } else {
507 return cpu_gr[reg];
508 }
509 }
510
511 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
512 {
513 if (ctx->null_cond.c != TCG_COND_NEVER) {
514 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
515 ctx->null_cond.a1, dest, t);
516 } else {
517 tcg_gen_mov_reg(dest, t);
518 }
519 }
520
521 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
522 {
523 if (reg != 0) {
524 save_or_nullify(ctx, cpu_gr[reg], t);
525 }
526 }
527
528 #if HOST_BIG_ENDIAN
529 # define HI_OFS 0
530 # define LO_OFS 4
531 #else
532 # define HI_OFS 4
533 # define LO_OFS 0
534 #endif
535
536 static TCGv_i32 load_frw_i32(unsigned rt)
537 {
538 TCGv_i32 ret = tcg_temp_new_i32();
539 tcg_gen_ld_i32(ret, tcg_env,
540 offsetof(CPUHPPAState, fr[rt & 31])
541 + (rt & 32 ? LO_OFS : HI_OFS));
542 return ret;
543 }
544
545 static TCGv_i32 load_frw0_i32(unsigned rt)
546 {
547 if (rt == 0) {
548 TCGv_i32 ret = tcg_temp_new_i32();
549 tcg_gen_movi_i32(ret, 0);
550 return ret;
551 } else {
552 return load_frw_i32(rt);
553 }
554 }
555
556 static TCGv_i64 load_frw0_i64(unsigned rt)
557 {
558 TCGv_i64 ret = tcg_temp_new_i64();
559 if (rt == 0) {
560 tcg_gen_movi_i64(ret, 0);
561 } else {
562 tcg_gen_ld32u_i64(ret, tcg_env,
563 offsetof(CPUHPPAState, fr[rt & 31])
564 + (rt & 32 ? LO_OFS : HI_OFS));
565 }
566 return ret;
567 }
568
569 static void save_frw_i32(unsigned rt, TCGv_i32 val)
570 {
571 tcg_gen_st_i32(val, tcg_env,
572 offsetof(CPUHPPAState, fr[rt & 31])
573 + (rt & 32 ? LO_OFS : HI_OFS));
574 }
575
576 #undef HI_OFS
577 #undef LO_OFS
578
579 static TCGv_i64 load_frd(unsigned rt)
580 {
581 TCGv_i64 ret = tcg_temp_new_i64();
582 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
583 return ret;
584 }
585
586 static TCGv_i64 load_frd0(unsigned rt)
587 {
588 if (rt == 0) {
589 TCGv_i64 ret = tcg_temp_new_i64();
590 tcg_gen_movi_i64(ret, 0);
591 return ret;
592 } else {
593 return load_frd(rt);
594 }
595 }
596
597 static void save_frd(unsigned rt, TCGv_i64 val)
598 {
599 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
600 }
601
602 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
603 {
604 #ifdef CONFIG_USER_ONLY
605 tcg_gen_movi_i64(dest, 0);
606 #else
607 if (reg < 4) {
608 tcg_gen_mov_i64(dest, cpu_sr[reg]);
609 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
610 tcg_gen_mov_i64(dest, cpu_srH);
611 } else {
612 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
613 }
614 #endif
615 }
616
617 /* Skip over the implementation of an insn that has been nullified.
618 Use this when the insn is too complex for a conditional move. */
619 static void nullify_over(DisasContext *ctx)
620 {
621 if (ctx->null_cond.c != TCG_COND_NEVER) {
622 /* The always condition should have been handled in the main loop. */
623 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
624
625 ctx->null_lab = gen_new_label();
626
627 /* If we're using PSW[N], copy it to a temp because... */
628 if (ctx->null_cond.a0 == cpu_psw_n) {
629 ctx->null_cond.a0 = tcg_temp_new();
630 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
631 }
632 /* ... we clear it before branching over the implementation,
633 so that (1) it's clear after nullifying this insn and
634 (2) if this insn nullifies the next, PSW[N] is valid. */
635 if (ctx->psw_n_nonzero) {
636 ctx->psw_n_nonzero = false;
637 tcg_gen_movi_reg(cpu_psw_n, 0);
638 }
639
640 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
641 ctx->null_cond.a1, ctx->null_lab);
642 cond_free(&ctx->null_cond);
643 }
644 }
645
646 /* Save the current nullification state to PSW[N]. */
647 static void nullify_save(DisasContext *ctx)
648 {
649 if (ctx->null_cond.c == TCG_COND_NEVER) {
650 if (ctx->psw_n_nonzero) {
651 tcg_gen_movi_reg(cpu_psw_n, 0);
652 }
653 return;
654 }
655 if (ctx->null_cond.a0 != cpu_psw_n) {
656 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
657 ctx->null_cond.a0, ctx->null_cond.a1);
658 ctx->psw_n_nonzero = true;
659 }
660 cond_free(&ctx->null_cond);
661 }
662
663 /* Set PSW[N] to X. The intention is that this is used immediately
664 before a goto_tb/exit_tb, so that there is no fallthru path to other
665 code within the TB. Therefore we do not update psw_n_nonzero. */
666 static void nullify_set(DisasContext *ctx, bool x)
667 {
668 if (ctx->psw_n_nonzero || x) {
669 tcg_gen_movi_reg(cpu_psw_n, x);
670 }
671 }
672
673 /* Mark the end of an instruction that may have been nullified.
674 This is the pair to nullify_over. Always returns true so that
675 it may be tail-called from a translate function. */
676 static bool nullify_end(DisasContext *ctx)
677 {
678 TCGLabel *null_lab = ctx->null_lab;
679 DisasJumpType status = ctx->base.is_jmp;
680
681 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
682 For UPDATED, we cannot update on the nullified path. */
683 assert(status != DISAS_IAQ_N_UPDATED);
684
685 if (likely(null_lab == NULL)) {
686 /* The current insn wasn't conditional or handled the condition
687 applied to it without a branch, so the (new) setting of
688 NULL_COND can be applied directly to the next insn. */
689 return true;
690 }
691 ctx->null_lab = NULL;
692
693 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
694 /* The next instruction will be unconditional,
695 and NULL_COND already reflects that. */
696 gen_set_label(null_lab);
697 } else {
698 /* The insn that we just executed is itself nullifying the next
699 instruction. Store the condition in the PSW[N] global.
700 We asserted PSW[N] = 0 in nullify_over, so that after the
701 label we have the proper value in place. */
702 nullify_save(ctx);
703 gen_set_label(null_lab);
704 ctx->null_cond = cond_make_n();
705 }
706 if (status == DISAS_NORETURN) {
707 ctx->base.is_jmp = DISAS_NEXT;
708 }
709 return true;
710 }
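/*
 * Typical usage, as in the translate functions below -- a sketch only,
 * trans_foo is a placeholder name, not a real translator:
 *
 *     static bool trans_foo(DisasContext *ctx, arg_foo *a)
 *     {
 *         nullify_over(ctx);        // branch over the body if nullified
 *         ...emit the operation...
 *         return nullify_end(ctx);  // resolve the label, always true
 *     }
 */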
711
712 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
713 {
714 if (unlikely(ival == -1)) {
715 tcg_gen_mov_reg(dest, vval);
716 } else {
717 tcg_gen_movi_reg(dest, ival);
718 }
719 }
720
721 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
722 {
723 return ctx->iaoq_f + disp + 8;
724 }
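/*
 * The "+ 8" reflects that PA-RISC branch displacements are relative to
 * the address of the branch plus 8, i.e. the instruction beyond the
 * delay slot. E.g. a branch at 0x100 with disp 0x20 targets 0x128.
 */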
725
726 static void gen_excp_1(int exception)
727 {
728 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
729 }
730
731 static void gen_excp(DisasContext *ctx, int exception)
732 {
733 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
734 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
735 nullify_save(ctx);
736 gen_excp_1(exception);
737 ctx->base.is_jmp = DISAS_NORETURN;
738 }
739
740 static bool gen_excp_iir(DisasContext *ctx, int exc)
741 {
742 nullify_over(ctx);
743 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
744 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
745 gen_excp(ctx, exc);
746 return nullify_end(ctx);
747 }
748
749 static bool gen_illegal(DisasContext *ctx)
750 {
751 return gen_excp_iir(ctx, EXCP_ILL);
752 }
753
754 #ifdef CONFIG_USER_ONLY
755 #define CHECK_MOST_PRIVILEGED(EXCP) \
756 return gen_excp_iir(ctx, EXCP)
757 #else
758 #define CHECK_MOST_PRIVILEGED(EXCP) \
759 do { \
760 if (ctx->privilege != 0) { \
761 return gen_excp_iir(ctx, EXCP); \
762 } \
763 } while (0)
764 #endif
765
766 static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
767 {
768 return translator_use_goto_tb(&ctx->base, dest);
769 }
770
771 /* If the next insn is to be nullified, and it's on the same page,
772 and we're not attempting to set a breakpoint on it, then we can
773 totally skip the nullified insn. This avoids creating and
774 executing a TB that merely branches to the next TB. */
775 static bool use_nullify_skip(DisasContext *ctx)
776 {
777 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
778 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
779 }
780
781 static void gen_goto_tb(DisasContext *ctx, int which,
782 target_ureg f, target_ureg b)
783 {
784 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
785 tcg_gen_goto_tb(which);
786 tcg_gen_movi_reg(cpu_iaoq_f, f);
787 tcg_gen_movi_reg(cpu_iaoq_b, b);
788 tcg_gen_exit_tb(ctx->base.tb, which);
789 } else {
790 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
791 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
792 tcg_gen_lookup_and_goto_ptr();
793 }
794 }
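/*
 * In other words: when both queue addresses are known at translation
 * time and translator_use_goto_tb allows it, the TB is chained directly
 * via goto_tb/exit_tb; otherwise the front/back queue globals are
 * updated and we fall back to lookup_and_goto_ptr.
 */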
795
796 static bool cond_need_sv(int c)
797 {
798 return c == 2 || c == 3 || c == 6;
799 }
800
801 static bool cond_need_cb(int c)
802 {
803 return c == 4 || c == 5;
804 }
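/*
 * These map the CF condition numbers to the inputs they require:
 * conditions 2, 3 and 6 (signed <, <=, and SV) need the signed
 * overflow value, while 4 and 5 (NUV/UV and ZNV/VNZ) need the
 * carry-out; see the case labels in do_cond below.
 */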
805
806 /* Need extensions from TCGv_i32 to TCGv_reg. */
807 static bool cond_need_ext(DisasContext *ctx, bool d)
808 {
809 return TARGET_REGISTER_BITS == 64 && !d;
810 }
811
812 /*
813 * Compute the conditional for arithmetic. See page 5-3, Table 5-1, of
814 * the PA-RISC 1.1 Architecture Reference Manual for details.
815 */
816
817 static DisasCond do_cond(unsigned cf, TCGv_reg res,
818 TCGv_reg cb_msb, TCGv_reg sv)
819 {
820 DisasCond cond;
821 TCGv_reg tmp;
822
823 switch (cf >> 1) {
824 case 0: /* Never / TR (0 / 1) */
825 cond = cond_make_f();
826 break;
827 case 1: /* = / <> (Z / !Z) */
828 cond = cond_make_0(TCG_COND_EQ, res);
829 break;
830 case 2: /* < / >= (N ^ V / !(N ^ V)) */
831 tmp = tcg_temp_new();
832 tcg_gen_xor_reg(tmp, res, sv);
833 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
834 break;
835 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
836 /*
837 * Simplify:
838 * (N ^ V) | Z
839 * ((res < 0) ^ (sv < 0)) | !res
840 * ((res ^ sv) < 0) | !res
841 * (~(res ^ sv) >= 0) | !res
842 * !(~(res ^ sv) >> 31) | !res
843 * !(~(res ^ sv) >> 31 & res)
844 */
845 tmp = tcg_temp_new();
846 tcg_gen_eqv_reg(tmp, res, sv);
847 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
848 tcg_gen_and_reg(tmp, tmp, res);
849 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
850 break;
851 case 4: /* NUV / UV (!C / C) */
852 cond = cond_make_0(TCG_COND_EQ, cb_msb);
853 break;
854 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
855 tmp = tcg_temp_new();
856 tcg_gen_neg_reg(tmp, cb_msb);
857 tcg_gen_and_reg(tmp, tmp, res);
858 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
859 break;
860 case 6: /* SV / NSV (V / !V) */
861 cond = cond_make_0(TCG_COND_LT, sv);
862 break;
863 case 7: /* OD / EV */
864 tmp = tcg_temp_new();
865 tcg_gen_andi_reg(tmp, res, 1);
866 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
867 break;
868 default:
869 g_assert_not_reached();
870 }
871 if (cf & 1) {
872 cond.c = tcg_invert_cond(cond.c);
873 }
874
875 return cond;
876 }
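/*
 * Note on the CF encoding used above: bits 3:1 select the base
 * condition and bit 0 inverts it. For example, cf = 2 tests "=" (EQ on
 * the result), while cf = 3 is its inverse, "<>".
 */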
877
878 /* Similar, but for the special case of subtraction without borrow, we
879 can use the inputs directly. This can allow other computation to be
880 deleted as unused. */
881
882 static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
883 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
884 {
885 DisasCond cond;
886
887 switch (cf >> 1) {
888 case 1: /* = / <> */
889 cond = cond_make(TCG_COND_EQ, in1, in2);
890 break;
891 case 2: /* < / >= */
892 cond = cond_make(TCG_COND_LT, in1, in2);
893 break;
894 case 3: /* <= / > */
895 cond = cond_make(TCG_COND_LE, in1, in2);
896 break;
897 case 4: /* << / >>= */
898 cond = cond_make(TCG_COND_LTU, in1, in2);
899 break;
900 case 5: /* <<= / >> */
901 cond = cond_make(TCG_COND_LEU, in1, in2);
902 break;
903 default:
904 return do_cond(cf, res, NULL, sv);
905 }
906 if (cf & 1) {
907 cond.c = tcg_invert_cond(cond.c);
908 }
909
910 return cond;
911 }
912
913 /*
914 * Similar, but for logicals, where the carry and overflow bits are not
915 * computed, and use of them is undefined.
916 *
917 * Undefined or not, hardware does not trap. It seems reasonable to
918 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
919 * how cases c={2,3} are treated.
920 */
921
922 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
923 {
924 switch (cf) {
925 case 0: /* never */
926 case 9: /* undef, C */
927 case 11: /* undef, C & !Z */
928 case 12: /* undef, V */
929 return cond_make_f();
930
931 case 1: /* true */
932 case 8: /* undef, !C */
933 case 10: /* undef, !C | Z */
934 case 13: /* undef, !V */
935 return cond_make_t();
936
937 case 2: /* == */
938 return cond_make_0(TCG_COND_EQ, res);
939 case 3: /* <> */
940 return cond_make_0(TCG_COND_NE, res);
941 case 4: /* < */
942 return cond_make_0(TCG_COND_LT, res);
943 case 5: /* >= */
944 return cond_make_0(TCG_COND_GE, res);
945 case 6: /* <= */
946 return cond_make_0(TCG_COND_LE, res);
947 case 7: /* > */
948 return cond_make_0(TCG_COND_GT, res);
949
950 case 14: /* OD */
951 case 15: /* EV */
952 return do_cond(cf, res, NULL, NULL);
953
954 default:
955 g_assert_not_reached();
956 }
957 }
958
959 /* Similar, but for shift/extract/deposit conditions. */
960
961 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
962 {
963 unsigned c, f;
964
965 /* Convert the compressed condition codes to standard.
966 0-2 are the same as logicals (never, =, <), while 3 is OD.
967 4-7 are the reverse of 0-3. */
968 c = orig & 3;
969 if (c == 3) {
970 c = 7;
971 }
972 f = (orig & 4) / 4;
973
974 return do_log_cond(c * 2 + f, res);
975 }
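/*
 * Worked example: orig = 5 gives c = 1, f = 1, so we evaluate
 * do_log_cond(3), i.e. "<>" -- the inverse of the "=" test selected by
 * orig = 1.
 */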
976
977 /* Similar, but for unit conditions. */
978
979 static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
980 TCGv_reg in1, TCGv_reg in2)
981 {
982 DisasCond cond;
983 TCGv_reg tmp, cb = NULL;
984
985 if (cf & 8) {
986 /* Since we want to test lots of carry-out bits all at once, do not
987 * do our normal thing and compute carry-in of bit B+1 since that
988 * leaves us with carry bits spread across two words.
989 */
990 cb = tcg_temp_new();
991 tmp = tcg_temp_new();
992 tcg_gen_or_reg(cb, in1, in2);
993 tcg_gen_and_reg(tmp, in1, in2);
994 tcg_gen_andc_reg(cb, cb, res);
995 tcg_gen_or_reg(cb, cb, tmp);
996 }
997
998 switch (cf >> 1) {
999 case 0: /* never / TR */
1000 case 1: /* undefined */
1001 case 5: /* undefined */
1002 cond = cond_make_f();
1003 break;
1004
1005 case 2: /* SBZ / NBZ */
1006 /* See hasless(v,1) from
1007 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1008 */
1009 tmp = tcg_temp_new();
1010 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1011 tcg_gen_andc_reg(tmp, tmp, res);
1012 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
1013 cond = cond_make_0(TCG_COND_NE, tmp);
1014 break;
1015
1016 case 3: /* SHZ / NHZ */
1017 tmp = tcg_temp_new();
1018 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1019 tcg_gen_andc_reg(tmp, tmp, res);
1020 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
1021 cond = cond_make_0(TCG_COND_NE, tmp);
1022 break;
1023
1024 case 4: /* SDC / NDC */
1025 tcg_gen_andi_reg(cb, cb, 0x88888888u);
1026 cond = cond_make_0(TCG_COND_NE, cb);
1027 break;
1028
1029 case 6: /* SBC / NBC */
1030 tcg_gen_andi_reg(cb, cb, 0x80808080u);
1031 cond = cond_make_0(TCG_COND_NE, cb);
1032 break;
1033
1034 case 7: /* SHC / NHC */
1035 tcg_gen_andi_reg(cb, cb, 0x80008000u);
1036 cond = cond_make_0(TCG_COND_NE, cb);
1037 break;
1038
1039 default:
1040 g_assert_not_reached();
1041 }
1042 if (cf & 1) {
1043 cond.c = tcg_invert_cond(cond.c);
1044 }
1045
1046 return cond;
1047 }
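/*
 * Illustration of the SBZ test above: (res - 0x01010101) & ~res
 * & 0x80808080 is nonzero iff some byte of res is zero. E.g. for
 * res = 0x12003456, res - 0x01010101 = 0x10ff3355, and ANDing that with
 * ~res and 0x80808080 leaves 0x00800000, flagging the zero byte.
 */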
1048
1049 static TCGv_reg get_carry(DisasContext *ctx, bool d,
1050 TCGv_reg cb, TCGv_reg cb_msb)
1051 {
1052 if (cond_need_ext(ctx, d)) {
1053 TCGv_reg t = tcg_temp_new();
1054 tcg_gen_extract_reg(t, cb, 32, 1);
1055 return t;
1056 }
1057 return cb_msb;
1058 }
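/*
 * For a 32-bit operation on the 64-bit register build (d == false),
 * the carry out of bit 31 is recovered from bit 32 of the carry-bit
 * vector cb (which holds in1 ^ in2 ^ result, see do_add below); for a
 * full-width operation the separately tracked cb_msb already is the
 * carry out.
 */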
1059
1060 static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
1061 {
1062 return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
1063 }
1064
1065 /* Compute signed overflow for addition. */
1066 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1067 TCGv_reg in1, TCGv_reg in2)
1068 {
1069 TCGv_reg sv = tcg_temp_new();
1070 TCGv_reg tmp = tcg_temp_new();
1071
1072 tcg_gen_xor_reg(sv, res, in1);
1073 tcg_gen_xor_reg(tmp, in1, in2);
1074 tcg_gen_andc_reg(sv, sv, tmp);
1075
1076 return sv;
1077 }
1078
1079 /* Compute signed overflow for subtraction. */
1080 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1081 TCGv_reg in1, TCGv_reg in2)
1082 {
1083 TCGv_reg sv = tcg_temp_new();
1084 TCGv_reg tmp = tcg_temp_new();
1085
1086 tcg_gen_xor_reg(sv, res, in1);
1087 tcg_gen_xor_reg(tmp, in1, in2);
1088 tcg_gen_and_reg(sv, sv, tmp);
1089
1090 return sv;
1091 }
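/*
 * The two helpers above compute the V bit in the sign position:
 * for addition, sv = (res ^ in1) & ~(in1 ^ in2), which is negative
 * exactly when the operands have the same sign and the result's sign
 * differs; for subtraction the second factor is not complemented.
 * A quick 32-bit check: 0x40000000 + 0x40000000 = 0x80000000 gives
 * sv = 0xc0000000, whose sign bit correctly signals overflow.
 */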
1092
1093 static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1094 TCGv_reg in2, unsigned shift, bool is_l,
1095 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
1096 {
1097 TCGv_reg dest, cb, cb_msb, sv, tmp;
1098 unsigned c = cf >> 1;
1099 DisasCond cond;
1100
1101 dest = tcg_temp_new();
1102 cb = NULL;
1103 cb_msb = NULL;
1104
1105 if (shift) {
1106 tmp = tcg_temp_new();
1107 tcg_gen_shli_reg(tmp, in1, shift);
1108 in1 = tmp;
1109 }
1110
1111 if (!is_l || cond_need_cb(c)) {
1112 TCGv_reg zero = tcg_constant_reg(0);
1113 cb_msb = tcg_temp_new();
1114 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
1115 if (is_c) {
1116 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
1117 }
1118 if (!is_l) {
1119 cb = tcg_temp_new();
1120 tcg_gen_xor_reg(cb, in1, in2);
1121 tcg_gen_xor_reg(cb, cb, dest);
1122 }
1123 } else {
1124 tcg_gen_add_reg(dest, in1, in2);
1125 if (is_c) {
1126 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
1127 }
1128 }
1129
1130 /* Compute signed overflow if required. */
1131 sv = NULL;
1132 if (is_tsv || cond_need_sv(c)) {
1133 sv = do_add_sv(ctx, dest, in1, in2);
1134 if (is_tsv) {
1135 /* ??? Need to include overflow from shift. */
1136 gen_helper_tsv(tcg_env, sv);
1137 }
1138 }
1139
1140 /* Emit any conditional trap before any writeback. */
1141 cond = do_cond(cf, dest, cb_msb, sv);
1142 if (is_tc) {
1143 tmp = tcg_temp_new();
1144 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1145 gen_helper_tcond(tcg_env, tmp);
1146 }
1147
1148 /* Write back the result. */
1149 if (!is_l) {
1150 save_or_nullify(ctx, cpu_psw_cb, cb);
1151 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1152 }
1153 save_gpr(ctx, rt, dest);
1154
1155 /* Install the new nullification. */
1156 cond_free(&ctx->null_cond);
1157 ctx->null_cond = cond;
1158 }
1159
1160 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1161 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1162 {
1163 TCGv_reg tcg_r1, tcg_r2;
1164
1165 if (a->cf) {
1166 nullify_over(ctx);
1167 }
1168 tcg_r1 = load_gpr(ctx, a->r1);
1169 tcg_r2 = load_gpr(ctx, a->r2);
1170 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1171 return nullify_end(ctx);
1172 }
1173
1174 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1175 bool is_tsv, bool is_tc)
1176 {
1177 TCGv_reg tcg_im, tcg_r2;
1178
1179 if (a->cf) {
1180 nullify_over(ctx);
1181 }
1182 tcg_im = tcg_constant_reg(a->i);
1183 tcg_r2 = load_gpr(ctx, a->r);
1184 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1185 return nullify_end(ctx);
1186 }
1187
1188 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1189 TCGv_reg in2, bool is_tsv, bool is_b,
1190 bool is_tc, unsigned cf)
1191 {
1192 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
1193 unsigned c = cf >> 1;
1194 DisasCond cond;
1195
1196 dest = tcg_temp_new();
1197 cb = tcg_temp_new();
1198 cb_msb = tcg_temp_new();
1199
1200 zero = tcg_constant_reg(0);
1201 if (is_b) {
1202 /* DEST,C = IN1 + ~IN2 + C. */
1203 tcg_gen_not_reg(cb, in2);
1204 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
1205 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1206 tcg_gen_xor_reg(cb, cb, in1);
1207 tcg_gen_xor_reg(cb, cb, dest);
1208 } else {
1209 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1210 operations by seeding the high word with 1 and subtracting. */
1211 tcg_gen_movi_reg(cb_msb, 1);
1212 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
1213 tcg_gen_eqv_reg(cb, in1, in2);
1214 tcg_gen_xor_reg(cb, cb, dest);
1215 }
1216
1217 /* Compute signed overflow if required. */
1218 sv = NULL;
1219 if (is_tsv || cond_need_sv(c)) {
1220 sv = do_sub_sv(ctx, dest, in1, in2);
1221 if (is_tsv) {
1222 gen_helper_tsv(tcg_env, sv);
1223 }
1224 }
1225
1226 /* Compute the condition. We cannot use the special case for borrow. */
1227 if (!is_b) {
1228 cond = do_sub_cond(cf, dest, in1, in2, sv);
1229 } else {
1230 cond = do_cond(cf, dest, cb_msb, sv);
1231 }
1232
1233 /* Emit any conditional trap before any writeback. */
1234 if (is_tc) {
1235 tmp = tcg_temp_new();
1236 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1237 gen_helper_tcond(tcg_env, tmp);
1238 }
1239
1240 /* Write back the result. */
1241 save_or_nullify(ctx, cpu_psw_cb, cb);
1242 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1243 save_gpr(ctx, rt, dest);
1244
1245 /* Install the new nullification. */
1246 cond_free(&ctx->null_cond);
1247 ctx->null_cond = cond;
1248 }
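/*
 * A note on the borrow trick above: IN1 - IN2 is IN1 + ~IN2 + 1, and
 * the PSW carry bit for a subtract is !borrow. Seeding the high word
 * with 1 and doing a double-word subtract gives exactly that: for
 * {1:IN1} - {0:IN2}, the low word is IN1 - IN2 and the high word is
 * 1 - borrow, which matches the carry-out of IN1 + ~IN2 + 1.
 */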
1249
1250 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1251 bool is_tsv, bool is_b, bool is_tc)
1252 {
1253 TCGv_reg tcg_r1, tcg_r2;
1254
1255 if (a->cf) {
1256 nullify_over(ctx);
1257 }
1258 tcg_r1 = load_gpr(ctx, a->r1);
1259 tcg_r2 = load_gpr(ctx, a->r2);
1260 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1261 return nullify_end(ctx);
1262 }
1263
1264 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1265 {
1266 TCGv_reg tcg_im, tcg_r2;
1267
1268 if (a->cf) {
1269 nullify_over(ctx);
1270 }
1271 tcg_im = tcg_constant_reg(a->i);
1272 tcg_r2 = load_gpr(ctx, a->r);
1273 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1274 return nullify_end(ctx);
1275 }
1276
1277 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1278 TCGv_reg in2, unsigned cf)
1279 {
1280 TCGv_reg dest, sv;
1281 DisasCond cond;
1282
1283 dest = tcg_temp_new();
1284 tcg_gen_sub_reg(dest, in1, in2);
1285
1286 /* Compute signed overflow if required. */
1287 sv = NULL;
1288 if (cond_need_sv(cf >> 1)) {
1289 sv = do_sub_sv(ctx, dest, in1, in2);
1290 }
1291
1292 /* Form the condition for the compare. */
1293 cond = do_sub_cond(cf, dest, in1, in2, sv);
1294
1295 /* Clear. */
1296 tcg_gen_movi_reg(dest, 0);
1297 save_gpr(ctx, rt, dest);
1298
1299 /* Install the new nullification. */
1300 cond_free(&ctx->null_cond);
1301 ctx->null_cond = cond;
1302 }
1303
1304 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1305 TCGv_reg in2, unsigned cf,
1306 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1307 {
1308 TCGv_reg dest = dest_gpr(ctx, rt);
1309
1310 /* Perform the operation, and writeback. */
1311 fn(dest, in1, in2);
1312 save_gpr(ctx, rt, dest);
1313
1314 /* Install the new nullification. */
1315 cond_free(&ctx->null_cond);
1316 if (cf) {
1317 ctx->null_cond = do_log_cond(cf, dest);
1318 }
1319 }
1320
1321 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1322 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1323 {
1324 TCGv_reg tcg_r1, tcg_r2;
1325
1326 if (a->cf) {
1327 nullify_over(ctx);
1328 }
1329 tcg_r1 = load_gpr(ctx, a->r1);
1330 tcg_r2 = load_gpr(ctx, a->r2);
1331 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1332 return nullify_end(ctx);
1333 }
1334
1335 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1336 TCGv_reg in2, unsigned cf, bool is_tc,
1337 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1338 {
1339 TCGv_reg dest;
1340 DisasCond cond;
1341
1342 if (cf == 0) {
1343 dest = dest_gpr(ctx, rt);
1344 fn(dest, in1, in2);
1345 save_gpr(ctx, rt, dest);
1346 cond_free(&ctx->null_cond);
1347 } else {
1348 dest = tcg_temp_new();
1349 fn(dest, in1, in2);
1350
1351 cond = do_unit_cond(cf, dest, in1, in2);
1352
1353 if (is_tc) {
1354 TCGv_reg tmp = tcg_temp_new();
1355 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1356 gen_helper_tcond(tcg_env, tmp);
1357 }
1358 save_gpr(ctx, rt, dest);
1359
1360 cond_free(&ctx->null_cond);
1361 ctx->null_cond = cond;
1362 }
1363 }
1364
1365 #ifndef CONFIG_USER_ONLY
1366 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1367 from the top 2 bits of the base register. There are a few system
1368 instructions that have a 3-bit space specifier, for which SR0 is
1369 not special. To handle this, pass ~SP. */
1370 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1371 {
1372 TCGv_ptr ptr;
1373 TCGv_reg tmp;
1374 TCGv_i64 spc;
1375
1376 if (sp != 0) {
1377 if (sp < 0) {
1378 sp = ~sp;
1379 }
1380 spc = tcg_temp_new_tl();
1381 load_spr(ctx, spc, sp);
1382 return spc;
1383 }
1384 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1385 return cpu_srH;
1386 }
1387
1388 ptr = tcg_temp_new_ptr();
1389 tmp = tcg_temp_new();
1390 spc = tcg_temp_new_tl();
1391
1392 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1393 tcg_gen_andi_reg(tmp, tmp, 030);
1394 tcg_gen_trunc_reg_ptr(ptr, tmp);
1395
1396 tcg_gen_add_ptr(ptr, ptr, tcg_env);
1397 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1398
1399 return spc;
1400 }
1401 #endif
1402
1403 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1404 unsigned rb, unsigned rx, int scale, target_sreg disp,
1405 unsigned sp, int modify, bool is_phys)
1406 {
1407 TCGv_reg base = load_gpr(ctx, rb);
1408 TCGv_reg ofs;
1409
1410 /* Note that RX is mutually exclusive with DISP. */
1411 if (rx) {
1412 ofs = tcg_temp_new();
1413 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1414 tcg_gen_add_reg(ofs, ofs, base);
1415 } else if (disp || modify) {
1416 ofs = tcg_temp_new();
1417 tcg_gen_addi_reg(ofs, base, disp);
1418 } else {
1419 ofs = base;
1420 }
1421
1422 *pofs = ofs;
1423 #ifdef CONFIG_USER_ONLY
1424 *pgva = (modify <= 0 ? ofs : base);
1425 #else
1426 TCGv_tl addr = tcg_temp_new_tl();
1427 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1428 if (ctx->tb_flags & PSW_W) {
1429 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
1430 }
1431 if (!is_phys) {
1432 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1433 }
1434 *pgva = addr;
1435 #endif
1436 }
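/*
 * Roughly, then: in system mode the global virtual address is the
 * zero-extended offset with the selected space register value OR-ed
 * into the high bits (the space id lives in the upper half of the
 * 64-bit SR value, cf. trans_mfsp below, which shifts right by 32).
 * When PSW_W is set, the offset is first limited to its low 62 bits
 * before the space is merged in.
 */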
1437
1438 /* Emit a memory load. The modify parameter should be
1439 * < 0 for pre-modify,
1440 * > 0 for post-modify,
1441 * = 0 for no base register update.
1442 */
1443 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1444 unsigned rx, int scale, target_sreg disp,
1445 unsigned sp, int modify, MemOp mop)
1446 {
1447 TCGv_reg ofs;
1448 TCGv_tl addr;
1449
1450 /* Caller uses nullify_over/nullify_end. */
1451 assert(ctx->null_cond.c == TCG_COND_NEVER);
1452
1453 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1454 ctx->mmu_idx == MMU_PHYS_IDX);
1455 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1456 if (modify) {
1457 save_gpr(ctx, rb, ofs);
1458 }
1459 }
1460
1461 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1462 unsigned rx, int scale, target_sreg disp,
1463 unsigned sp, int modify, MemOp mop)
1464 {
1465 TCGv_reg ofs;
1466 TCGv_tl addr;
1467
1468 /* Caller uses nullify_over/nullify_end. */
1469 assert(ctx->null_cond.c == TCG_COND_NEVER);
1470
1471 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1472 ctx->mmu_idx == MMU_PHYS_IDX);
1473 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1474 if (modify) {
1475 save_gpr(ctx, rb, ofs);
1476 }
1477 }
1478
1479 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1480 unsigned rx, int scale, target_sreg disp,
1481 unsigned sp, int modify, MemOp mop)
1482 {
1483 TCGv_reg ofs;
1484 TCGv_tl addr;
1485
1486 /* Caller uses nullify_over/nullify_end. */
1487 assert(ctx->null_cond.c == TCG_COND_NEVER);
1488
1489 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1490 ctx->mmu_idx == MMU_PHYS_IDX);
1491 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1492 if (modify) {
1493 save_gpr(ctx, rb, ofs);
1494 }
1495 }
1496
1497 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1498 unsigned rx, int scale, target_sreg disp,
1499 unsigned sp, int modify, MemOp mop)
1500 {
1501 TCGv_reg ofs;
1502 TCGv_tl addr;
1503
1504 /* Caller uses nullify_over/nullify_end. */
1505 assert(ctx->null_cond.c == TCG_COND_NEVER);
1506
1507 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1508 ctx->mmu_idx == MMU_PHYS_IDX);
1509 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1510 if (modify) {
1511 save_gpr(ctx, rb, ofs);
1512 }
1513 }
1514
1515 #if TARGET_REGISTER_BITS == 64
1516 #define do_load_reg do_load_64
1517 #define do_store_reg do_store_64
1518 #else
1519 #define do_load_reg do_load_32
1520 #define do_store_reg do_store_32
1521 #endif
1522
1523 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1524 unsigned rx, int scale, target_sreg disp,
1525 unsigned sp, int modify, MemOp mop)
1526 {
1527 TCGv_reg dest;
1528
1529 nullify_over(ctx);
1530
1531 if (modify == 0) {
1532 /* No base register update. */
1533 dest = dest_gpr(ctx, rt);
1534 } else {
1535 /* Make sure if RT == RB, we see the result of the load. */
1536 dest = tcg_temp_new();
1537 }
1538 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1539 save_gpr(ctx, rt, dest);
1540
1541 return nullify_end(ctx);
1542 }
1543
1544 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1545 unsigned rx, int scale, target_sreg disp,
1546 unsigned sp, int modify)
1547 {
1548 TCGv_i32 tmp;
1549
1550 nullify_over(ctx);
1551
1552 tmp = tcg_temp_new_i32();
1553 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1554 save_frw_i32(rt, tmp);
1555
1556 if (rt == 0) {
1557 gen_helper_loaded_fr0(tcg_env);
1558 }
1559
1560 return nullify_end(ctx);
1561 }
1562
1563 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1564 {
1565 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1566 a->disp, a->sp, a->m);
1567 }
1568
1569 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1570 unsigned rx, int scale, target_sreg disp,
1571 unsigned sp, int modify)
1572 {
1573 TCGv_i64 tmp;
1574
1575 nullify_over(ctx);
1576
1577 tmp = tcg_temp_new_i64();
1578 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1579 save_frd(rt, tmp);
1580
1581 if (rt == 0) {
1582 gen_helper_loaded_fr0(tcg_env);
1583 }
1584
1585 return nullify_end(ctx);
1586 }
1587
1588 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1589 {
1590 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1591 a->disp, a->sp, a->m);
1592 }
1593
1594 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1595 target_sreg disp, unsigned sp,
1596 int modify, MemOp mop)
1597 {
1598 nullify_over(ctx);
1599 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1600 return nullify_end(ctx);
1601 }
1602
1603 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1604 unsigned rx, int scale, target_sreg disp,
1605 unsigned sp, int modify)
1606 {
1607 TCGv_i32 tmp;
1608
1609 nullify_over(ctx);
1610
1611 tmp = load_frw_i32(rt);
1612 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1613
1614 return nullify_end(ctx);
1615 }
1616
1617 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1618 {
1619 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1620 a->disp, a->sp, a->m);
1621 }
1622
1623 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1624 unsigned rx, int scale, target_sreg disp,
1625 unsigned sp, int modify)
1626 {
1627 TCGv_i64 tmp;
1628
1629 nullify_over(ctx);
1630
1631 tmp = load_frd(rt);
1632 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1633
1634 return nullify_end(ctx);
1635 }
1636
1637 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1638 {
1639 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1640 a->disp, a->sp, a->m);
1641 }
1642
1643 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1644 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1645 {
1646 TCGv_i32 tmp;
1647
1648 nullify_over(ctx);
1649 tmp = load_frw0_i32(ra);
1650
1651 func(tmp, tcg_env, tmp);
1652
1653 save_frw_i32(rt, tmp);
1654 return nullify_end(ctx);
1655 }
1656
1657 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1658 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1659 {
1660 TCGv_i32 dst;
1661 TCGv_i64 src;
1662
1663 nullify_over(ctx);
1664 src = load_frd(ra);
1665 dst = tcg_temp_new_i32();
1666
1667 func(dst, tcg_env, src);
1668
1669 save_frw_i32(rt, dst);
1670 return nullify_end(ctx);
1671 }
1672
1673 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1674 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1675 {
1676 TCGv_i64 tmp;
1677
1678 nullify_over(ctx);
1679 tmp = load_frd0(ra);
1680
1681 func(tmp, tcg_env, tmp);
1682
1683 save_frd(rt, tmp);
1684 return nullify_end(ctx);
1685 }
1686
1687 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1688 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1689 {
1690 TCGv_i32 src;
1691 TCGv_i64 dst;
1692
1693 nullify_over(ctx);
1694 src = load_frw0_i32(ra);
1695 dst = tcg_temp_new_i64();
1696
1697 func(dst, tcg_env, src);
1698
1699 save_frd(rt, dst);
1700 return nullify_end(ctx);
1701 }
1702
1703 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1704 unsigned ra, unsigned rb,
1705 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1706 {
1707 TCGv_i32 a, b;
1708
1709 nullify_over(ctx);
1710 a = load_frw0_i32(ra);
1711 b = load_frw0_i32(rb);
1712
1713 func(a, tcg_env, a, b);
1714
1715 save_frw_i32(rt, a);
1716 return nullify_end(ctx);
1717 }
1718
1719 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1720 unsigned ra, unsigned rb,
1721 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1722 {
1723 TCGv_i64 a, b;
1724
1725 nullify_over(ctx);
1726 a = load_frd0(ra);
1727 b = load_frd0(rb);
1728
1729 func(a, tcg_env, a, b);
1730
1731 save_frd(rt, a);
1732 return nullify_end(ctx);
1733 }
1734
1735 /* Emit an unconditional branch to a direct target, which may or may not
1736 have already had nullification handled. */
1737 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1738 unsigned link, bool is_n)
1739 {
1740 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1741 if (link != 0) {
1742 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1743 }
1744 ctx->iaoq_n = dest;
1745 if (is_n) {
1746 ctx->null_cond.c = TCG_COND_ALWAYS;
1747 }
1748 } else {
1749 nullify_over(ctx);
1750
1751 if (link != 0) {
1752 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1753 }
1754
1755 if (is_n && use_nullify_skip(ctx)) {
1756 nullify_set(ctx, 0);
1757 gen_goto_tb(ctx, 0, dest, dest + 4);
1758 } else {
1759 nullify_set(ctx, is_n);
1760 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1761 }
1762
1763 nullify_end(ctx);
1764
1765 nullify_set(ctx, 0);
1766 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1767 ctx->base.is_jmp = DISAS_NORETURN;
1768 }
1769 return true;
1770 }
1771
1772 /* Emit a conditional branch to a direct target. If the branch itself
1773 is nullified, we should have already used nullify_over. */
1774 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1775 DisasCond *cond)
1776 {
1777 target_ureg dest = iaoq_dest(ctx, disp);
1778 TCGLabel *taken = NULL;
1779 TCGCond c = cond->c;
1780 bool n;
1781
1782 assert(ctx->null_cond.c == TCG_COND_NEVER);
1783
1784 /* Handle TRUE and NEVER as direct branches. */
1785 if (c == TCG_COND_ALWAYS) {
1786 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1787 }
1788 if (c == TCG_COND_NEVER) {
1789 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1790 }
1791
1792 taken = gen_new_label();
1793 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1794 cond_free(cond);
1795
1796 /* Not taken: Condition not satisfied; nullify on backward branches. */
1797 n = is_n && disp < 0;
1798 if (n && use_nullify_skip(ctx)) {
1799 nullify_set(ctx, 0);
1800 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1801 } else {
1802 if (!n && ctx->null_lab) {
1803 gen_set_label(ctx->null_lab);
1804 ctx->null_lab = NULL;
1805 }
1806 nullify_set(ctx, n);
1807 if (ctx->iaoq_n == -1) {
1808 /* The temporary iaoq_n_var died at the branch above.
1809 Regenerate it here instead of saving it. */
1810 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1811 }
1812 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1813 }
1814
1815 gen_set_label(taken);
1816
1817 /* Taken: Condition satisfied; nullify on forward branches. */
1818 n = is_n && disp >= 0;
1819 if (n && use_nullify_skip(ctx)) {
1820 nullify_set(ctx, 0);
1821 gen_goto_tb(ctx, 1, dest, dest + 4);
1822 } else {
1823 nullify_set(ctx, n);
1824 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1825 }
1826
1827 /* Not taken: the branch itself was nullified. */
1828 if (ctx->null_lab) {
1829 gen_set_label(ctx->null_lab);
1830 ctx->null_lab = NULL;
1831 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1832 } else {
1833 ctx->base.is_jmp = DISAS_NORETURN;
1834 }
1835 return true;
1836 }
1837
1838 /* Emit an unconditional branch to an indirect target. This handles
1839 nullification of the branch itself. */
1840 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1841 unsigned link, bool is_n)
1842 {
1843 TCGv_reg a0, a1, next, tmp;
1844 TCGCond c;
1845
1846 assert(ctx->null_lab == NULL);
1847
1848 if (ctx->null_cond.c == TCG_COND_NEVER) {
1849 if (link != 0) {
1850 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1851 }
1852 next = tcg_temp_new();
1853 tcg_gen_mov_reg(next, dest);
1854 if (is_n) {
1855 if (use_nullify_skip(ctx)) {
1856 tcg_gen_mov_reg(cpu_iaoq_f, next);
1857 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1858 nullify_set(ctx, 0);
1859 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1860 return true;
1861 }
1862 ctx->null_cond.c = TCG_COND_ALWAYS;
1863 }
1864 ctx->iaoq_n = -1;
1865 ctx->iaoq_n_var = next;
1866 } else if (is_n && use_nullify_skip(ctx)) {
1867 /* The (conditional) branch, B, nullifies the next insn, N,
1868 and we're allowed to skip executing N (no single-step or
1869 tracepoint in effect). Since the goto_ptr that we must use
1870 for the indirect branch consumes no special resources, we
1871 can (conditionally) skip B and continue execution. */
1872 /* The use_nullify_skip test implies we have a known control path. */
1873 tcg_debug_assert(ctx->iaoq_b != -1);
1874 tcg_debug_assert(ctx->iaoq_n != -1);
1875
1876 /* We do have to handle the non-local temporary, DEST, before
1877 branching. Since IAOQ_F is not really live at this point, we
1878 can simply store DEST optimistically. Similarly with IAOQ_B. */
1879 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1880 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1881
1882 nullify_over(ctx);
1883 if (link != 0) {
1884 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1885 }
1886 tcg_gen_lookup_and_goto_ptr();
1887 return nullify_end(ctx);
1888 } else {
1889 c = ctx->null_cond.c;
1890 a0 = ctx->null_cond.a0;
1891 a1 = ctx->null_cond.a1;
1892
1893 tmp = tcg_temp_new();
1894 next = tcg_temp_new();
1895
1896 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1897 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1898 ctx->iaoq_n = -1;
1899 ctx->iaoq_n_var = next;
1900
1901 if (link != 0) {
1902 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1903 }
1904
1905 if (is_n) {
1906 /* The branch nullifies the next insn, which means the state of N
1907 after the branch is the inverse of the state of N that applied
1908 to the branch. */
1909 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1910 cond_free(&ctx->null_cond);
1911 ctx->null_cond = cond_make_n();
1912 ctx->psw_n_nonzero = true;
1913 } else {
1914 cond_free(&ctx->null_cond);
1915 }
1916 }
1917 return true;
1918 }
1919
1920 /* Implement
1921 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1922 * IAOQ_Next{30..31} ← GR[b]{30..31};
1923 * else
1924 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1925 * which keeps the privilege level from being increased.
1926 */
1927 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1928 {
1929 TCGv_reg dest;
1930 switch (ctx->privilege) {
1931 case 0:
1932 /* Privilege 0 is maximum and is allowed to decrease. */
1933 return offset;
1934 case 3:
1935 /* Privilege 3 is minimum and is never allowed to increase. */
1936 dest = tcg_temp_new();
1937 tcg_gen_ori_reg(dest, offset, 3);
1938 break;
1939 default:
1940 dest = tcg_temp_new();
1941 tcg_gen_andi_reg(dest, offset, -4);
1942 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1943 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1944 break;
1945 }
1946 return dest;
1947 }
1948
1949 #ifdef CONFIG_USER_ONLY
1950 /* On Linux, page zero is normally marked execute only + gateway.
1951 Therefore normal read or write is supposed to fail, but specific
1952 offsets have kernel code mapped to raise permissions to implement
1953 system calls. Handling this via an explicit check here, rather
1954 than in the "be disp(sr2,r0)" instruction that probably sent us
1955 here, is the easiest way to handle the branch delay slot on the
1956 aforementioned BE. */
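/* For illustration (the exact sequence is the guest C library's choice and
 * is only assumed here): a Linux/hppa system call normally reaches offset
 * 0x100 of page zero with something like
 *     ble  0x100(%sr2, %r0)
 *     ldi  <nr>, %r20          -- syscall number set in the delay slot
 * so the dispatch below only has to look at the low bits of IAOQ_Front. */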
1957 static void do_page_zero(DisasContext *ctx)
1958 {
1959 /* If by some means we get here with PSW[N]=1, that implies that
1960 the B,GATE instruction would be skipped, and we'd fault on the
1961 next insn within the privileged page. */
1962 switch (ctx->null_cond.c) {
1963 case TCG_COND_NEVER:
1964 break;
1965 case TCG_COND_ALWAYS:
1966 tcg_gen_movi_reg(cpu_psw_n, 0);
1967 goto do_sigill;
1968 default:
1969 /* Since this is always the first (and only) insn within the
1970 TB, we should know the state of PSW[N] from TB->FLAGS. */
1971 g_assert_not_reached();
1972 }
1973
1974 /* Check that we didn't arrive here via some means that allowed
1975 non-sequential instruction execution. Normally the PSW[B] bit
1976 detects this by disallowing the B,GATE instruction from executing
1977 under such conditions. */
1978 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1979 goto do_sigill;
1980 }
1981
1982 switch (ctx->iaoq_f & -4) {
1983 case 0x00: /* Null pointer call */
1984 gen_excp_1(EXCP_IMP);
1985 ctx->base.is_jmp = DISAS_NORETURN;
1986 break;
1987
1988 case 0xb0: /* LWS */
1989 gen_excp_1(EXCP_SYSCALL_LWS);
1990 ctx->base.is_jmp = DISAS_NORETURN;
1991 break;
1992
1993 case 0xe0: /* SET_THREAD_POINTER */
1994 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1995 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
1996 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
1997 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1998 break;
1999
2000 case 0x100: /* SYSCALL */
2001 gen_excp_1(EXCP_SYSCALL);
2002 ctx->base.is_jmp = DISAS_NORETURN;
2003 break;
2004
2005 default:
2006 do_sigill:
2007 gen_excp_1(EXCP_ILL);
2008 ctx->base.is_jmp = DISAS_NORETURN;
2009 break;
2010 }
2011 }
2012 #endif
2013
2014 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2015 {
2016 cond_free(&ctx->null_cond);
2017 return true;
2018 }
2019
2020 static bool trans_break(DisasContext *ctx, arg_break *a)
2021 {
2022 return gen_excp_iir(ctx, EXCP_BREAK);
2023 }
2024
2025 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2026 {
2027 /* No point in nullifying the memory barrier. */
2028 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2029
2030 cond_free(&ctx->null_cond);
2031 return true;
2032 }
2033
2034 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2035 {
2036 unsigned rt = a->t;
2037 TCGv_reg tmp = dest_gpr(ctx, rt);
2038 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2039 save_gpr(ctx, rt, tmp);
2040
2041 cond_free(&ctx->null_cond);
2042 return true;
2043 }
2044
2045 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2046 {
2047 unsigned rt = a->t;
2048 unsigned rs = a->sp;
2049 TCGv_i64 t0 = tcg_temp_new_i64();
2050 TCGv_reg t1 = tcg_temp_new();
2051
2052 load_spr(ctx, t0, rs);
2053 tcg_gen_shri_i64(t0, t0, 32);
2054 tcg_gen_trunc_i64_reg(t1, t0);
2055
2056 save_gpr(ctx, rt, t1);
2057
2058 cond_free(&ctx->null_cond);
2059 return true;
2060 }
2061
2062 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2063 {
2064 unsigned rt = a->t;
2065 unsigned ctl = a->r;
2066 TCGv_reg tmp;
2067
2068 switch (ctl) {
2069 case CR_SAR:
2070 #ifdef TARGET_HPPA64
2071 if (a->e == 0) {
2072 /* MFSAR without ,W masks low 5 bits. */
2073 tmp = dest_gpr(ctx, rt);
2074 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2075 save_gpr(ctx, rt, tmp);
2076 goto done;
2077 }
2078 #endif
2079 save_gpr(ctx, rt, cpu_sar);
2080 goto done;
2081 case CR_IT: /* Interval Timer */
2082 /* FIXME: Respect PSW_S bit. */
2083 nullify_over(ctx);
2084 tmp = dest_gpr(ctx, rt);
2085 if (translator_io_start(&ctx->base)) {
2086 gen_helper_read_interval_timer(tmp);
2087 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2088 } else {
2089 gen_helper_read_interval_timer(tmp);
2090 }
2091 save_gpr(ctx, rt, tmp);
2092 return nullify_end(ctx);
2093 case 26:
2094 case 27:
2095 break;
2096 default:
2097 /* All other control registers are privileged. */
2098 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2099 break;
2100 }
2101
2102 tmp = tcg_temp_new();
2103 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2104 save_gpr(ctx, rt, tmp);
2105
2106 done:
2107 cond_free(&ctx->null_cond);
2108 return true;
2109 }
2110
2111 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2112 {
2113 unsigned rr = a->r;
2114 unsigned rs = a->sp;
2115 TCGv_i64 t64;
2116
2117 if (rs >= 5) {
2118 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2119 }
2120 nullify_over(ctx);
2121
2122 t64 = tcg_temp_new_i64();
2123 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2124 tcg_gen_shli_i64(t64, t64, 32);
2125
2126 if (rs >= 4) {
2127 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2128 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2129 } else {
2130 tcg_gen_mov_i64(cpu_sr[rs], t64);
2131 }
2132
2133 return nullify_end(ctx);
2134 }
2135
2136 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2137 {
2138 unsigned ctl = a->t;
2139 TCGv_reg reg;
2140 TCGv_reg tmp;
2141
2142 if (ctl == CR_SAR) {
2143 reg = load_gpr(ctx, a->r);
2144 tmp = tcg_temp_new();
2145 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2146 save_or_nullify(ctx, cpu_sar, tmp);
2147
2148 cond_free(&ctx->null_cond);
2149 return true;
2150 }
2151
2152 /* All other control registers are privileged or read-only. */
2153 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2154
2155 #ifndef CONFIG_USER_ONLY
2156 nullify_over(ctx);
2157 reg = load_gpr(ctx, a->r);
2158
2159 switch (ctl) {
2160 case CR_IT:
2161 gen_helper_write_interval_timer(tcg_env, reg);
2162 break;
2163 case CR_EIRR:
2164 gen_helper_write_eirr(tcg_env, reg);
2165 break;
2166 case CR_EIEM:
2167 gen_helper_write_eiem(tcg_env, reg);
2168 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2169 break;
2170
2171 case CR_IIASQ:
2172 case CR_IIAOQ:
2173 /* FIXME: Respect PSW_Q bit */
2174 /* The write advances the queue and stores to the back element. */
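/* In effect the two-deep queue acts as a small shift register: the old
 * back element (cr_back[ctl - CR_IIASQ]) becomes the new front (cr[ctl])
 * and the value written becomes the new back, so two successive writes
 * leave the first value in the front and the second in the back. */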
2175 tmp = tcg_temp_new();
2176 tcg_gen_ld_reg(tmp, tcg_env,
2177 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2178 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2179 tcg_gen_st_reg(reg, tcg_env,
2180 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2181 break;
2182
2183 case CR_PID1:
2184 case CR_PID2:
2185 case CR_PID3:
2186 case CR_PID4:
2187 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2188 #ifndef CONFIG_USER_ONLY
2189 gen_helper_change_prot_id(tcg_env);
2190 #endif
2191 break;
2192
2193 default:
2194 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2195 break;
2196 }
2197 return nullify_end(ctx);
2198 #endif
2199 }
2200
2201 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2202 {
2203 TCGv_reg tmp = tcg_temp_new();
2204
2205 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2206 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2207 save_or_nullify(ctx, cpu_sar, tmp);
2208
2209 cond_free(&ctx->null_cond);
2210 return true;
2211 }
2212
2213 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2214 {
2215 TCGv_reg dest = dest_gpr(ctx, a->t);
2216
2217 #ifdef CONFIG_USER_ONLY
2218 /* We don't implement space registers in user mode. */
2219 tcg_gen_movi_reg(dest, 0);
2220 #else
2221 TCGv_i64 t0 = tcg_temp_new_i64();
2222
2223 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2224 tcg_gen_shri_i64(t0, t0, 32);
2225 tcg_gen_trunc_i64_reg(dest, t0);
2226 #endif
2227 save_gpr(ctx, a->t, dest);
2228
2229 cond_free(&ctx->null_cond);
2230 return true;
2231 }
2232
2233 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2234 {
2235 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2236 #ifndef CONFIG_USER_ONLY
2237 TCGv_reg tmp;
2238
2239 nullify_over(ctx);
2240
2241 tmp = tcg_temp_new();
2242 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2243 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2244 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2245 save_gpr(ctx, a->t, tmp);
2246
2247 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2248 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2249 return nullify_end(ctx);
2250 #endif
2251 }
2252
2253 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2254 {
2255 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2256 #ifndef CONFIG_USER_ONLY
2257 TCGv_reg tmp;
2258
2259 nullify_over(ctx);
2260
2261 tmp = tcg_temp_new();
2262 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2263 tcg_gen_ori_reg(tmp, tmp, a->i);
2264 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2265 save_gpr(ctx, a->t, tmp);
2266
2267 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2268 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2269 return nullify_end(ctx);
2270 #endif
2271 }
2272
2273 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2274 {
2275 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2276 #ifndef CONFIG_USER_ONLY
2277 TCGv_reg tmp, reg;
2278 nullify_over(ctx);
2279
2280 reg = load_gpr(ctx, a->r);
2281 tmp = tcg_temp_new();
2282 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2283
2284 /* Exit the TB to recognize new interrupts. */
2285 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2286 return nullify_end(ctx);
2287 #endif
2288 }
2289
2290 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2291 {
2292 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2293 #ifndef CONFIG_USER_ONLY
2294 nullify_over(ctx);
2295
2296 if (rfi_r) {
2297 gen_helper_rfi_r(tcg_env);
2298 } else {
2299 gen_helper_rfi(tcg_env);
2300 }
2301 /* Exit the TB to recognize new interrupts. */
2302 tcg_gen_exit_tb(NULL, 0);
2303 ctx->base.is_jmp = DISAS_NORETURN;
2304
2305 return nullify_end(ctx);
2306 #endif
2307 }
2308
2309 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2310 {
2311 return do_rfi(ctx, false);
2312 }
2313
2314 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2315 {
2316 return do_rfi(ctx, true);
2317 }
2318
2319 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2320 {
2321 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2322 #ifndef CONFIG_USER_ONLY
2323 nullify_over(ctx);
2324 gen_helper_halt(tcg_env);
2325 ctx->base.is_jmp = DISAS_NORETURN;
2326 return nullify_end(ctx);
2327 #endif
2328 }
2329
2330 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2331 {
2332 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2333 #ifndef CONFIG_USER_ONLY
2334 nullify_over(ctx);
2335 gen_helper_reset(tcg_env);
2336 ctx->base.is_jmp = DISAS_NORETURN;
2337 return nullify_end(ctx);
2338 #endif
2339 }
2340
2341 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2342 {
2343 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2344 #ifndef CONFIG_USER_ONLY
2345 nullify_over(ctx);
2346 gen_helper_getshadowregs(tcg_env);
2347 return nullify_end(ctx);
2348 #endif
2349 }
2350
2351 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2352 {
2353 if (a->m) {
2354 TCGv_reg dest = dest_gpr(ctx, a->b);
2355 TCGv_reg src1 = load_gpr(ctx, a->b);
2356 TCGv_reg src2 = load_gpr(ctx, a->x);
2357
2358 /* The only thing we need to do is the base register modification. */
2359 tcg_gen_add_reg(dest, src1, src2);
2360 save_gpr(ctx, a->b, dest);
2361 }
2362 cond_free(&ctx->null_cond);
2363 return true;
2364 }
2365
2366 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2367 {
2368 TCGv_reg dest, ofs;
2369 TCGv_i32 level, want;
2370 TCGv_tl addr;
2371
2372 nullify_over(ctx);
2373
2374 dest = dest_gpr(ctx, a->t);
2375 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2376
2377 if (a->imm) {
2378 level = tcg_constant_i32(a->ri);
2379 } else {
2380 level = tcg_temp_new_i32();
2381 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2382 tcg_gen_andi_i32(level, level, 3);
2383 }
2384 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2385
2386 gen_helper_probe(dest, tcg_env, addr, level, want);
2387
2388 save_gpr(ctx, a->t, dest);
2389 return nullify_end(ctx);
2390 }
2391
2392 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2393 {
2394 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2395 #ifndef CONFIG_USER_ONLY
2396 TCGv_tl addr;
2397 TCGv_reg ofs, reg;
2398
2399 nullify_over(ctx);
2400
2401 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2402 reg = load_gpr(ctx, a->r);
2403 if (a->addr) {
2404 gen_helper_itlba(tcg_env, addr, reg);
2405 } else {
2406 gen_helper_itlbp(tcg_env, addr, reg);
2407 }
2408
2409 /* Exit TB for TLB change if mmu is enabled. */
2410 if (ctx->tb_flags & PSW_C) {
2411 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2412 }
2413 return nullify_end(ctx);
2414 #endif
2415 }
2416
2417 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2418 {
2419 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2420 #ifndef CONFIG_USER_ONLY
2421 TCGv_tl addr;
2422 TCGv_reg ofs;
2423
2424 nullify_over(ctx);
2425
2426 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2427 if (a->m) {
2428 save_gpr(ctx, a->b, ofs);
2429 }
2430 if (a->local) {
2431 gen_helper_ptlbe(tcg_env);
2432 } else {
2433 gen_helper_ptlb(tcg_env, addr);
2434 }
2435
2436 /* Exit TB for TLB change if mmu is enabled. */
2437 if (ctx->tb_flags & PSW_C) {
2438 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2439 }
2440 return nullify_end(ctx);
2441 #endif
2442 }
2443
2444 /*
2445 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2446 * See
2447 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2448 * page 13-9 (195/206)
2449 */
2450 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2451 {
2452 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2453 #ifndef CONFIG_USER_ONLY
2454 TCGv_tl addr, atl, stl;
2455 TCGv_reg reg;
2456
2457 nullify_over(ctx);
2458
2459 /*
2460 * FIXME:
2461 * if (not (pcxl or pcxl2))
2462 * return gen_illegal(ctx);
2463 *
2464 * Note for future: these are 32-bit systems; no hppa64.
2465 */
2466
2467 atl = tcg_temp_new_tl();
2468 stl = tcg_temp_new_tl();
2469 addr = tcg_temp_new_tl();
2470
2471 tcg_gen_ld32u_i64(stl, tcg_env,
2472 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2473 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2474 tcg_gen_ld32u_i64(atl, tcg_env,
2475 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2476 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2477 tcg_gen_shli_i64(stl, stl, 32);
2478 tcg_gen_or_tl(addr, atl, stl);
2479
2480 reg = load_gpr(ctx, a->r);
2481 if (a->addr) {
2482 gen_helper_itlba(tcg_env, addr, reg);
2483 } else {
2484 gen_helper_itlbp(tcg_env, addr, reg);
2485 }
2486
2487 /* Exit TB for TLB change if mmu is enabled. */
2488 if (ctx->tb_flags & PSW_C) {
2489 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2490 }
2491 return nullify_end(ctx);
2492 #endif
2493 }
2494
2495 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2496 {
2497 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2498 #ifndef CONFIG_USER_ONLY
2499 TCGv_tl vaddr;
2500 TCGv_reg ofs, paddr;
2501
2502 nullify_over(ctx);
2503
2504 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2505
2506 paddr = tcg_temp_new();
2507 gen_helper_lpa(paddr, tcg_env, vaddr);
2508
2509 /* Note that physical address result overrides base modification. */
2510 if (a->m) {
2511 save_gpr(ctx, a->b, ofs);
2512 }
2513 save_gpr(ctx, a->t, paddr);
2514
2515 return nullify_end(ctx);
2516 #endif
2517 }
2518
2519 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2520 {
2521 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2522
2523 /* The Coherence Index is an implementation-defined function of the
2524 physical address. Two addresses with the same CI have a coherent
2525 view of the cache. Our implementation returns 0 for all addresses,
2526 since the entire address space is coherent. */
2527 save_gpr(ctx, a->t, tcg_constant_reg(0));
2528
2529 cond_free(&ctx->null_cond);
2530 return true;
2531 }
2532
2533 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2534 {
2535 return do_add_reg(ctx, a, false, false, false, false);
2536 }
2537
2538 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2539 {
2540 return do_add_reg(ctx, a, true, false, false, false);
2541 }
2542
2543 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2544 {
2545 return do_add_reg(ctx, a, false, true, false, false);
2546 }
2547
2548 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2549 {
2550 return do_add_reg(ctx, a, false, false, false, true);
2551 }
2552
2553 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2554 {
2555 return do_add_reg(ctx, a, false, true, false, true);
2556 }
2557
2558 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2559 {
2560 return do_sub_reg(ctx, a, false, false, false);
2561 }
2562
2563 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2564 {
2565 return do_sub_reg(ctx, a, true, false, false);
2566 }
2567
2568 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2569 {
2570 return do_sub_reg(ctx, a, false, false, true);
2571 }
2572
2573 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2574 {
2575 return do_sub_reg(ctx, a, true, false, true);
2576 }
2577
2578 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2579 {
2580 return do_sub_reg(ctx, a, false, true, false);
2581 }
2582
2583 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2584 {
2585 return do_sub_reg(ctx, a, true, true, false);
2586 }
2587
2588 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2589 {
2590 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2591 }
2592
2593 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2594 {
2595 return do_log_reg(ctx, a, tcg_gen_and_reg);
2596 }
2597
2598 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2599 {
2600 if (a->cf == 0) {
2601 unsigned r2 = a->r2;
2602 unsigned r1 = a->r1;
2603 unsigned rt = a->t;
2604
2605 if (rt == 0) { /* NOP */
2606 cond_free(&ctx->null_cond);
2607 return true;
2608 }
2609 if (r2 == 0) { /* COPY */
2610 if (r1 == 0) {
2611 TCGv_reg dest = dest_gpr(ctx, rt);
2612 tcg_gen_movi_reg(dest, 0);
2613 save_gpr(ctx, rt, dest);
2614 } else {
2615 save_gpr(ctx, rt, cpu_gr[r1]);
2616 }
2617 cond_free(&ctx->null_cond);
2618 return true;
2619 }
2620 #ifndef CONFIG_USER_ONLY
2621 /* These are QEMU extensions and are nops in the real architecture:
2622 *
2623 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2624 * or %r31,%r31,%r31 -- death loop; take this cpu offline
2625 * (currently implemented the same as idle).
2626 */
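/* As a usage sketch (guest code, purely illustrative), an aware guest can
 * idle with
 *     idle:  or   %r10,%r10,%r10
 *            b    idle
 *            nop
 * and each pass through the loop halts this vcpu until the next interrupt
 * instead of burning host CPU time. */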
2627 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2628 /* No need to check for supervisor, as userland can only pause
2629 until the next timer interrupt. */
2630 nullify_over(ctx);
2631
2632 /* Advance the instruction queue. */
2633 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2634 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2635 nullify_set(ctx, 0);
2636
2637 /* Tell the qemu main loop to halt until this cpu has work. */
2638 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2639 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2640 gen_excp_1(EXCP_HALTED);
2641 ctx->base.is_jmp = DISAS_NORETURN;
2642
2643 return nullify_end(ctx);
2644 }
2645 #endif
2646 }
2647 return do_log_reg(ctx, a, tcg_gen_or_reg);
2648 }
2649
2650 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2651 {
2652 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2653 }
2654
2655 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2656 {
2657 TCGv_reg tcg_r1, tcg_r2;
2658
2659 if (a->cf) {
2660 nullify_over(ctx);
2661 }
2662 tcg_r1 = load_gpr(ctx, a->r1);
2663 tcg_r2 = load_gpr(ctx, a->r2);
2664 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2665 return nullify_end(ctx);
2666 }
2667
2668 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2669 {
2670 TCGv_reg tcg_r1, tcg_r2;
2671
2672 if (a->cf) {
2673 nullify_over(ctx);
2674 }
2675 tcg_r1 = load_gpr(ctx, a->r1);
2676 tcg_r2 = load_gpr(ctx, a->r2);
2677 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2678 return nullify_end(ctx);
2679 }
2680
2681 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2682 {
2683 TCGv_reg tcg_r1, tcg_r2, tmp;
2684
2685 if (a->cf) {
2686 nullify_over(ctx);
2687 }
2688 tcg_r1 = load_gpr(ctx, a->r1);
2689 tcg_r2 = load_gpr(ctx, a->r2);
2690 tmp = tcg_temp_new();
2691 tcg_gen_not_reg(tmp, tcg_r2);
2692 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2693 return nullify_end(ctx);
2694 }
2695
2696 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2697 {
2698 return do_uaddcm(ctx, a, false);
2699 }
2700
2701 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2702 {
2703 return do_uaddcm(ctx, a, true);
2704 }
2705
2706 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2707 {
2708 TCGv_reg tmp;
2709
2710 nullify_over(ctx);
2711
2712 tmp = tcg_temp_new();
2713 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2714 if (!is_i) {
2715 tcg_gen_not_reg(tmp, tmp);
2716 }
2717 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2718 tcg_gen_muli_reg(tmp, tmp, 6);
2719 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2720 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2721 return nullify_end(ctx);
2722 }
2723
2724 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2725 {
2726 return do_dcor(ctx, a, false);
2727 }
2728
2729 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2730 {
2731 return do_dcor(ctx, a, true);
2732 }
2733
2734 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2735 {
2736 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2737 TCGv_reg cout;
2738
2739 nullify_over(ctx);
2740
2741 in1 = load_gpr(ctx, a->r1);
2742 in2 = load_gpr(ctx, a->r2);
2743
2744 add1 = tcg_temp_new();
2745 add2 = tcg_temp_new();
2746 addc = tcg_temp_new();
2747 dest = tcg_temp_new();
2748 zero = tcg_constant_reg(0);
2749
2750 /* Form R1 << 1 | PSW[CB]{8}. */
2751 tcg_gen_add_reg(add1, in1, in1);
2752 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2753
2754 /*
2755 * Add or subtract R2, depending on PSW[V]. Proper computation of
2756 * carry requires that we subtract via + ~R2 + 1, as described in
2757 * the manual. By extracting and masking V, we can produce the
2758 * proper inputs to the addition without movcond.
2759 */
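/* Concretely: the sextract below broadcasts PSW[V] to all-ones or zero.
 * With V set, add2 == in2 ^ -1 == ~in2 and addc == 1, so the two add2
 * steps compute add1 + ~in2 + 1 == add1 - in2; with V clear, add2 == in2
 * and addc == 0, giving add1 + in2.  Same effect as a movcond between add
 * and subtract, but branch-free. */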
2760 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2761 tcg_gen_xor_reg(add2, in2, addc);
2762 tcg_gen_andi_reg(addc, addc, 1);
2763
2764 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2765 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2766
2767 /* Write back the result register. */
2768 save_gpr(ctx, a->t, dest);
2769
2770 /* Write back PSW[CB]. */
2771 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2772 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2773
2774 /* Write back PSW[V] for the division step. */
2775 cout = get_psw_carry(ctx, false);
2776 tcg_gen_neg_reg(cpu_psw_v, cout);
2777 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2778
2779 /* Install the new nullification. */
2780 if (a->cf) {
2781 TCGv_reg sv = NULL;
2782 if (cond_need_sv(a->cf >> 1)) {
2783 /* ??? The lshift is supposed to contribute to overflow. */
2784 sv = do_add_sv(ctx, dest, add1, add2);
2785 }
2786 ctx->null_cond = do_cond(a->cf, dest, cout, sv);
2787 }
2788
2789 return nullify_end(ctx);
2790 }
2791
2792 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2793 {
2794 return do_add_imm(ctx, a, false, false);
2795 }
2796
2797 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2798 {
2799 return do_add_imm(ctx, a, true, false);
2800 }
2801
2802 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2803 {
2804 return do_add_imm(ctx, a, false, true);
2805 }
2806
2807 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2808 {
2809 return do_add_imm(ctx, a, true, true);
2810 }
2811
2812 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2813 {
2814 return do_sub_imm(ctx, a, false);
2815 }
2816
2817 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2818 {
2819 return do_sub_imm(ctx, a, true);
2820 }
2821
2822 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2823 {
2824 TCGv_reg tcg_im, tcg_r2;
2825
2826 if (a->cf) {
2827 nullify_over(ctx);
2828 }
2829
2830 tcg_im = tcg_constant_reg(a->i);
2831 tcg_r2 = load_gpr(ctx, a->r);
2832 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2833
2834 return nullify_end(ctx);
2835 }
2836
2837 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2838 {
2839 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2840 return gen_illegal(ctx);
2841 } else {
2842 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2843 a->disp, a->sp, a->m, a->size | MO_TE);
2844 }
2845 }
2846
2847 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2848 {
2849 assert(a->x == 0 && a->scale == 0);
2850 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2851 return gen_illegal(ctx);
2852 } else {
2853 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2854 }
2855 }
2856
2857 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2858 {
2859 MemOp mop = MO_TE | MO_ALIGN | a->size;
2860 TCGv_reg zero, dest, ofs;
2861 TCGv_tl addr;
2862
2863 nullify_over(ctx);
2864
2865 if (a->m) {
2866 /* Base register modification. Make sure that if RT == RB,
2867 we see the result of the load. */
2868 dest = tcg_temp_new();
2869 } else {
2870 dest = dest_gpr(ctx, a->t);
2871 }
2872
2873 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2874 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2875
2876 /*
2877 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2878 * However actual hardware succeeds with aligned mod 4.
2879 * Detect this case and log a GUEST_ERROR.
2880 *
2881 * TODO: HPPA64 relaxes the over-alignment requirement
2882 * with the ,co completer.
2883 */
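/* Worked example of the check (address illustrative): an LDCW at address
 * 0x...1008 is aligned mod 4 but not mod 16; the helper only logs a
 * GUEST_ERROR for it and the exchange below still proceeds, matching what
 * real hardware has been observed to accept. */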
2884 gen_helper_ldc_check(addr);
2885
2886 zero = tcg_constant_reg(0);
2887 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2888
2889 if (a->m) {
2890 save_gpr(ctx, a->b, ofs);
2891 }
2892 save_gpr(ctx, a->t, dest);
2893
2894 return nullify_end(ctx);
2895 }
2896
2897 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2898 {
2899 TCGv_reg ofs, val;
2900 TCGv_tl addr;
2901
2902 nullify_over(ctx);
2903
2904 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2905 ctx->mmu_idx == MMU_PHYS_IDX);
2906 val = load_gpr(ctx, a->r);
2907 if (a->a) {
2908 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2909 gen_helper_stby_e_parallel(tcg_env, addr, val);
2910 } else {
2911 gen_helper_stby_e(tcg_env, addr, val);
2912 }
2913 } else {
2914 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2915 gen_helper_stby_b_parallel(tcg_env, addr, val);
2916 } else {
2917 gen_helper_stby_b(tcg_env, addr, val);
2918 }
2919 }
2920 if (a->m) {
2921 tcg_gen_andi_reg(ofs, ofs, ~3);
2922 save_gpr(ctx, a->b, ofs);
2923 }
2924
2925 return nullify_end(ctx);
2926 }
2927
2928 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2929 {
2930 int hold_mmu_idx = ctx->mmu_idx;
2931
2932 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2933 ctx->mmu_idx = MMU_PHYS_IDX;
2934 trans_ld(ctx, a);
2935 ctx->mmu_idx = hold_mmu_idx;
2936 return true;
2937 }
2938
2939 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2940 {
2941 int hold_mmu_idx = ctx->mmu_idx;
2942
2943 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2944 ctx->mmu_idx = MMU_PHYS_IDX;
2945 trans_st(ctx, a);
2946 ctx->mmu_idx = hold_mmu_idx;
2947 return true;
2948 }
2949
2950 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2951 {
2952 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2953
2954 tcg_gen_movi_reg(tcg_rt, a->i);
2955 save_gpr(ctx, a->t, tcg_rt);
2956 cond_free(&ctx->null_cond);
2957 return true;
2958 }
2959
2960 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2961 {
2962 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2963 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2964
2965 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2966 save_gpr(ctx, 1, tcg_r1);
2967 cond_free(&ctx->null_cond);
2968 return true;
2969 }
2970
2971 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2972 {
2973 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2974
2975 /* Special case rb == 0, for the LDI pseudo-op.
2976 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
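/* E.g. (illustrative expansions of the assembler pseudo-ops):
 *     ldi 5,%r4        ->  ldo 5(%r0),%r4      -- the rb == 0 case below
 *     ldo 0(%r3),%r4   ->  plain register copy -- tcg_gen_addi folds +0
 */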
2977 if (a->b == 0) {
2978 tcg_gen_movi_reg(tcg_rt, a->i);
2979 } else {
2980 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2981 }
2982 save_gpr(ctx, a->t, tcg_rt);
2983 cond_free(&ctx->null_cond);
2984 return true;
2985 }
2986
2987 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2988 unsigned c, unsigned f, unsigned n, int disp)
2989 {
2990 TCGv_reg dest, in2, sv;
2991 DisasCond cond;
2992
2993 in2 = load_gpr(ctx, r);
2994 dest = tcg_temp_new();
2995
2996 tcg_gen_sub_reg(dest, in1, in2);
2997
2998 sv = NULL;
2999 if (cond_need_sv(c)) {
3000 sv = do_sub_sv(ctx, dest, in1, in2);
3001 }
3002
3003 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3004 return do_cbranch(ctx, disp, n, &cond);
3005 }
3006
3007 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3008 {
3009 nullify_over(ctx);
3010 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3011 }
3012
3013 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3014 {
3015 nullify_over(ctx);
3016 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3017 }
3018
3019 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3020 unsigned c, unsigned f, unsigned n, int disp)
3021 {
3022 TCGv_reg dest, in2, sv, cb_msb;
3023 DisasCond cond;
3024
3025 in2 = load_gpr(ctx, r);
3026 dest = tcg_temp_new();
3027 sv = NULL;
3028 cb_msb = NULL;
3029
3030 if (cond_need_cb(c)) {
3031 cb_msb = tcg_temp_new();
3032 tcg_gen_movi_reg(cb_msb, 0);
3033 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3034 } else {
3035 tcg_gen_add_reg(dest, in1, in2);
3036 }
3037 if (cond_need_sv(c)) {
3038 sv = do_add_sv(ctx, dest, in1, in2);
3039 }
3040
3041 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3042 save_gpr(ctx, r, dest);
3043 return do_cbranch(ctx, disp, n, &cond);
3044 }
3045
3046 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3047 {
3048 nullify_over(ctx);
3049 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3050 }
3051
3052 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3053 {
3054 nullify_over(ctx);
3055 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3056 }
3057
3058 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3059 {
3060 TCGv_reg tmp, tcg_r;
3061 DisasCond cond;
3062
3063 nullify_over(ctx);
3064
3065 tmp = tcg_temp_new();
3066 tcg_r = load_gpr(ctx, a->r);
3067 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3068
3069 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3070 return do_cbranch(ctx, a->disp, a->n, &cond);
3071 }
3072
3073 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3074 {
3075 TCGv_reg tmp, tcg_r;
3076 DisasCond cond;
3077
3078 nullify_over(ctx);
3079
3080 tmp = tcg_temp_new();
3081 tcg_r = load_gpr(ctx, a->r);
3082 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3083
3084 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3085 return do_cbranch(ctx, a->disp, a->n, &cond);
3086 }
3087
3088 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3089 {
3090 TCGv_reg dest;
3091 DisasCond cond;
3092
3093 nullify_over(ctx);
3094
3095 dest = dest_gpr(ctx, a->r2);
3096 if (a->r1 == 0) {
3097 tcg_gen_movi_reg(dest, 0);
3098 } else {
3099 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3100 }
3101
3102 cond = do_sed_cond(a->c, dest);
3103 return do_cbranch(ctx, a->disp, a->n, &cond);
3104 }
3105
3106 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3107 {
3108 TCGv_reg dest;
3109 DisasCond cond;
3110
3111 nullify_over(ctx);
3112
3113 dest = dest_gpr(ctx, a->r);
3114 tcg_gen_movi_reg(dest, a->i);
3115
3116 cond = do_sed_cond(a->c, dest);
3117 return do_cbranch(ctx, a->disp, a->n, &cond);
3118 }
3119
3120 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3121 {
3122 TCGv_reg dest;
3123
3124 if (a->c) {
3125 nullify_over(ctx);
3126 }
3127
3128 dest = dest_gpr(ctx, a->t);
3129 if (a->r1 == 0) {
3130 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3131 tcg_gen_shr_reg(dest, dest, cpu_sar);
3132 } else if (a->r1 == a->r2) {
3133 TCGv_i32 t32 = tcg_temp_new_i32();
3134 TCGv_i32 s32 = tcg_temp_new_i32();
3135
3136 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3137 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3138 tcg_gen_rotr_i32(t32, t32, s32);
3139 tcg_gen_extu_i32_reg(dest, t32);
3140 } else {
3141 TCGv_i64 t = tcg_temp_new_i64();
3142 TCGv_i64 s = tcg_temp_new_i64();
3143
3144 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3145 tcg_gen_extu_reg_i64(s, cpu_sar);
3146 tcg_gen_shr_i64(t, t, s);
3147 tcg_gen_trunc_i64_reg(dest, t);
3148 }
3149 save_gpr(ctx, a->t, dest);
3150
3151 /* Install the new nullification. */
3152 cond_free(&ctx->null_cond);
3153 if (a->c) {
3154 ctx->null_cond = do_sed_cond(a->c, dest);
3155 }
3156 return nullify_end(ctx);
3157 }
3158
3159 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3160 {
3161 unsigned sa = 31 - a->cpos;
3162 TCGv_reg dest, t2;
3163
3164 if (a->c) {
3165 nullify_over(ctx);
3166 }
3167
3168 dest = dest_gpr(ctx, a->t);
3169 t2 = load_gpr(ctx, a->r2);
3170 if (a->r1 == 0) {
3171 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3172 } else if (TARGET_REGISTER_BITS == 32) {
3173 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3174 } else if (a->r1 == a->r2) {
3175 TCGv_i32 t32 = tcg_temp_new_i32();
3176 tcg_gen_trunc_reg_i32(t32, t2);
3177 tcg_gen_rotri_i32(t32, t32, sa);
3178 tcg_gen_extu_i32_reg(dest, t32);
3179 } else {
3180 TCGv_i64 t64 = tcg_temp_new_i64();
3181 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3182 tcg_gen_shri_i64(t64, t64, sa);
3183 tcg_gen_trunc_i64_reg(dest, t64);
3184 }
3185 save_gpr(ctx, a->t, dest);
3186
3187 /* Install the new nullification. */
3188 cond_free(&ctx->null_cond);
3189 if (a->c) {
3190 ctx->null_cond = do_sed_cond(a->c, dest);
3191 }
3192 return nullify_end(ctx);
3193 }
3194
3195 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3196 {
3197 unsigned len = 32 - a->clen;
3198 TCGv_reg dest, src, tmp;
3199
3200 if (a->c) {
3201 nullify_over(ctx);
3202 }
3203
3204 dest = dest_gpr(ctx, a->t);
3205 src = load_gpr(ctx, a->r);
3206 tmp = tcg_temp_new();
3207
3208 /* Recall that SAR is using big-endian bit numbering. */
3209 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
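/* E.g. on a 32-bit target, SAR == 4 names bit 4 counting from the MSB,
 * so the required right shift is 31 - 4 == 27, which is exactly 4 ^ 31;
 * the xor above computes that subtraction for free. */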
3210 if (a->se) {
3211 tcg_gen_sar_reg(dest, src, tmp);
3212 tcg_gen_sextract_reg(dest, dest, 0, len);
3213 } else {
3214 tcg_gen_shr_reg(dest, src, tmp);
3215 tcg_gen_extract_reg(dest, dest, 0, len);
3216 }
3217 save_gpr(ctx, a->t, dest);
3218
3219 /* Install the new nullification. */
3220 cond_free(&ctx->null_cond);
3221 if (a->c) {
3222 ctx->null_cond = do_sed_cond(a->c, dest);
3223 }
3224 return nullify_end(ctx);
3225 }
3226
3227 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3228 {
3229 unsigned len = 32 - a->clen;
3230 unsigned cpos = 31 - a->pos;
3231 TCGv_reg dest, src;
3232
3233 if (a->c) {
3234 nullify_over(ctx);
3235 }
3236
3237 dest = dest_gpr(ctx, a->t);
3238 src = load_gpr(ctx, a->r);
3239 if (a->se) {
3240 tcg_gen_sextract_reg(dest, src, cpos, len);
3241 } else {
3242 tcg_gen_extract_reg(dest, src, cpos, len);
3243 }
3244 save_gpr(ctx, a->t, dest);
3245
3246 /* Install the new nullification. */
3247 cond_free(&ctx->null_cond);
3248 if (a->c) {
3249 ctx->null_cond = do_sed_cond(a->c, dest);
3250 }
3251 return nullify_end(ctx);
3252 }
3253
3254 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3255 {
3256 unsigned len = 32 - a->clen;
3257 target_sreg mask0, mask1;
3258 TCGv_reg dest;
3259
3260 if (a->c) {
3261 nullify_over(ctx);
3262 }
3263 if (a->cpos + len > 32) {
3264 len = 32 - a->cpos;
3265 }
3266
3267 dest = dest_gpr(ctx, a->t);
3268 mask0 = deposit64(0, a->cpos, len, a->i);
3269 mask1 = deposit64(-1, a->cpos, len, a->i);
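/* Worked example (values illustrative): with cpos == 8, len == 4 and
 * i == 0xA, mask0 deposits 0xA into bits 8..11 of zero and mask1 deposits
 * it into all-ones.  The nz case below then computes (src & mask1) | mask0:
 * the 4-bit field becomes 0xA and every other bit of src is preserved. */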
3270
3271 if (a->nz) {
3272 TCGv_reg src = load_gpr(ctx, a->t);
3273 if (mask1 != -1) {
3274 tcg_gen_andi_reg(dest, src, mask1);
3275 src = dest;
3276 }
3277 tcg_gen_ori_reg(dest, src, mask0);
3278 } else {
3279 tcg_gen_movi_reg(dest, mask0);
3280 }
3281 save_gpr(ctx, a->t, dest);
3282
3283 /* Install the new nullification. */
3284 cond_free(&ctx->null_cond);
3285 if (a->c) {
3286 ctx->null_cond = do_sed_cond(a->c, dest);
3287 }
3288 return nullify_end(ctx);
3289 }
3290
3291 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3292 {
3293 unsigned rs = a->nz ? a->t : 0;
3294 unsigned len = 32 - a->clen;
3295 TCGv_reg dest, val;
3296
3297 if (a->c) {
3298 nullify_over(ctx);
3299 }
3300 if (a->cpos + len > 32) {
3301 len = 32 - a->cpos;
3302 }
3303
3304 dest = dest_gpr(ctx, a->t);
3305 val = load_gpr(ctx, a->r);
3306 if (rs == 0) {
3307 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3308 } else {
3309 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3310 }
3311 save_gpr(ctx, a->t, dest);
3312
3313 /* Install the new nullification. */
3314 cond_free(&ctx->null_cond);
3315 if (a->c) {
3316 ctx->null_cond = do_sed_cond(a->c, dest);
3317 }
3318 return nullify_end(ctx);
3319 }
3320
3321 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3322 unsigned nz, unsigned clen, TCGv_reg val)
3323 {
3324 unsigned rs = nz ? rt : 0;
3325 unsigned len = 32 - clen;
3326 TCGv_reg mask, tmp, shift, dest;
3327 unsigned msb = 1U << (len - 1);
3328
3329 dest = dest_gpr(ctx, rt);
3330 shift = tcg_temp_new();
3331 tmp = tcg_temp_new();
3332
3333 /* Convert big-endian bit numbering in SAR to left-shift. */
3334 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3335
3336 mask = tcg_temp_new();
3337 tcg_gen_movi_reg(mask, msb + (msb - 1));
3338 tcg_gen_and_reg(tmp, val, mask);
3339 if (rs) {
3340 tcg_gen_shl_reg(mask, mask, shift);
3341 tcg_gen_shl_reg(tmp, tmp, shift);
3342 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3343 tcg_gen_or_reg(dest, dest, tmp);
3344 } else {
3345 tcg_gen_shl_reg(dest, tmp, shift);
3346 }
3347 save_gpr(ctx, rt, dest);
3348
3349 /* Install the new nullification. */
3350 cond_free(&ctx->null_cond);
3351 if (c) {
3352 ctx->null_cond = do_sed_cond(c, dest);
3353 }
3354 return nullify_end(ctx);
3355 }
3356
3357 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3358 {
3359 if (a->c) {
3360 nullify_over(ctx);
3361 }
3362 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3363 }
3364
3365 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3366 {
3367 if (a->c) {
3368 nullify_over(ctx);
3369 }
3370 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
3371 }
3372
3373 static bool trans_be(DisasContext *ctx, arg_be *a)
3374 {
3375 TCGv_reg tmp;
3376
3377 #ifdef CONFIG_USER_ONLY
3378 /* ??? It seems like there should be a good way of using
3379 "be disp(sr2, r0)", the canonical gateway entry mechanism
3380 to our advantage. But that appears to be inconvenient to
3381 manage along side branch delay slots. Therefore we handle
3382 entry into the gateway page via absolute address. */
3383 /* Since we don't implement spaces, just branch. Do notice the special
3384 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3385 goto_tb to the TB containing the syscall. */
3386 if (a->b == 0) {
3387 return do_dbranch(ctx, a->disp, a->l, a->n);
3388 }
3389 #else
3390 nullify_over(ctx);
3391 #endif
3392
3393 tmp = tcg_temp_new();
3394 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3395 tmp = do_ibranch_priv(ctx, tmp);
3396
3397 #ifdef CONFIG_USER_ONLY
3398 return do_ibranch(ctx, tmp, a->l, a->n);
3399 #else
3400 TCGv_i64 new_spc = tcg_temp_new_i64();
3401
3402 load_spr(ctx, new_spc, a->sp);
3403 if (a->l) {
3404 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3405 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3406 }
3407 if (a->n && use_nullify_skip(ctx)) {
3408 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3409 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3410 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3411 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3412 } else {
3413 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3414 if (ctx->iaoq_b == -1) {
3415 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3416 }
3417 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3418 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3419 nullify_set(ctx, a->n);
3420 }
3421 tcg_gen_lookup_and_goto_ptr();
3422 ctx->base.is_jmp = DISAS_NORETURN;
3423 return nullify_end(ctx);
3424 #endif
3425 }
3426
3427 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3428 {
3429 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3430 }
3431
3432 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3433 {
3434 target_ureg dest = iaoq_dest(ctx, a->disp);
3435
3436 nullify_over(ctx);
3437
3438 /* Make sure the caller hasn't done something weird with the queue.
3439 * ??? This is not quite the same as the PSW[B] bit, which would be
3440 * expensive to track. Real hardware will trap for
3441 * b gateway
3442 * b gateway+4 (in delay slot of first branch).
3443 * However, checking for a non-sequential instruction queue *will*
3444 * diagnose the security hole
3445 * b gateway
3446 * b evil
3447 * in which instructions at evil would run with increased privs.
3448 */
3449 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3450 return gen_illegal(ctx);
3451 }
3452
3453 #ifndef CONFIG_USER_ONLY
3454 if (ctx->tb_flags & PSW_C) {
3455 CPUHPPAState *env = cpu_env(ctx->cs);
3456 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3457 /* If we could not find a TLB entry, then we need to generate an
3458 ITLB miss exception so the kernel will provide it.
3459 The resulting TLB fill operation will invalidate this TB and
3460 we will re-translate, at which point we *will* be able to find
3461 the TLB entry and determine if this is in fact a gateway page. */
3462 if (type < 0) {
3463 gen_excp(ctx, EXCP_ITLB_MISS);
3464 return true;
3465 }
3466 /* No change for non-gateway pages or for priv decrease. */
3467 if (type >= 4 && type - 4 < ctx->privilege) {
3468 dest = deposit32(dest, 0, 2, type - 4);
3469 }
3470 } else {
3471 dest &= -4; /* priv = 0 */
3472 }
3473 #endif
3474
3475 if (a->l) {
3476 TCGv_reg tmp = dest_gpr(ctx, a->l);
3477 if (ctx->privilege < 3) {
3478 tcg_gen_andi_reg(tmp, tmp, -4);
3479 }
3480 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3481 save_gpr(ctx, a->l, tmp);
3482 }
3483
3484 return do_dbranch(ctx, dest, 0, a->n);
3485 }
3486
3487 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3488 {
3489 if (a->x) {
3490 TCGv_reg tmp = tcg_temp_new();
3491 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3492 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3493 /* The computation here never changes privilege level. */
3494 return do_ibranch(ctx, tmp, a->l, a->n);
3495 } else {
3496 /* BLR R0,RX is a good way to load PC+8 into RX. */
3497 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3498 }
3499 }
3500
3501 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3502 {
3503 TCGv_reg dest;
3504
3505 if (a->x == 0) {
3506 dest = load_gpr(ctx, a->b);
3507 } else {
3508 dest = tcg_temp_new();
3509 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3510 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3511 }
3512 dest = do_ibranch_priv(ctx, dest);
3513 return do_ibranch(ctx, dest, 0, a->n);
3514 }
3515
3516 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3517 {
3518 TCGv_reg dest;
3519
3520 #ifdef CONFIG_USER_ONLY
3521 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3522 return do_ibranch(ctx, dest, a->l, a->n);
3523 #else
3524 nullify_over(ctx);
3525 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3526
3527 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3528 if (ctx->iaoq_b == -1) {
3529 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3530 }
3531 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3532 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3533 if (a->l) {
3534 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3535 }
3536 nullify_set(ctx, a->n);
3537 tcg_gen_lookup_and_goto_ptr();
3538 ctx->base.is_jmp = DISAS_NORETURN;
3539 return nullify_end(ctx);
3540 #endif
3541 }
3542
3543 /*
3544 * Float class 0
3545 */
3546
3547 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3548 {
3549 tcg_gen_mov_i32(dst, src);
3550 }
3551
3552 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3553 {
3554 uint64_t ret;
3555
3556 if (TARGET_REGISTER_BITS == 64) {
3557 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3558 } else {
3559 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3560 }
3561
3562 nullify_over(ctx);
3563 save_frd(0, tcg_constant_i64(ret));
3564 return nullify_end(ctx);
3565 }
3566
3567 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3568 {
3569 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3570 }
3571
3572 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3573 {
3574 tcg_gen_mov_i64(dst, src);
3575 }
3576
3577 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3578 {
3579 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3580 }
3581
3582 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3583 {
3584 tcg_gen_andi_i32(dst, src, INT32_MAX);
3585 }
3586
3587 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3588 {
3589 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3590 }
3591
3592 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3593 {
3594 tcg_gen_andi_i64(dst, src, INT64_MAX);
3595 }
3596
3597 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3598 {
3599 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3600 }
3601
3602 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3603 {
3604 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3605 }
3606
3607 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3608 {
3609 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3610 }
3611
3612 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3613 {
3614 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3615 }
3616
3617 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3618 {
3619 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3620 }
3621
3622 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3623 {
3624 tcg_gen_xori_i32(dst, src, INT32_MIN);
3625 }
3626
3627 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3628 {
3629 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3630 }
3631
3632 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3633 {
3634 tcg_gen_xori_i64(dst, src, INT64_MIN);
3635 }
3636
3637 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3638 {
3639 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3640 }
3641
3642 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3643 {
3644 tcg_gen_ori_i32(dst, src, INT32_MIN);
3645 }
3646
3647 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3648 {
3649 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3650 }
3651
3652 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3653 {
3654 tcg_gen_ori_i64(dst, src, INT64_MIN);
3655 }
3656
3657 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3658 {
3659 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3660 }
3661
3662 /*
3663 * Float class 1
3664 */
3665
3666 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3667 {
3668 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3669 }
3670
3671 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3672 {
3673 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3674 }
3675
3676 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3677 {
3678 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3679 }
3680
3681 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3682 {
3683 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3684 }
3685
3686 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3687 {
3688 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3689 }
3690
3691 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3692 {
3693 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3694 }
3695
3696 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3697 {
3698 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3699 }
3700
3701 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3702 {
3703 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3704 }
3705
3706 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3707 {
3708 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3709 }
3710
3711 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3712 {
3713 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3714 }
3715
3716 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3717 {
3718 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3719 }
3720
3721 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3722 {
3723 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3724 }
3725
3726 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3727 {
3728 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3729 }
3730
3731 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3734 }
3735
3736 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3737 {
3738 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3739 }
3740
3741 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3744 }
3745
3746 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3747 {
3748 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3749 }
3750
3751 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3752 {
3753 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3754 }
3755
3756 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3757 {
3758 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3759 }
3760
3761 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3762 {
3763 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3764 }
3765
3766 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3767 {
3768 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3769 }
3770
3771 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3772 {
3773 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3774 }
3775
3776 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3777 {
3778 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3779 }
3780
3781 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3782 {
3783 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3784 }
3785
3786 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3787 {
3788 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3789 }
3790
3791 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3792 {
3793 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3794 }
3795
3796 /*
3797 * Float class 2
3798 */
3799
3800 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3801 {
3802 TCGv_i32 ta, tb, tc, ty;
3803
3804 nullify_over(ctx);
3805
3806 ta = load_frw0_i32(a->r1);
3807 tb = load_frw0_i32(a->r2);
3808 ty = tcg_constant_i32(a->y);
3809 tc = tcg_constant_i32(a->c);
3810
3811 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3812
3813 return nullify_end(ctx);
3814 }
3815
3816 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3817 {
3818 TCGv_i64 ta, tb;
3819 TCGv_i32 tc, ty;
3820
3821 nullify_over(ctx);
3822
3823 ta = load_frd0(a->r1);
3824 tb = load_frd0(a->r2);
3825 ty = tcg_constant_i32(a->y);
3826 tc = tcg_constant_i32(a->c);
3827
3828 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3829
3830 return nullify_end(ctx);
3831 }
3832
3833 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3834 {
3835 TCGv_reg t;
3836
3837 nullify_over(ctx);
3838
3839 t = tcg_temp_new();
3840 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3841
3842 if (a->y == 1) {
3843 int mask;
3844 bool inv = false;
3845
3846 switch (a->c) {
3847 case 0: /* simple */
3848 tcg_gen_andi_reg(t, t, 0x4000000);
3849 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3850 goto done;
3851 case 2: /* rej */
3852 inv = true;
3853 /* fallthru */
3854 case 1: /* acc */
3855 mask = 0x43ff800;
3856 break;
3857 case 6: /* rej8 */
3858 inv = true;
3859 /* fallthru */
3860 case 5: /* acc8 */
3861 mask = 0x43f8000;
3862 break;
3863 case 9: /* acc6 */
3864 mask = 0x43e0000;
3865 break;
3866 case 13: /* acc4 */
3867 mask = 0x4380000;
3868 break;
3869 case 17: /* acc2 */
3870 mask = 0x4200000;
3871 break;
3872 default:
3873 gen_illegal(ctx);
3874 return true;
3875 }
3876 if (inv) {
3877 TCGv_reg c = tcg_constant_reg(mask);
3878 tcg_gen_or_reg(t, t, c);
3879 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3880 } else {
3881 tcg_gen_andi_reg(t, t, mask);
3882 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3883 }
3884 } else {
3885 unsigned cbit = (a->y ^ 1) - 1;
3886
3887 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3888 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3889 }
3890
3891 done:
3892 return nullify_end(ctx);
3893 }
3894
3895 /*
3896 * Float class 3
3897 */
3898
3899 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3900 {
3901 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3902 }
3903
3904 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3905 {
3906 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3907 }
3908
3909 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3910 {
3911 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3912 }
3913
3914 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3915 {
3916 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3917 }
3918
3919 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3920 {
3921 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3922 }
3923
3924 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3925 {
3926 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3927 }
3928
3929 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3930 {
3931 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3932 }
3933
3934 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3935 {
3936 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3937 }
3938
3939 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3940 {
3941 TCGv_i64 x, y;
3942
3943 nullify_over(ctx);
3944
3945 x = load_frw0_i64(a->r1);
3946 y = load_frw0_i64(a->r2);
3947 tcg_gen_mul_i64(x, x, y);
3948 save_frd(a->t, x);
3949
3950 return nullify_end(ctx);
3951 }
3952
3953 /* Convert the fmpyadd single-precision register encodings to standard. */
3954 static inline int fmpyadd_s_reg(unsigned r)
3955 {
3956 return (r & 16) * 2 + 16 + (r & 15);
3957 }
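/* E.g. the arithmetic above maps encodings 0..15 to registers 16..31 and
 * encodings 16..31 to registers 48..63 (0 -> 16, 15 -> 31, 16 -> 48,
 * 31 -> 63); under the translator's usual numbering (an assumption here)
 * these are the two banks of single-precision halves of fr16..fr31. */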
3958
3959 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3960 {
3961 int tm = fmpyadd_s_reg(a->tm);
3962 int ra = fmpyadd_s_reg(a->ra);
3963 int ta = fmpyadd_s_reg(a->ta);
3964 int rm2 = fmpyadd_s_reg(a->rm2);
3965 int rm1 = fmpyadd_s_reg(a->rm1);
3966
3967 nullify_over(ctx);
3968
3969 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3970 do_fop_weww(ctx, ta, ta, ra,
3971 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3972
3973 return nullify_end(ctx);
3974 }
3975
3976 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3977 {
3978 return do_fmpyadd_s(ctx, a, false);
3979 }
3980
3981 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3982 {
3983 return do_fmpyadd_s(ctx, a, true);
3984 }
3985
3986 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3987 {
3988 nullify_over(ctx);
3989
3990 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3991 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3992 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3993
3994 return nullify_end(ctx);
3995 }
3996
3997 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3998 {
3999 return do_fmpyadd_d(ctx, a, false);
4000 }
4001
4002 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4003 {
4004 return do_fmpyadd_d(ctx, a, true);
4005 }
4006
4007 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4008 {
4009 TCGv_i32 x, y, z;
4010
4011 nullify_over(ctx);
4012 x = load_frw0_i32(a->rm1);
4013 y = load_frw0_i32(a->rm2);
4014 z = load_frw0_i32(a->ra3);
4015
4016 if (a->neg) {
4017 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4018 } else {
4019 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4020 }
4021
4022 save_frw_i32(a->t, x);
4023 return nullify_end(ctx);
4024 }
4025
4026 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4027 {
4028 TCGv_i64 x, y, z;
4029
4030 nullify_over(ctx);
4031 x = load_frd0(a->rm1);
4032 y = load_frd0(a->rm2);
4033 z = load_frd0(a->ra3);
4034
4035 if (a->neg) {
4036 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4037 } else {
4038 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4039 }
4040
4041 save_frd(a->t, x);
4042 return nullify_end(ctx);
4043 }
4044
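/* DIAG is implementation-defined.  The only case implemented is the PDC BTLB call
   used by SeaBIOS-hppa (system emulation only); all other codes are logged as
   unimplemented and ignored. */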
4045 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4046 {
4047 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4048 #ifndef CONFIG_USER_ONLY
4049 if (a->i == 0x100) {
4050 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4051 nullify_over(ctx);
4052 gen_helper_diag_btlb(tcg_env);
4053 return nullify_end(ctx);
4054 }
4055 #endif
4056 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4057 return true;
4058 }
4059
4060 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4061 {
4062 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4063 int bound;
4064
4065 ctx->cs = cs;
4066 ctx->tb_flags = ctx->base.tb->flags;
4067
4068 #ifdef CONFIG_USER_ONLY
4069 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4070 ctx->mmu_idx = MMU_USER_IDX;
4071 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4072 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4073 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4074 #else
4075 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4076 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4077 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4078 : MMU_PHYS_IDX);
4079
4080 /* Recover the IAOQ values from the GVA + PRIV. */
4081 uint64_t cs_base = ctx->base.tb->cs_base;
4082 uint64_t iasq_f = cs_base & ~0xffffffffull;
4083 int32_t diff = cs_base;
4084
4085 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4086 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4087 #endif
4088 ctx->iaoq_n = -1;
4089 ctx->iaoq_n_var = NULL;
4090
4091 /* Bound the number of instructions by those left on the page. */
4092 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4093 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4094 }
4095
4096 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4097 {
4098 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4099
4100 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4101 ctx->null_cond = cond_make_f();
4102 ctx->psw_n_nonzero = false;
4103 if (ctx->tb_flags & PSW_N) {
4104 ctx->null_cond.c = TCG_COND_ALWAYS;
4105 ctx->psw_n_nonzero = true;
4106 }
4107 ctx->null_lab = NULL;
4108 }
4109
4110 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4111 {
4112 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4113
4114 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4115 }
4116
4117 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4118 {
4119 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4120 CPUHPPAState *env = cpu_env(cs);
4121 DisasJumpType ret;
4122
4123 /* Execute one insn. */
4124 #ifdef CONFIG_USER_ONLY
4125 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4126 do_page_zero(ctx);
4127 ret = ctx->base.is_jmp;
4128 assert(ret != DISAS_NEXT);
4129 } else
4130 #endif
4131 {
4132 /* Always fetch the insn, even if nullified, so that we check
4133 the page permissions for execute. */
4134 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4135
4136 /* Set up the IA queue for the next insn.
4137 This will be overwritten by a branch. */
4138 if (ctx->iaoq_b == -1) {
4139 ctx->iaoq_n = -1;
4140 ctx->iaoq_n_var = tcg_temp_new();
4141 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4142 } else {
4143 ctx->iaoq_n = ctx->iaoq_b + 4;
4144 ctx->iaoq_n_var = NULL;
4145 }
4146
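/* A condition of ALWAYS means this insn is nullified: skip it, and reset the
   condition so that only this one insn is affected. */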
4147 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4148 ctx->null_cond.c = TCG_COND_NEVER;
4149 ret = DISAS_NEXT;
4150 } else {
4151 ctx->insn = insn;
4152 if (!decode(ctx, insn)) {
4153 gen_illegal(ctx);
4154 }
4155 ret = ctx->base.is_jmp;
4156 assert(ctx->null_lab == NULL);
4157 }
4158 }
4159
4160 /* Advance the insn queue. Note that this check also detects
4161 a priority change within the instruction queue. */
4162 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4163 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4164 && use_goto_tb(ctx, ctx->iaoq_b)
4165 && (ctx->null_cond.c == TCG_COND_NEVER
4166 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4167 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4168 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4169 ctx->base.is_jmp = ret = DISAS_NORETURN;
4170 } else {
4171 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4172 }
4173 }
4174 ctx->iaoq_f = ctx->iaoq_b;
4175 ctx->iaoq_b = ctx->iaoq_n;
4176 ctx->base.pc_next += 4;
4177
4178 switch (ret) {
4179 case DISAS_NORETURN:
4180 case DISAS_IAQ_N_UPDATED:
4181 break;
4182
4183 case DISAS_NEXT:
4184 case DISAS_IAQ_N_STALE:
4185 case DISAS_IAQ_N_STALE_EXIT:
4186 if (ctx->iaoq_f == -1) {
4187 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4188 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4189 #ifndef CONFIG_USER_ONLY
4190 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4191 #endif
4192 nullify_save(ctx);
4193 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4194 ? DISAS_EXIT
4195 : DISAS_IAQ_N_UPDATED);
4196 } else if (ctx->iaoq_b == -1) {
4197 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4198 }
4199 break;
4200
4201 default:
4202 g_assert_not_reached();
4203 }
4204 }
4205
4206 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4207 {
4208 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4209 DisasJumpType is_jmp = ctx->base.is_jmp;
4210
4211 switch (is_jmp) {
4212 case DISAS_NORETURN:
4213 break;
4214 case DISAS_TOO_MANY:
4215 case DISAS_IAQ_N_STALE:
4216 case DISAS_IAQ_N_STALE_EXIT:
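/* Write the translation-time IAOQ values back to the CPU state before leaving the TB. */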
4217 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4218 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4219 nullify_save(ctx);
4220 /* FALLTHRU */
4221 case DISAS_IAQ_N_UPDATED:
4222 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4223 tcg_gen_lookup_and_goto_ptr();
4224 break;
4225 }
4226 /* FALLTHRU */
4227 case DISAS_EXIT:
4228 tcg_gen_exit_tb(NULL, 0);
4229 break;
4230 default:
4231 g_assert_not_reached();
4232 }
4233 }
4234
4235 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4236 CPUState *cs, FILE *logfile)
4237 {
4238 target_ulong pc = dcbase->pc_first;
4239
4240 #ifdef CONFIG_USER_ONLY
4241 switch (pc) {
4242 case 0x00:
4243 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4244 return;
4245 case 0xb0:
4246 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4247 return;
4248 case 0xe0:
4249 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4250 return;
4251 case 0x100:
4252 fprintf(logfile, "IN:\n0x00000100: syscall\n");
4253 return;
4254 }
4255 #endif
4256
4257 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4258 target_disas(logfile, cs, pc, dcbase->tb->size);
4259 }
4260
4261 static const TranslatorOps hppa_tr_ops = {
4262 .init_disas_context = hppa_tr_init_disas_context,
4263 .tb_start = hppa_tr_tb_start,
4264 .insn_start = hppa_tr_insn_start,
4265 .translate_insn = hppa_tr_translate_insn,
4266 .tb_stop = hppa_tr_tb_stop,
4267 .disas_log = hppa_tr_disas_log,
4268 };
4269
4270 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4271 target_ulong pc, void *host_pc)
4272 {
4273 DisasContext ctx;
4274 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4275 }