]> git.proxmox.com Git - mirror_qemu.git/blob - target/hppa/translate.c
target/hppa: Remove get_temp
[mirror_qemu.git] / target / hppa / translate.c
1 /*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/log.h"
30
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef HELPER_H
34
35
36 /* Since we have a distinction between register size and address size,
37 we need to redefine all of these. */
38
39 #undef TCGv
40 #undef tcg_temp_new
41 #undef tcg_global_mem_new
42
43 #if TARGET_LONG_BITS == 64
44 #define TCGv_tl TCGv_i64
45 #define tcg_temp_new_tl tcg_temp_new_i64
46 #if TARGET_REGISTER_BITS == 64
47 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64
48 #else
49 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
50 #endif
51 #else
52 #define TCGv_tl TCGv_i32
53 #define tcg_temp_new_tl tcg_temp_new_i32
54 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32
55 #endif
56
57 #if TARGET_REGISTER_BITS == 64
58 #define TCGv_reg TCGv_i64
59
60 #define tcg_temp_new tcg_temp_new_i64
61 #define tcg_global_mem_new tcg_global_mem_new_i64
62
63 #define tcg_gen_movi_reg tcg_gen_movi_i64
64 #define tcg_gen_mov_reg tcg_gen_mov_i64
65 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
66 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
67 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
68 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
69 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
70 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
71 #define tcg_gen_ld_reg tcg_gen_ld_i64
72 #define tcg_gen_st8_reg tcg_gen_st8_i64
73 #define tcg_gen_st16_reg tcg_gen_st16_i64
74 #define tcg_gen_st32_reg tcg_gen_st32_i64
75 #define tcg_gen_st_reg tcg_gen_st_i64
76 #define tcg_gen_add_reg tcg_gen_add_i64
77 #define tcg_gen_addi_reg tcg_gen_addi_i64
78 #define tcg_gen_sub_reg tcg_gen_sub_i64
79 #define tcg_gen_neg_reg tcg_gen_neg_i64
80 #define tcg_gen_subfi_reg tcg_gen_subfi_i64
81 #define tcg_gen_subi_reg tcg_gen_subi_i64
82 #define tcg_gen_and_reg tcg_gen_and_i64
83 #define tcg_gen_andi_reg tcg_gen_andi_i64
84 #define tcg_gen_or_reg tcg_gen_or_i64
85 #define tcg_gen_ori_reg tcg_gen_ori_i64
86 #define tcg_gen_xor_reg tcg_gen_xor_i64
87 #define tcg_gen_xori_reg tcg_gen_xori_i64
88 #define tcg_gen_not_reg tcg_gen_not_i64
89 #define tcg_gen_shl_reg tcg_gen_shl_i64
90 #define tcg_gen_shli_reg tcg_gen_shli_i64
91 #define tcg_gen_shr_reg tcg_gen_shr_i64
92 #define tcg_gen_shri_reg tcg_gen_shri_i64
93 #define tcg_gen_sar_reg tcg_gen_sar_i64
94 #define tcg_gen_sari_reg tcg_gen_sari_i64
95 #define tcg_gen_brcond_reg tcg_gen_brcond_i64
96 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
97 #define tcg_gen_setcond_reg tcg_gen_setcond_i64
98 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
99 #define tcg_gen_mul_reg tcg_gen_mul_i64
100 #define tcg_gen_muli_reg tcg_gen_muli_i64
101 #define tcg_gen_div_reg tcg_gen_div_i64
102 #define tcg_gen_rem_reg tcg_gen_rem_i64
103 #define tcg_gen_divu_reg tcg_gen_divu_i64
104 #define tcg_gen_remu_reg tcg_gen_remu_i64
105 #define tcg_gen_discard_reg tcg_gen_discard_i64
106 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
107 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
108 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
109 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
110 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
111 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
112 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
113 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
114 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
115 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
116 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
117 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
118 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
119 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
120 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
121 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
122 #define tcg_gen_andc_reg tcg_gen_andc_i64
123 #define tcg_gen_eqv_reg tcg_gen_eqv_i64
124 #define tcg_gen_nand_reg tcg_gen_nand_i64
125 #define tcg_gen_nor_reg tcg_gen_nor_i64
126 #define tcg_gen_orc_reg tcg_gen_orc_i64
127 #define tcg_gen_clz_reg tcg_gen_clz_i64
128 #define tcg_gen_ctz_reg tcg_gen_ctz_i64
129 #define tcg_gen_clzi_reg tcg_gen_clzi_i64
130 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
131 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
132 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
133 #define tcg_gen_rotl_reg tcg_gen_rotl_i64
134 #define tcg_gen_rotli_reg tcg_gen_rotli_i64
135 #define tcg_gen_rotr_reg tcg_gen_rotr_i64
136 #define tcg_gen_rotri_reg tcg_gen_rotri_i64
137 #define tcg_gen_deposit_reg tcg_gen_deposit_i64
138 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
139 #define tcg_gen_extract_reg tcg_gen_extract_i64
140 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
141 #define tcg_gen_extract2_reg tcg_gen_extract2_i64
142 #define tcg_constant_reg tcg_constant_i64
143 #define tcg_gen_movcond_reg tcg_gen_movcond_i64
144 #define tcg_gen_add2_reg tcg_gen_add2_i64
145 #define tcg_gen_sub2_reg tcg_gen_sub2_i64
146 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
147 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
148 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
149 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
150 #else
151 #define TCGv_reg TCGv_i32
152 #define tcg_temp_new tcg_temp_new_i32
153 #define tcg_global_mem_new tcg_global_mem_new_i32
154
155 #define tcg_gen_movi_reg tcg_gen_movi_i32
156 #define tcg_gen_mov_reg tcg_gen_mov_i32
157 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
158 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
159 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
160 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
161 #define tcg_gen_ld32u_reg tcg_gen_ld_i32
162 #define tcg_gen_ld32s_reg tcg_gen_ld_i32
163 #define tcg_gen_ld_reg tcg_gen_ld_i32
164 #define tcg_gen_st8_reg tcg_gen_st8_i32
165 #define tcg_gen_st16_reg tcg_gen_st16_i32
166 #define tcg_gen_st32_reg tcg_gen_st32_i32
167 #define tcg_gen_st_reg tcg_gen_st_i32
168 #define tcg_gen_add_reg tcg_gen_add_i32
169 #define tcg_gen_addi_reg tcg_gen_addi_i32
170 #define tcg_gen_sub_reg tcg_gen_sub_i32
171 #define tcg_gen_neg_reg tcg_gen_neg_i32
172 #define tcg_gen_subfi_reg tcg_gen_subfi_i32
173 #define tcg_gen_subi_reg tcg_gen_subi_i32
174 #define tcg_gen_and_reg tcg_gen_and_i32
175 #define tcg_gen_andi_reg tcg_gen_andi_i32
176 #define tcg_gen_or_reg tcg_gen_or_i32
177 #define tcg_gen_ori_reg tcg_gen_ori_i32
178 #define tcg_gen_xor_reg tcg_gen_xor_i32
179 #define tcg_gen_xori_reg tcg_gen_xori_i32
180 #define tcg_gen_not_reg tcg_gen_not_i32
181 #define tcg_gen_shl_reg tcg_gen_shl_i32
182 #define tcg_gen_shli_reg tcg_gen_shli_i32
183 #define tcg_gen_shr_reg tcg_gen_shr_i32
184 #define tcg_gen_shri_reg tcg_gen_shri_i32
185 #define tcg_gen_sar_reg tcg_gen_sar_i32
186 #define tcg_gen_sari_reg tcg_gen_sari_i32
187 #define tcg_gen_brcond_reg tcg_gen_brcond_i32
188 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
189 #define tcg_gen_setcond_reg tcg_gen_setcond_i32
190 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
191 #define tcg_gen_mul_reg tcg_gen_mul_i32
192 #define tcg_gen_muli_reg tcg_gen_muli_i32
193 #define tcg_gen_div_reg tcg_gen_div_i32
194 #define tcg_gen_rem_reg tcg_gen_rem_i32
195 #define tcg_gen_divu_reg tcg_gen_divu_i32
196 #define tcg_gen_remu_reg tcg_gen_remu_i32
197 #define tcg_gen_discard_reg tcg_gen_discard_i32
198 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
199 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
200 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
201 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32
202 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
203 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
204 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
205 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
206 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
207 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
208 #define tcg_gen_ext32u_reg tcg_gen_mov_i32
209 #define tcg_gen_ext32s_reg tcg_gen_mov_i32
210 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
211 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
212 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
213 #define tcg_gen_andc_reg tcg_gen_andc_i32
214 #define tcg_gen_eqv_reg tcg_gen_eqv_i32
215 #define tcg_gen_nand_reg tcg_gen_nand_i32
216 #define tcg_gen_nor_reg tcg_gen_nor_i32
217 #define tcg_gen_orc_reg tcg_gen_orc_i32
218 #define tcg_gen_clz_reg tcg_gen_clz_i32
219 #define tcg_gen_ctz_reg tcg_gen_ctz_i32
220 #define tcg_gen_clzi_reg tcg_gen_clzi_i32
221 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
222 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
223 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
224 #define tcg_gen_rotl_reg tcg_gen_rotl_i32
225 #define tcg_gen_rotli_reg tcg_gen_rotli_i32
226 #define tcg_gen_rotr_reg tcg_gen_rotr_i32
227 #define tcg_gen_rotri_reg tcg_gen_rotri_i32
228 #define tcg_gen_deposit_reg tcg_gen_deposit_i32
229 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
230 #define tcg_gen_extract_reg tcg_gen_extract_i32
231 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
232 #define tcg_gen_extract2_reg tcg_gen_extract2_i32
233 #define tcg_constant_reg tcg_constant_i32
234 #define tcg_gen_movcond_reg tcg_gen_movcond_i32
235 #define tcg_gen_add2_reg tcg_gen_add2_i32
236 #define tcg_gen_sub2_reg tcg_gen_sub2_i32
237 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
238 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
239 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
240 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
241 #endif /* TARGET_REGISTER_BITS */
242
243 typedef struct DisasCond {
244 TCGCond c;
245 TCGv_reg a0, a1;
246 } DisasCond;
247
248 typedef struct DisasContext {
249 DisasContextBase base;
250 CPUState *cs;
251
252 target_ureg iaoq_f;
253 target_ureg iaoq_b;
254 target_ureg iaoq_n;
255 TCGv_reg iaoq_n_var;
256
257 int ntempl;
258 TCGv_tl templ[4];
259
260 DisasCond null_cond;
261 TCGLabel *null_lab;
262
263 uint32_t insn;
264 uint32_t tb_flags;
265 int mmu_idx;
266 int privilege;
267 bool psw_n_nonzero;
268
269 #ifdef CONFIG_USER_ONLY
270 MemOp unalign;
271 #endif
272 } DisasContext;
273
274 #ifdef CONFIG_USER_ONLY
275 #define UNALIGN(C) (C)->unalign
276 #else
277 #define UNALIGN(C) MO_ALIGN
278 #endif
279
280 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
281 static int expand_sm_imm(DisasContext *ctx, int val)
282 {
283 if (val & PSW_SM_E) {
284 val = (val & ~PSW_SM_E) | PSW_E;
285 }
286 if (val & PSW_SM_W) {
287 val = (val & ~PSW_SM_W) | PSW_W;
288 }
289 return val;
290 }
291
292 /* Inverted space register indicates 0 means sr0 not inferred from base. */
293 static int expand_sr3x(DisasContext *ctx, int val)
294 {
295 return ~val;
296 }
297
298 /* Convert the M:A bits within a memory insn to the tri-state value
299 we use for the final M. */
300 static int ma_to_m(DisasContext *ctx, int val)
301 {
302 return val & 2 ? (val & 1 ? -1 : 1) : 0;
303 }
304
305 /* Convert the sign of the displacement to a pre or post-modify. */
306 static int pos_to_m(DisasContext *ctx, int val)
307 {
308 return val ? 1 : -1;
309 }
310
311 static int neg_to_m(DisasContext *ctx, int val)
312 {
313 return val ? -1 : 1;
314 }
315
316 /* Used for branch targets and fp memory ops. */
317 static int expand_shl2(DisasContext *ctx, int val)
318 {
319 return val << 2;
320 }
321
322 /* Used for fp memory ops. */
323 static int expand_shl3(DisasContext *ctx, int val)
324 {
325 return val << 3;
326 }
327
328 /* Used for assemble_21. */
329 static int expand_shl11(DisasContext *ctx, int val)
330 {
331 return val << 11;
332 }
333
334
335 /* Include the auto-generated decoder. */
336 #include "decode-insns.c.inc"
337
338 /* We are not using a goto_tb (for whatever reason), but have updated
339 the iaq (for whatever reason), so don't do it again on exit. */
340 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
341
342 /* We are exiting the TB, but have neither emitted a goto_tb, nor
343 updated the iaq for the next instruction to be executed. */
344 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
345
346 /* Similarly, but we want to return to the main loop immediately
347 to recognize unmasked interrupts. */
348 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
349 #define DISAS_EXIT DISAS_TARGET_3
350
351 /* global register indexes */
352 static TCGv_reg cpu_gr[32];
353 static TCGv_i64 cpu_sr[4];
354 static TCGv_i64 cpu_srH;
355 static TCGv_reg cpu_iaoq_f;
356 static TCGv_reg cpu_iaoq_b;
357 static TCGv_i64 cpu_iasq_f;
358 static TCGv_i64 cpu_iasq_b;
359 static TCGv_reg cpu_sar;
360 static TCGv_reg cpu_psw_n;
361 static TCGv_reg cpu_psw_v;
362 static TCGv_reg cpu_psw_cb;
363 static TCGv_reg cpu_psw_cb_msb;
364
365 void hppa_translate_init(void)
366 {
367 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
368
369 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
370 static const GlobalVar vars[] = {
371 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
372 DEF_VAR(psw_n),
373 DEF_VAR(psw_v),
374 DEF_VAR(psw_cb),
375 DEF_VAR(psw_cb_msb),
376 DEF_VAR(iaoq_f),
377 DEF_VAR(iaoq_b),
378 };
379
380 #undef DEF_VAR
381
382 /* Use the symbolic register names that match the disassembler. */
383 static const char gr_names[32][4] = {
384 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
385 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
386 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
387 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
388 };
389 /* SR[4-7] are not global registers so that we can index them. */
390 static const char sr_names[5][4] = {
391 "sr0", "sr1", "sr2", "sr3", "srH"
392 };
393
394 int i;
395
396 cpu_gr[0] = NULL;
397 for (i = 1; i < 32; i++) {
398 cpu_gr[i] = tcg_global_mem_new(tcg_env,
399 offsetof(CPUHPPAState, gr[i]),
400 gr_names[i]);
401 }
402 for (i = 0; i < 4; i++) {
403 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
404 offsetof(CPUHPPAState, sr[i]),
405 sr_names[i]);
406 }
407 cpu_srH = tcg_global_mem_new_i64(tcg_env,
408 offsetof(CPUHPPAState, sr[4]),
409 sr_names[4]);
410
411 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
412 const GlobalVar *v = &vars[i];
413 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
414 }
415
416 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
417 offsetof(CPUHPPAState, iasq_f),
418 "iasq_f");
419 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
420 offsetof(CPUHPPAState, iasq_b),
421 "iasq_b");
422 }
423
424 static DisasCond cond_make_f(void)
425 {
426 return (DisasCond){
427 .c = TCG_COND_NEVER,
428 .a0 = NULL,
429 .a1 = NULL,
430 };
431 }
432
433 static DisasCond cond_make_t(void)
434 {
435 return (DisasCond){
436 .c = TCG_COND_ALWAYS,
437 .a0 = NULL,
438 .a1 = NULL,
439 };
440 }
441
442 static DisasCond cond_make_n(void)
443 {
444 return (DisasCond){
445 .c = TCG_COND_NE,
446 .a0 = cpu_psw_n,
447 .a1 = tcg_constant_reg(0)
448 };
449 }
450
451 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
452 {
453 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
454 return (DisasCond){
455 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
456 };
457 }
458
459 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
460 {
461 TCGv_reg tmp = tcg_temp_new();
462 tcg_gen_mov_reg(tmp, a0);
463 return cond_make_0_tmp(c, tmp);
464 }
465
466 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
467 {
468 DisasCond r = { .c = c };
469
470 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
471 r.a0 = tcg_temp_new();
472 tcg_gen_mov_reg(r.a0, a0);
473 r.a1 = tcg_temp_new();
474 tcg_gen_mov_reg(r.a1, a1);
475
476 return r;
477 }
478
479 static void cond_free(DisasCond *cond)
480 {
481 switch (cond->c) {
482 default:
483 cond->a0 = NULL;
484 cond->a1 = NULL;
485 /* fallthru */
486 case TCG_COND_ALWAYS:
487 cond->c = TCG_COND_NEVER;
488 break;
489 case TCG_COND_NEVER:
490 break;
491 }
492 }
493
494 #ifndef CONFIG_USER_ONLY
495 static TCGv_tl get_temp_tl(DisasContext *ctx)
496 {
497 unsigned i = ctx->ntempl++;
498 g_assert(i < ARRAY_SIZE(ctx->templ));
499 return ctx->templ[i] = tcg_temp_new_tl();
500 }
501 #endif
502
503 static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
504 {
505 TCGv_reg t = tcg_temp_new();
506 tcg_gen_movi_reg(t, v);
507 return t;
508 }
509
510 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
511 {
512 if (reg == 0) {
513 TCGv_reg t = tcg_temp_new();
514 tcg_gen_movi_reg(t, 0);
515 return t;
516 } else {
517 return cpu_gr[reg];
518 }
519 }
520
521 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
522 {
523 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
524 return tcg_temp_new();
525 } else {
526 return cpu_gr[reg];
527 }
528 }
529
530 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
531 {
532 if (ctx->null_cond.c != TCG_COND_NEVER) {
533 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
534 ctx->null_cond.a1, dest, t);
535 } else {
536 tcg_gen_mov_reg(dest, t);
537 }
538 }
539
540 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
541 {
542 if (reg != 0) {
543 save_or_nullify(ctx, cpu_gr[reg], t);
544 }
545 }
546
547 #if HOST_BIG_ENDIAN
548 # define HI_OFS 0
549 # define LO_OFS 4
550 #else
551 # define HI_OFS 4
552 # define LO_OFS 0
553 #endif
554
555 static TCGv_i32 load_frw_i32(unsigned rt)
556 {
557 TCGv_i32 ret = tcg_temp_new_i32();
558 tcg_gen_ld_i32(ret, tcg_env,
559 offsetof(CPUHPPAState, fr[rt & 31])
560 + (rt & 32 ? LO_OFS : HI_OFS));
561 return ret;
562 }
563
564 static TCGv_i32 load_frw0_i32(unsigned rt)
565 {
566 if (rt == 0) {
567 TCGv_i32 ret = tcg_temp_new_i32();
568 tcg_gen_movi_i32(ret, 0);
569 return ret;
570 } else {
571 return load_frw_i32(rt);
572 }
573 }
574
575 static TCGv_i64 load_frw0_i64(unsigned rt)
576 {
577 TCGv_i64 ret = tcg_temp_new_i64();
578 if (rt == 0) {
579 tcg_gen_movi_i64(ret, 0);
580 } else {
581 tcg_gen_ld32u_i64(ret, tcg_env,
582 offsetof(CPUHPPAState, fr[rt & 31])
583 + (rt & 32 ? LO_OFS : HI_OFS));
584 }
585 return ret;
586 }
587
588 static void save_frw_i32(unsigned rt, TCGv_i32 val)
589 {
590 tcg_gen_st_i32(val, tcg_env,
591 offsetof(CPUHPPAState, fr[rt & 31])
592 + (rt & 32 ? LO_OFS : HI_OFS));
593 }
594
595 #undef HI_OFS
596 #undef LO_OFS
597
598 static TCGv_i64 load_frd(unsigned rt)
599 {
600 TCGv_i64 ret = tcg_temp_new_i64();
601 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
602 return ret;
603 }
604
605 static TCGv_i64 load_frd0(unsigned rt)
606 {
607 if (rt == 0) {
608 TCGv_i64 ret = tcg_temp_new_i64();
609 tcg_gen_movi_i64(ret, 0);
610 return ret;
611 } else {
612 return load_frd(rt);
613 }
614 }
615
616 static void save_frd(unsigned rt, TCGv_i64 val)
617 {
618 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
619 }
620
621 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
622 {
623 #ifdef CONFIG_USER_ONLY
624 tcg_gen_movi_i64(dest, 0);
625 #else
626 if (reg < 4) {
627 tcg_gen_mov_i64(dest, cpu_sr[reg]);
628 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
629 tcg_gen_mov_i64(dest, cpu_srH);
630 } else {
631 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
632 }
633 #endif
634 }
635
636 /* Skip over the implementation of an insn that has been nullified.
637 Use this when the insn is too complex for a conditional move. */
638 static void nullify_over(DisasContext *ctx)
639 {
640 if (ctx->null_cond.c != TCG_COND_NEVER) {
641 /* The always condition should have been handled in the main loop. */
642 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
643
644 ctx->null_lab = gen_new_label();
645
646 /* If we're using PSW[N], copy it to a temp because... */
647 if (ctx->null_cond.a0 == cpu_psw_n) {
648 ctx->null_cond.a0 = tcg_temp_new();
649 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
650 }
651 /* ... we clear it before branching over the implementation,
652 so that (1) it's clear after nullifying this insn and
653 (2) if this insn nullifies the next, PSW[N] is valid. */
654 if (ctx->psw_n_nonzero) {
655 ctx->psw_n_nonzero = false;
656 tcg_gen_movi_reg(cpu_psw_n, 0);
657 }
658
659 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
660 ctx->null_cond.a1, ctx->null_lab);
661 cond_free(&ctx->null_cond);
662 }
663 }
664
665 /* Save the current nullification state to PSW[N]. */
666 static void nullify_save(DisasContext *ctx)
667 {
668 if (ctx->null_cond.c == TCG_COND_NEVER) {
669 if (ctx->psw_n_nonzero) {
670 tcg_gen_movi_reg(cpu_psw_n, 0);
671 }
672 return;
673 }
674 if (ctx->null_cond.a0 != cpu_psw_n) {
675 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
676 ctx->null_cond.a0, ctx->null_cond.a1);
677 ctx->psw_n_nonzero = true;
678 }
679 cond_free(&ctx->null_cond);
680 }
681
682 /* Set a PSW[N] to X. The intention is that this is used immediately
683 before a goto_tb/exit_tb, so that there is no fallthru path to other
684 code within the TB. Therefore we do not update psw_n_nonzero. */
685 static void nullify_set(DisasContext *ctx, bool x)
686 {
687 if (ctx->psw_n_nonzero || x) {
688 tcg_gen_movi_reg(cpu_psw_n, x);
689 }
690 }
691
692 /* Mark the end of an instruction that may have been nullified.
693 This is the pair to nullify_over. Always returns true so that
694 it may be tail-called from a translate function. */
695 static bool nullify_end(DisasContext *ctx)
696 {
697 TCGLabel *null_lab = ctx->null_lab;
698 DisasJumpType status = ctx->base.is_jmp;
699
700 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
701 For UPDATED, we cannot update on the nullified path. */
702 assert(status != DISAS_IAQ_N_UPDATED);
703
704 if (likely(null_lab == NULL)) {
705 /* The current insn wasn't conditional or handled the condition
706 applied to it without a branch, so the (new) setting of
707 NULL_COND can be applied directly to the next insn. */
708 return true;
709 }
710 ctx->null_lab = NULL;
711
712 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
713 /* The next instruction will be unconditional,
714 and NULL_COND already reflects that. */
715 gen_set_label(null_lab);
716 } else {
717 /* The insn that we just executed is itself nullifying the next
718 instruction. Store the condition in the PSW[N] global.
719 We asserted PSW[N] = 0 in nullify_over, so that after the
720 label we have the proper value in place. */
721 nullify_save(ctx);
722 gen_set_label(null_lab);
723 ctx->null_cond = cond_make_n();
724 }
725 if (status == DISAS_NORETURN) {
726 ctx->base.is_jmp = DISAS_NEXT;
727 }
728 return true;
729 }
730
731 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
732 {
733 if (unlikely(ival == -1)) {
734 tcg_gen_mov_reg(dest, vval);
735 } else {
736 tcg_gen_movi_reg(dest, ival);
737 }
738 }
739
740 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
741 {
742 return ctx->iaoq_f + disp + 8;
743 }
744
745 static void gen_excp_1(int exception)
746 {
747 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
748 }
749
750 static void gen_excp(DisasContext *ctx, int exception)
751 {
752 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
753 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
754 nullify_save(ctx);
755 gen_excp_1(exception);
756 ctx->base.is_jmp = DISAS_NORETURN;
757 }
758
759 static bool gen_excp_iir(DisasContext *ctx, int exc)
760 {
761 nullify_over(ctx);
762 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
763 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
764 gen_excp(ctx, exc);
765 return nullify_end(ctx);
766 }
767
768 static bool gen_illegal(DisasContext *ctx)
769 {
770 return gen_excp_iir(ctx, EXCP_ILL);
771 }
772
773 #ifdef CONFIG_USER_ONLY
774 #define CHECK_MOST_PRIVILEGED(EXCP) \
775 return gen_excp_iir(ctx, EXCP)
776 #else
777 #define CHECK_MOST_PRIVILEGED(EXCP) \
778 do { \
779 if (ctx->privilege != 0) { \
780 return gen_excp_iir(ctx, EXCP); \
781 } \
782 } while (0)
783 #endif
784
785 static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
786 {
787 return translator_use_goto_tb(&ctx->base, dest);
788 }
789
790 /* If the next insn is to be nullified, and it's on the same page,
791 and we're not attempting to set a breakpoint on it, then we can
792 totally skip the nullified insn. This avoids creating and
793 executing a TB that merely branches to the next TB. */
794 static bool use_nullify_skip(DisasContext *ctx)
795 {
796 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
797 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
798 }
799
800 static void gen_goto_tb(DisasContext *ctx, int which,
801 target_ureg f, target_ureg b)
802 {
803 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
804 tcg_gen_goto_tb(which);
805 tcg_gen_movi_reg(cpu_iaoq_f, f);
806 tcg_gen_movi_reg(cpu_iaoq_b, b);
807 tcg_gen_exit_tb(ctx->base.tb, which);
808 } else {
809 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
810 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
811 tcg_gen_lookup_and_goto_ptr();
812 }
813 }
814
815 static bool cond_need_sv(int c)
816 {
817 return c == 2 || c == 3 || c == 6;
818 }
819
820 static bool cond_need_cb(int c)
821 {
822 return c == 4 || c == 5;
823 }
824
825 /*
826 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
827 * the Parisc 1.1 Architecture Reference Manual for details.
828 */
829
830 static DisasCond do_cond(unsigned cf, TCGv_reg res,
831 TCGv_reg cb_msb, TCGv_reg sv)
832 {
833 DisasCond cond;
834 TCGv_reg tmp;
835
836 switch (cf >> 1) {
837 case 0: /* Never / TR (0 / 1) */
838 cond = cond_make_f();
839 break;
840 case 1: /* = / <> (Z / !Z) */
841 cond = cond_make_0(TCG_COND_EQ, res);
842 break;
843 case 2: /* < / >= (N ^ V / !(N ^ V) */
844 tmp = tcg_temp_new();
845 tcg_gen_xor_reg(tmp, res, sv);
846 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
847 break;
848 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
849 /*
850 * Simplify:
851 * (N ^ V) | Z
852 * ((res < 0) ^ (sv < 0)) | !res
853 * ((res ^ sv) < 0) | !res
854 * (~(res ^ sv) >= 0) | !res
855 * !(~(res ^ sv) >> 31) | !res
856 * !(~(res ^ sv) >> 31 & res)
857 */
858 tmp = tcg_temp_new();
859 tcg_gen_eqv_reg(tmp, res, sv);
860 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
861 tcg_gen_and_reg(tmp, tmp, res);
862 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
863 break;
864 case 4: /* NUV / UV (!C / C) */
865 cond = cond_make_0(TCG_COND_EQ, cb_msb);
866 break;
867 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
868 tmp = tcg_temp_new();
869 tcg_gen_neg_reg(tmp, cb_msb);
870 tcg_gen_and_reg(tmp, tmp, res);
871 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
872 break;
873 case 6: /* SV / NSV (V / !V) */
874 cond = cond_make_0(TCG_COND_LT, sv);
875 break;
876 case 7: /* OD / EV */
877 tmp = tcg_temp_new();
878 tcg_gen_andi_reg(tmp, res, 1);
879 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
880 break;
881 default:
882 g_assert_not_reached();
883 }
884 if (cf & 1) {
885 cond.c = tcg_invert_cond(cond.c);
886 }
887
888 return cond;
889 }
890
891 /* Similar, but for the special case of subtraction without borrow, we
892 can use the inputs directly. This can allow other computation to be
893 deleted as unused. */
894
895 static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
896 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
897 {
898 DisasCond cond;
899
900 switch (cf >> 1) {
901 case 1: /* = / <> */
902 cond = cond_make(TCG_COND_EQ, in1, in2);
903 break;
904 case 2: /* < / >= */
905 cond = cond_make(TCG_COND_LT, in1, in2);
906 break;
907 case 3: /* <= / > */
908 cond = cond_make(TCG_COND_LE, in1, in2);
909 break;
910 case 4: /* << / >>= */
911 cond = cond_make(TCG_COND_LTU, in1, in2);
912 break;
913 case 5: /* <<= / >> */
914 cond = cond_make(TCG_COND_LEU, in1, in2);
915 break;
916 default:
917 return do_cond(cf, res, NULL, sv);
918 }
919 if (cf & 1) {
920 cond.c = tcg_invert_cond(cond.c);
921 }
922
923 return cond;
924 }
925
926 /*
927 * Similar, but for logicals, where the carry and overflow bits are not
928 * computed, and use of them is undefined.
929 *
930 * Undefined or not, hardware does not trap. It seems reasonable to
931 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
932 * how cases c={2,3} are treated.
933 */
934
935 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
936 {
937 switch (cf) {
938 case 0: /* never */
939 case 9: /* undef, C */
940 case 11: /* undef, C & !Z */
941 case 12: /* undef, V */
942 return cond_make_f();
943
944 case 1: /* true */
945 case 8: /* undef, !C */
946 case 10: /* undef, !C | Z */
947 case 13: /* undef, !V */
948 return cond_make_t();
949
950 case 2: /* == */
951 return cond_make_0(TCG_COND_EQ, res);
952 case 3: /* <> */
953 return cond_make_0(TCG_COND_NE, res);
954 case 4: /* < */
955 return cond_make_0(TCG_COND_LT, res);
956 case 5: /* >= */
957 return cond_make_0(TCG_COND_GE, res);
958 case 6: /* <= */
959 return cond_make_0(TCG_COND_LE, res);
960 case 7: /* > */
961 return cond_make_0(TCG_COND_GT, res);
962
963 case 14: /* OD */
964 case 15: /* EV */
965 return do_cond(cf, res, NULL, NULL);
966
967 default:
968 g_assert_not_reached();
969 }
970 }
971
972 /* Similar, but for shift/extract/deposit conditions. */
973
974 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
975 {
976 unsigned c, f;
977
978 /* Convert the compressed condition codes to standard.
979 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
980 4-7 are the reverse of 0-3. */
981 c = orig & 3;
982 if (c == 3) {
983 c = 7;
984 }
985 f = (orig & 4) / 4;
986
987 return do_log_cond(c * 2 + f, res);
988 }
989
990 /* Similar, but for unit conditions. */
991
992 static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
993 TCGv_reg in1, TCGv_reg in2)
994 {
995 DisasCond cond;
996 TCGv_reg tmp, cb = NULL;
997
998 if (cf & 8) {
999 /* Since we want to test lots of carry-out bits all at once, do not
1000 * do our normal thing and compute carry-in of bit B+1 since that
1001 * leaves us with carry bits spread across two words.
1002 */
1003 cb = tcg_temp_new();
1004 tmp = tcg_temp_new();
1005 tcg_gen_or_reg(cb, in1, in2);
1006 tcg_gen_and_reg(tmp, in1, in2);
1007 tcg_gen_andc_reg(cb, cb, res);
1008 tcg_gen_or_reg(cb, cb, tmp);
1009 }
1010
1011 switch (cf >> 1) {
1012 case 0: /* never / TR */
1013 case 1: /* undefined */
1014 case 5: /* undefined */
1015 cond = cond_make_f();
1016 break;
1017
1018 case 2: /* SBZ / NBZ */
1019 /* See hasless(v,1) from
1020 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1021 */
1022 tmp = tcg_temp_new();
1023 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1024 tcg_gen_andc_reg(tmp, tmp, res);
1025 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
1026 cond = cond_make_0(TCG_COND_NE, tmp);
1027 break;
1028
1029 case 3: /* SHZ / NHZ */
1030 tmp = tcg_temp_new();
1031 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1032 tcg_gen_andc_reg(tmp, tmp, res);
1033 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
1034 cond = cond_make_0(TCG_COND_NE, tmp);
1035 break;
1036
1037 case 4: /* SDC / NDC */
1038 tcg_gen_andi_reg(cb, cb, 0x88888888u);
1039 cond = cond_make_0(TCG_COND_NE, cb);
1040 break;
1041
1042 case 6: /* SBC / NBC */
1043 tcg_gen_andi_reg(cb, cb, 0x80808080u);
1044 cond = cond_make_0(TCG_COND_NE, cb);
1045 break;
1046
1047 case 7: /* SHC / NHC */
1048 tcg_gen_andi_reg(cb, cb, 0x80008000u);
1049 cond = cond_make_0(TCG_COND_NE, cb);
1050 break;
1051
1052 default:
1053 g_assert_not_reached();
1054 }
1055 if (cf & 1) {
1056 cond.c = tcg_invert_cond(cond.c);
1057 }
1058
1059 return cond;
1060 }
1061
1062 /* Compute signed overflow for addition. */
1063 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1064 TCGv_reg in1, TCGv_reg in2)
1065 {
1066 TCGv_reg sv = tcg_temp_new();
1067 TCGv_reg tmp = tcg_temp_new();
1068
1069 tcg_gen_xor_reg(sv, res, in1);
1070 tcg_gen_xor_reg(tmp, in1, in2);
1071 tcg_gen_andc_reg(sv, sv, tmp);
1072
1073 return sv;
1074 }
1075
1076 /* Compute signed overflow for subtraction. */
1077 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1078 TCGv_reg in1, TCGv_reg in2)
1079 {
1080 TCGv_reg sv = tcg_temp_new();
1081 TCGv_reg tmp = tcg_temp_new();
1082
1083 tcg_gen_xor_reg(sv, res, in1);
1084 tcg_gen_xor_reg(tmp, in1, in2);
1085 tcg_gen_and_reg(sv, sv, tmp);
1086
1087 return sv;
1088 }
1089
1090 static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1091 TCGv_reg in2, unsigned shift, bool is_l,
1092 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
1093 {
1094 TCGv_reg dest, cb, cb_msb, sv, tmp;
1095 unsigned c = cf >> 1;
1096 DisasCond cond;
1097
1098 dest = tcg_temp_new();
1099 cb = NULL;
1100 cb_msb = NULL;
1101
1102 if (shift) {
1103 tmp = tcg_temp_new();
1104 tcg_gen_shli_reg(tmp, in1, shift);
1105 in1 = tmp;
1106 }
1107
1108 if (!is_l || cond_need_cb(c)) {
1109 TCGv_reg zero = tcg_constant_reg(0);
1110 cb_msb = tcg_temp_new();
1111 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
1112 if (is_c) {
1113 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
1114 }
1115 if (!is_l) {
1116 cb = tcg_temp_new();
1117 tcg_gen_xor_reg(cb, in1, in2);
1118 tcg_gen_xor_reg(cb, cb, dest);
1119 }
1120 } else {
1121 tcg_gen_add_reg(dest, in1, in2);
1122 if (is_c) {
1123 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
1124 }
1125 }
1126
1127 /* Compute signed overflow if required. */
1128 sv = NULL;
1129 if (is_tsv || cond_need_sv(c)) {
1130 sv = do_add_sv(ctx, dest, in1, in2);
1131 if (is_tsv) {
1132 /* ??? Need to include overflow from shift. */
1133 gen_helper_tsv(tcg_env, sv);
1134 }
1135 }
1136
1137 /* Emit any conditional trap before any writeback. */
1138 cond = do_cond(cf, dest, cb_msb, sv);
1139 if (is_tc) {
1140 tmp = tcg_temp_new();
1141 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1142 gen_helper_tcond(tcg_env, tmp);
1143 }
1144
1145 /* Write back the result. */
1146 if (!is_l) {
1147 save_or_nullify(ctx, cpu_psw_cb, cb);
1148 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1149 }
1150 save_gpr(ctx, rt, dest);
1151
1152 /* Install the new nullification. */
1153 cond_free(&ctx->null_cond);
1154 ctx->null_cond = cond;
1155 }
1156
1157 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1158 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1159 {
1160 TCGv_reg tcg_r1, tcg_r2;
1161
1162 if (a->cf) {
1163 nullify_over(ctx);
1164 }
1165 tcg_r1 = load_gpr(ctx, a->r1);
1166 tcg_r2 = load_gpr(ctx, a->r2);
1167 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1168 return nullify_end(ctx);
1169 }
1170
1171 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1172 bool is_tsv, bool is_tc)
1173 {
1174 TCGv_reg tcg_im, tcg_r2;
1175
1176 if (a->cf) {
1177 nullify_over(ctx);
1178 }
1179 tcg_im = load_const(ctx, a->i);
1180 tcg_r2 = load_gpr(ctx, a->r);
1181 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1182 return nullify_end(ctx);
1183 }
1184
1185 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1186 TCGv_reg in2, bool is_tsv, bool is_b,
1187 bool is_tc, unsigned cf)
1188 {
1189 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
1190 unsigned c = cf >> 1;
1191 DisasCond cond;
1192
1193 dest = tcg_temp_new();
1194 cb = tcg_temp_new();
1195 cb_msb = tcg_temp_new();
1196
1197 zero = tcg_constant_reg(0);
1198 if (is_b) {
1199 /* DEST,C = IN1 + ~IN2 + C. */
1200 tcg_gen_not_reg(cb, in2);
1201 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
1202 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1203 tcg_gen_xor_reg(cb, cb, in1);
1204 tcg_gen_xor_reg(cb, cb, dest);
1205 } else {
1206 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1207 operations by seeding the high word with 1 and subtracting. */
1208 tcg_gen_movi_reg(cb_msb, 1);
1209 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
1210 tcg_gen_eqv_reg(cb, in1, in2);
1211 tcg_gen_xor_reg(cb, cb, dest);
1212 }
1213
1214 /* Compute signed overflow if required. */
1215 sv = NULL;
1216 if (is_tsv || cond_need_sv(c)) {
1217 sv = do_sub_sv(ctx, dest, in1, in2);
1218 if (is_tsv) {
1219 gen_helper_tsv(tcg_env, sv);
1220 }
1221 }
1222
1223 /* Compute the condition. We cannot use the special case for borrow. */
1224 if (!is_b) {
1225 cond = do_sub_cond(cf, dest, in1, in2, sv);
1226 } else {
1227 cond = do_cond(cf, dest, cb_msb, sv);
1228 }
1229
1230 /* Emit any conditional trap before any writeback. */
1231 if (is_tc) {
1232 tmp = tcg_temp_new();
1233 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1234 gen_helper_tcond(tcg_env, tmp);
1235 }
1236
1237 /* Write back the result. */
1238 save_or_nullify(ctx, cpu_psw_cb, cb);
1239 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1240 save_gpr(ctx, rt, dest);
1241
1242 /* Install the new nullification. */
1243 cond_free(&ctx->null_cond);
1244 ctx->null_cond = cond;
1245 }
1246
1247 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1248 bool is_tsv, bool is_b, bool is_tc)
1249 {
1250 TCGv_reg tcg_r1, tcg_r2;
1251
1252 if (a->cf) {
1253 nullify_over(ctx);
1254 }
1255 tcg_r1 = load_gpr(ctx, a->r1);
1256 tcg_r2 = load_gpr(ctx, a->r2);
1257 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1258 return nullify_end(ctx);
1259 }
1260
1261 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1262 {
1263 TCGv_reg tcg_im, tcg_r2;
1264
1265 if (a->cf) {
1266 nullify_over(ctx);
1267 }
1268 tcg_im = load_const(ctx, a->i);
1269 tcg_r2 = load_gpr(ctx, a->r);
1270 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1271 return nullify_end(ctx);
1272 }
1273
1274 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1275 TCGv_reg in2, unsigned cf)
1276 {
1277 TCGv_reg dest, sv;
1278 DisasCond cond;
1279
1280 dest = tcg_temp_new();
1281 tcg_gen_sub_reg(dest, in1, in2);
1282
1283 /* Compute signed overflow if required. */
1284 sv = NULL;
1285 if (cond_need_sv(cf >> 1)) {
1286 sv = do_sub_sv(ctx, dest, in1, in2);
1287 }
1288
1289 /* Form the condition for the compare. */
1290 cond = do_sub_cond(cf, dest, in1, in2, sv);
1291
1292 /* Clear. */
1293 tcg_gen_movi_reg(dest, 0);
1294 save_gpr(ctx, rt, dest);
1295
1296 /* Install the new nullification. */
1297 cond_free(&ctx->null_cond);
1298 ctx->null_cond = cond;
1299 }
1300
1301 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1302 TCGv_reg in2, unsigned cf,
1303 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1304 {
1305 TCGv_reg dest = dest_gpr(ctx, rt);
1306
1307 /* Perform the operation, and writeback. */
1308 fn(dest, in1, in2);
1309 save_gpr(ctx, rt, dest);
1310
1311 /* Install the new nullification. */
1312 cond_free(&ctx->null_cond);
1313 if (cf) {
1314 ctx->null_cond = do_log_cond(cf, dest);
1315 }
1316 }
1317
1318 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1319 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1320 {
1321 TCGv_reg tcg_r1, tcg_r2;
1322
1323 if (a->cf) {
1324 nullify_over(ctx);
1325 }
1326 tcg_r1 = load_gpr(ctx, a->r1);
1327 tcg_r2 = load_gpr(ctx, a->r2);
1328 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1329 return nullify_end(ctx);
1330 }
1331
1332 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1333 TCGv_reg in2, unsigned cf, bool is_tc,
1334 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1335 {
1336 TCGv_reg dest;
1337 DisasCond cond;
1338
1339 if (cf == 0) {
1340 dest = dest_gpr(ctx, rt);
1341 fn(dest, in1, in2);
1342 save_gpr(ctx, rt, dest);
1343 cond_free(&ctx->null_cond);
1344 } else {
1345 dest = tcg_temp_new();
1346 fn(dest, in1, in2);
1347
1348 cond = do_unit_cond(cf, dest, in1, in2);
1349
1350 if (is_tc) {
1351 TCGv_reg tmp = tcg_temp_new();
1352 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1353 gen_helper_tcond(tcg_env, tmp);
1354 }
1355 save_gpr(ctx, rt, dest);
1356
1357 cond_free(&ctx->null_cond);
1358 ctx->null_cond = cond;
1359 }
1360 }
1361
1362 #ifndef CONFIG_USER_ONLY
1363 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1364 from the top 2 bits of the base register. There are a few system
1365 instructions that have a 3-bit space specifier, for which SR0 is
1366 not special. To handle this, pass ~SP. */
1367 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1368 {
1369 TCGv_ptr ptr;
1370 TCGv_reg tmp;
1371 TCGv_i64 spc;
1372
1373 if (sp != 0) {
1374 if (sp < 0) {
1375 sp = ~sp;
1376 }
1377 spc = get_temp_tl(ctx);
1378 load_spr(ctx, spc, sp);
1379 return spc;
1380 }
1381 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1382 return cpu_srH;
1383 }
1384
1385 ptr = tcg_temp_new_ptr();
1386 tmp = tcg_temp_new();
1387 spc = get_temp_tl(ctx);
1388
1389 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1390 tcg_gen_andi_reg(tmp, tmp, 030);
1391 tcg_gen_trunc_reg_ptr(ptr, tmp);
1392
1393 tcg_gen_add_ptr(ptr, ptr, tcg_env);
1394 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1395
1396 return spc;
1397 }
1398 #endif
1399
1400 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1401 unsigned rb, unsigned rx, int scale, target_sreg disp,
1402 unsigned sp, int modify, bool is_phys)
1403 {
1404 TCGv_reg base = load_gpr(ctx, rb);
1405 TCGv_reg ofs;
1406
1407 /* Note that RX is mutually exclusive with DISP. */
1408 if (rx) {
1409 ofs = tcg_temp_new();
1410 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1411 tcg_gen_add_reg(ofs, ofs, base);
1412 } else if (disp || modify) {
1413 ofs = tcg_temp_new();
1414 tcg_gen_addi_reg(ofs, base, disp);
1415 } else {
1416 ofs = base;
1417 }
1418
1419 *pofs = ofs;
1420 #ifdef CONFIG_USER_ONLY
1421 *pgva = (modify <= 0 ? ofs : base);
1422 #else
1423 TCGv_tl addr = get_temp_tl(ctx);
1424 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1425 if (ctx->tb_flags & PSW_W) {
1426 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
1427 }
1428 if (!is_phys) {
1429 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1430 }
1431 *pgva = addr;
1432 #endif
1433 }
1434
1435 /* Emit a memory load. The modify parameter should be
1436 * < 0 for pre-modify,
1437 * > 0 for post-modify,
1438 * = 0 for no base register update.
1439 */
1440 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1441 unsigned rx, int scale, target_sreg disp,
1442 unsigned sp, int modify, MemOp mop)
1443 {
1444 TCGv_reg ofs;
1445 TCGv_tl addr;
1446
1447 /* Caller uses nullify_over/nullify_end. */
1448 assert(ctx->null_cond.c == TCG_COND_NEVER);
1449
1450 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1451 ctx->mmu_idx == MMU_PHYS_IDX);
1452 tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1453 if (modify) {
1454 save_gpr(ctx, rb, ofs);
1455 }
1456 }
1457
1458 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1459 unsigned rx, int scale, target_sreg disp,
1460 unsigned sp, int modify, MemOp mop)
1461 {
1462 TCGv_reg ofs;
1463 TCGv_tl addr;
1464
1465 /* Caller uses nullify_over/nullify_end. */
1466 assert(ctx->null_cond.c == TCG_COND_NEVER);
1467
1468 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1469 ctx->mmu_idx == MMU_PHYS_IDX);
1470 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1471 if (modify) {
1472 save_gpr(ctx, rb, ofs);
1473 }
1474 }
1475
1476 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1477 unsigned rx, int scale, target_sreg disp,
1478 unsigned sp, int modify, MemOp mop)
1479 {
1480 TCGv_reg ofs;
1481 TCGv_tl addr;
1482
1483 /* Caller uses nullify_over/nullify_end. */
1484 assert(ctx->null_cond.c == TCG_COND_NEVER);
1485
1486 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1487 ctx->mmu_idx == MMU_PHYS_IDX);
1488 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1489 if (modify) {
1490 save_gpr(ctx, rb, ofs);
1491 }
1492 }
1493
1494 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1495 unsigned rx, int scale, target_sreg disp,
1496 unsigned sp, int modify, MemOp mop)
1497 {
1498 TCGv_reg ofs;
1499 TCGv_tl addr;
1500
1501 /* Caller uses nullify_over/nullify_end. */
1502 assert(ctx->null_cond.c == TCG_COND_NEVER);
1503
1504 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1505 ctx->mmu_idx == MMU_PHYS_IDX);
1506 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1507 if (modify) {
1508 save_gpr(ctx, rb, ofs);
1509 }
1510 }
1511
1512 #if TARGET_REGISTER_BITS == 64
1513 #define do_load_reg do_load_64
1514 #define do_store_reg do_store_64
1515 #else
1516 #define do_load_reg do_load_32
1517 #define do_store_reg do_store_32
1518 #endif
1519
1520 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1521 unsigned rx, int scale, target_sreg disp,
1522 unsigned sp, int modify, MemOp mop)
1523 {
1524 TCGv_reg dest;
1525
1526 nullify_over(ctx);
1527
1528 if (modify == 0) {
1529 /* No base register update. */
1530 dest = dest_gpr(ctx, rt);
1531 } else {
1532 /* Make sure if RT == RB, we see the result of the load. */
1533 dest = tcg_temp_new();
1534 }
1535 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1536 save_gpr(ctx, rt, dest);
1537
1538 return nullify_end(ctx);
1539 }
1540
1541 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1542 unsigned rx, int scale, target_sreg disp,
1543 unsigned sp, int modify)
1544 {
1545 TCGv_i32 tmp;
1546
1547 nullify_over(ctx);
1548
1549 tmp = tcg_temp_new_i32();
1550 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1551 save_frw_i32(rt, tmp);
1552
1553 if (rt == 0) {
1554 gen_helper_loaded_fr0(tcg_env);
1555 }
1556
1557 return nullify_end(ctx);
1558 }
1559
1560 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1561 {
1562 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1563 a->disp, a->sp, a->m);
1564 }
1565
1566 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1567 unsigned rx, int scale, target_sreg disp,
1568 unsigned sp, int modify)
1569 {
1570 TCGv_i64 tmp;
1571
1572 nullify_over(ctx);
1573
1574 tmp = tcg_temp_new_i64();
1575 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1576 save_frd(rt, tmp);
1577
1578 if (rt == 0) {
1579 gen_helper_loaded_fr0(tcg_env);
1580 }
1581
1582 return nullify_end(ctx);
1583 }
1584
1585 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1586 {
1587 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1588 a->disp, a->sp, a->m);
1589 }
1590
1591 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1592 target_sreg disp, unsigned sp,
1593 int modify, MemOp mop)
1594 {
1595 nullify_over(ctx);
1596 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1597 return nullify_end(ctx);
1598 }
1599
1600 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1601 unsigned rx, int scale, target_sreg disp,
1602 unsigned sp, int modify)
1603 {
1604 TCGv_i32 tmp;
1605
1606 nullify_over(ctx);
1607
1608 tmp = load_frw_i32(rt);
1609 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1610
1611 return nullify_end(ctx);
1612 }
1613
1614 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1615 {
1616 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1617 a->disp, a->sp, a->m);
1618 }
1619
1620 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1621 unsigned rx, int scale, target_sreg disp,
1622 unsigned sp, int modify)
1623 {
1624 TCGv_i64 tmp;
1625
1626 nullify_over(ctx);
1627
1628 tmp = load_frd(rt);
1629 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1630
1631 return nullify_end(ctx);
1632 }
1633
1634 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1635 {
1636 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1637 a->disp, a->sp, a->m);
1638 }
1639
1640 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1641 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1642 {
1643 TCGv_i32 tmp;
1644
1645 nullify_over(ctx);
1646 tmp = load_frw0_i32(ra);
1647
1648 func(tmp, tcg_env, tmp);
1649
1650 save_frw_i32(rt, tmp);
1651 return nullify_end(ctx);
1652 }
1653
1654 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1655 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1656 {
1657 TCGv_i32 dst;
1658 TCGv_i64 src;
1659
1660 nullify_over(ctx);
1661 src = load_frd(ra);
1662 dst = tcg_temp_new_i32();
1663
1664 func(dst, tcg_env, src);
1665
1666 save_frw_i32(rt, dst);
1667 return nullify_end(ctx);
1668 }
1669
1670 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1671 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1672 {
1673 TCGv_i64 tmp;
1674
1675 nullify_over(ctx);
1676 tmp = load_frd0(ra);
1677
1678 func(tmp, tcg_env, tmp);
1679
1680 save_frd(rt, tmp);
1681 return nullify_end(ctx);
1682 }
1683
1684 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1685 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1686 {
1687 TCGv_i32 src;
1688 TCGv_i64 dst;
1689
1690 nullify_over(ctx);
1691 src = load_frw0_i32(ra);
1692 dst = tcg_temp_new_i64();
1693
1694 func(dst, tcg_env, src);
1695
1696 save_frd(rt, dst);
1697 return nullify_end(ctx);
1698 }
1699
1700 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1701 unsigned ra, unsigned rb,
1702 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1703 {
1704 TCGv_i32 a, b;
1705
1706 nullify_over(ctx);
1707 a = load_frw0_i32(ra);
1708 b = load_frw0_i32(rb);
1709
1710 func(a, tcg_env, a, b);
1711
1712 save_frw_i32(rt, a);
1713 return nullify_end(ctx);
1714 }
1715
1716 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1717 unsigned ra, unsigned rb,
1718 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1719 {
1720 TCGv_i64 a, b;
1721
1722 nullify_over(ctx);
1723 a = load_frd0(ra);
1724 b = load_frd0(rb);
1725
1726 func(a, tcg_env, a, b);
1727
1728 save_frd(rt, a);
1729 return nullify_end(ctx);
1730 }
1731
1732 /* Emit an unconditional branch to a direct target, which may or may not
1733 have already had nullification handled. */
1734 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1735 unsigned link, bool is_n)
1736 {
1737 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1738 if (link != 0) {
1739 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1740 }
1741 ctx->iaoq_n = dest;
1742 if (is_n) {
1743 ctx->null_cond.c = TCG_COND_ALWAYS;
1744 }
1745 } else {
1746 nullify_over(ctx);
1747
1748 if (link != 0) {
1749 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1750 }
1751
1752 if (is_n && use_nullify_skip(ctx)) {
1753 nullify_set(ctx, 0);
1754 gen_goto_tb(ctx, 0, dest, dest + 4);
1755 } else {
1756 nullify_set(ctx, is_n);
1757 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1758 }
1759
1760 nullify_end(ctx);
1761
1762 nullify_set(ctx, 0);
1763 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1764 ctx->base.is_jmp = DISAS_NORETURN;
1765 }
1766 return true;
1767 }
1768
1769 /* Emit a conditional branch to a direct target. If the branch itself
1770 is nullified, we should have already used nullify_over. */
1771 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1772 DisasCond *cond)
1773 {
1774 target_ureg dest = iaoq_dest(ctx, disp);
1775 TCGLabel *taken = NULL;
1776 TCGCond c = cond->c;
1777 bool n;
1778
1779 assert(ctx->null_cond.c == TCG_COND_NEVER);
1780
1781 /* Handle TRUE and NEVER as direct branches. */
1782 if (c == TCG_COND_ALWAYS) {
1783 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1784 }
1785 if (c == TCG_COND_NEVER) {
1786 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1787 }
1788
1789 taken = gen_new_label();
1790 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1791 cond_free(cond);
1792
1793 /* Not taken: Condition not satisfied; nullify on backward branches. */
1794 n = is_n && disp < 0;
1795 if (n && use_nullify_skip(ctx)) {
1796 nullify_set(ctx, 0);
1797 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1798 } else {
1799 if (!n && ctx->null_lab) {
1800 gen_set_label(ctx->null_lab);
1801 ctx->null_lab = NULL;
1802 }
1803 nullify_set(ctx, n);
1804 if (ctx->iaoq_n == -1) {
1805 /* The temporary iaoq_n_var died at the branch above.
1806 Regenerate it here instead of saving it. */
1807 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1808 }
1809 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1810 }
1811
1812 gen_set_label(taken);
1813
1814 /* Taken: Condition satisfied; nullify on forward branches. */
1815 n = is_n && disp >= 0;
1816 if (n && use_nullify_skip(ctx)) {
1817 nullify_set(ctx, 0);
1818 gen_goto_tb(ctx, 1, dest, dest + 4);
1819 } else {
1820 nullify_set(ctx, n);
1821 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1822 }
1823
1824 /* Not taken: the branch itself was nullified. */
1825 if (ctx->null_lab) {
1826 gen_set_label(ctx->null_lab);
1827 ctx->null_lab = NULL;
1828 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1829 } else {
1830 ctx->base.is_jmp = DISAS_NORETURN;
1831 }
1832 return true;
1833 }
1834
1835 /* Emit an unconditional branch to an indirect target. This handles
1836 nullification of the branch itself. */
1837 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1838 unsigned link, bool is_n)
1839 {
1840 TCGv_reg a0, a1, next, tmp;
1841 TCGCond c;
1842
1843 assert(ctx->null_lab == NULL);
1844
1845 if (ctx->null_cond.c == TCG_COND_NEVER) {
1846 if (link != 0) {
1847 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1848 }
1849 next = tcg_temp_new();
1850 tcg_gen_mov_reg(next, dest);
1851 if (is_n) {
1852 if (use_nullify_skip(ctx)) {
1853 tcg_gen_mov_reg(cpu_iaoq_f, next);
1854 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1855 nullify_set(ctx, 0);
1856 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1857 return true;
1858 }
1859 ctx->null_cond.c = TCG_COND_ALWAYS;
1860 }
1861 ctx->iaoq_n = -1;
1862 ctx->iaoq_n_var = next;
1863 } else if (is_n && use_nullify_skip(ctx)) {
1864 /* The (conditional) branch, B, nullifies the next insn, N,
1865 and we're allowed to skip execution N (no single-step or
1866 tracepoint in effect). Since the goto_ptr that we must use
1867 for the indirect branch consumes no special resources, we
1868 can (conditionally) skip B and continue execution. */
1869 /* The use_nullify_skip test implies we have a known control path. */
1870 tcg_debug_assert(ctx->iaoq_b != -1);
1871 tcg_debug_assert(ctx->iaoq_n != -1);
1872
1873 /* We do have to handle the non-local temporary, DEST, before
1874 branching. Since IOAQ_F is not really live at this point, we
1875 can simply store DEST optimistically. Similarly with IAOQ_B. */
1876 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1877 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1878
1879 nullify_over(ctx);
1880 if (link != 0) {
1881 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1882 }
1883 tcg_gen_lookup_and_goto_ptr();
1884 return nullify_end(ctx);
1885 } else {
1886 c = ctx->null_cond.c;
1887 a0 = ctx->null_cond.a0;
1888 a1 = ctx->null_cond.a1;
1889
1890 tmp = tcg_temp_new();
1891 next = tcg_temp_new();
1892
1893 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1894 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1895 ctx->iaoq_n = -1;
1896 ctx->iaoq_n_var = next;
1897
1898 if (link != 0) {
1899 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1900 }
1901
1902 if (is_n) {
1903 /* The branch nullifies the next insn, which means the state of N
1904 after the branch is the inverse of the state of N that applied
1905 to the branch. */
1906 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1907 cond_free(&ctx->null_cond);
1908 ctx->null_cond = cond_make_n();
1909 ctx->psw_n_nonzero = true;
1910 } else {
1911 cond_free(&ctx->null_cond);
1912 }
1913 }
1914 return true;
1915 }
1916
1917 /* Implement
1918 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1919 * IAOQ_Next{30..31} ← GR[b]{30..31};
1920 * else
1921 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1922 * which keeps the privilege level from being increased.
1923 */
1924 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1925 {
1926 TCGv_reg dest;
1927 switch (ctx->privilege) {
1928 case 0:
1929 /* Privilege 0 is maximum and is allowed to decrease. */
1930 return offset;
1931 case 3:
1932 /* Privilege 3 is minimum and is never allowed to increase. */
1933 dest = tcg_temp_new();
1934 tcg_gen_ori_reg(dest, offset, 3);
1935 break;
1936 default:
1937 dest = tcg_temp_new();
1938 tcg_gen_andi_reg(dest, offset, -4);
1939 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1940 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1941 break;
1942 }
1943 return dest;
1944 }
1945
1946 #ifdef CONFIG_USER_ONLY
1947 /* On Linux, page zero is normally marked execute only + gateway.
1948 Therefore normal read or write is supposed to fail, but specific
1949 offsets have kernel code mapped to raise permissions to implement
1950 system calls. Handling this via an explicit check here, rather
1951 in than the "be disp(sr2,r0)" instruction that probably sent us
1952 here, is the easiest way to handle the branch delay slot on the
1953 aforementioned BE. */
1954 static void do_page_zero(DisasContext *ctx)
1955 {
1956 /* If by some means we get here with PSW[N]=1, that implies that
1957 the B,GATE instruction would be skipped, and we'd fault on the
1958 next insn within the privileged page. */
1959 switch (ctx->null_cond.c) {
1960 case TCG_COND_NEVER:
1961 break;
1962 case TCG_COND_ALWAYS:
1963 tcg_gen_movi_reg(cpu_psw_n, 0);
1964 goto do_sigill;
1965 default:
1966 /* Since this is always the first (and only) insn within the
1967 TB, we should know the state of PSW[N] from TB->FLAGS. */
1968 g_assert_not_reached();
1969 }
1970
1971 /* Check that we didn't arrive here via some means that allowed
1972 non-sequential instruction execution. Normally the PSW[B] bit
1973 detects this by disallowing the B,GATE instruction to execute
1974 under such conditions. */
1975 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1976 goto do_sigill;
1977 }
1978
1979 switch (ctx->iaoq_f & -4) {
1980 case 0x00: /* Null pointer call */
1981 gen_excp_1(EXCP_IMP);
1982 ctx->base.is_jmp = DISAS_NORETURN;
1983 break;
1984
1985 case 0xb0: /* LWS */
1986 gen_excp_1(EXCP_SYSCALL_LWS);
1987 ctx->base.is_jmp = DISAS_NORETURN;
1988 break;
1989
1990 case 0xe0: /* SET_THREAD_POINTER */
1991 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1992 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
1993 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
1994 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1995 break;
1996
1997 case 0x100: /* SYSCALL */
1998 gen_excp_1(EXCP_SYSCALL);
1999 ctx->base.is_jmp = DISAS_NORETURN;
2000 break;
2001
2002 default:
2003 do_sigill:
2004 gen_excp_1(EXCP_ILL);
2005 ctx->base.is_jmp = DISAS_NORETURN;
2006 break;
2007 }
2008 }
2009 #endif
2010
2011 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2012 {
2013 cond_free(&ctx->null_cond);
2014 return true;
2015 }
2016
2017 static bool trans_break(DisasContext *ctx, arg_break *a)
2018 {
2019 return gen_excp_iir(ctx, EXCP_BREAK);
2020 }
2021
2022 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2023 {
2024 /* No point in nullifying the memory barrier. */
2025 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2026
2027 cond_free(&ctx->null_cond);
2028 return true;
2029 }
2030
2031 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2032 {
2033 unsigned rt = a->t;
2034 TCGv_reg tmp = dest_gpr(ctx, rt);
2035 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2036 save_gpr(ctx, rt, tmp);
2037
2038 cond_free(&ctx->null_cond);
2039 return true;
2040 }
2041
2042 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2043 {
2044 unsigned rt = a->t;
2045 unsigned rs = a->sp;
2046 TCGv_i64 t0 = tcg_temp_new_i64();
2047 TCGv_reg t1 = tcg_temp_new();
2048
2049 load_spr(ctx, t0, rs);
2050 tcg_gen_shri_i64(t0, t0, 32);
2051 tcg_gen_trunc_i64_reg(t1, t0);
2052
2053 save_gpr(ctx, rt, t1);
2054
2055 cond_free(&ctx->null_cond);
2056 return true;
2057 }
2058
2059 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2060 {
2061 unsigned rt = a->t;
2062 unsigned ctl = a->r;
2063 TCGv_reg tmp;
2064
2065 switch (ctl) {
2066 case CR_SAR:
2067 #ifdef TARGET_HPPA64
2068 if (a->e == 0) {
2069 /* MFSAR without ,W masks low 5 bits. */
2070 tmp = dest_gpr(ctx, rt);
2071 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2072 save_gpr(ctx, rt, tmp);
2073 goto done;
2074 }
2075 #endif
2076 save_gpr(ctx, rt, cpu_sar);
2077 goto done;
2078 case CR_IT: /* Interval Timer */
2079 /* FIXME: Respect PSW_S bit. */
2080 nullify_over(ctx);
2081 tmp = dest_gpr(ctx, rt);
2082 if (translator_io_start(&ctx->base)) {
2083 gen_helper_read_interval_timer(tmp);
2084 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2085 } else {
2086 gen_helper_read_interval_timer(tmp);
2087 }
2088 save_gpr(ctx, rt, tmp);
2089 return nullify_end(ctx);
2090 case 26:
2091 case 27:
2092 break;
2093 default:
2094 /* All other control registers are privileged. */
2095 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2096 break;
2097 }
2098
2099 tmp = tcg_temp_new();
2100 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2101 save_gpr(ctx, rt, tmp);
2102
2103 done:
2104 cond_free(&ctx->null_cond);
2105 return true;
2106 }
2107
2108 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2109 {
2110 unsigned rr = a->r;
2111 unsigned rs = a->sp;
2112 TCGv_i64 t64;
2113
2114 if (rs >= 5) {
2115 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2116 }
2117 nullify_over(ctx);
2118
2119 t64 = tcg_temp_new_i64();
2120 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2121 tcg_gen_shli_i64(t64, t64, 32);
2122
2123 if (rs >= 4) {
2124 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2125 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2126 } else {
2127 tcg_gen_mov_i64(cpu_sr[rs], t64);
2128 }
2129
2130 return nullify_end(ctx);
2131 }
2132
2133 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2134 {
2135 unsigned ctl = a->t;
2136 TCGv_reg reg;
2137 TCGv_reg tmp;
2138
2139 if (ctl == CR_SAR) {
2140 reg = load_gpr(ctx, a->r);
2141 tmp = tcg_temp_new();
2142 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2143 save_or_nullify(ctx, cpu_sar, tmp);
2144
2145 cond_free(&ctx->null_cond);
2146 return true;
2147 }
2148
2149 /* All other control registers are privileged or read-only. */
2150 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2151
2152 #ifndef CONFIG_USER_ONLY
2153 nullify_over(ctx);
2154 reg = load_gpr(ctx, a->r);
2155
2156 switch (ctl) {
2157 case CR_IT:
2158 gen_helper_write_interval_timer(tcg_env, reg);
2159 break;
2160 case CR_EIRR:
2161 gen_helper_write_eirr(tcg_env, reg);
2162 break;
2163 case CR_EIEM:
2164 gen_helper_write_eiem(tcg_env, reg);
2165 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2166 break;
2167
2168 case CR_IIASQ:
2169 case CR_IIAOQ:
2170 /* FIXME: Respect PSW_Q bit */
2171 /* The write advances the queue and stores to the back element. */
2172 tmp = tcg_temp_new();
2173 tcg_gen_ld_reg(tmp, tcg_env,
2174 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2175 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2176 tcg_gen_st_reg(reg, tcg_env,
2177 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2178 break;
2179
2180 case CR_PID1:
2181 case CR_PID2:
2182 case CR_PID3:
2183 case CR_PID4:
2184 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2185 #ifndef CONFIG_USER_ONLY
2186 gen_helper_change_prot_id(tcg_env);
2187 #endif
2188 break;
2189
2190 default:
2191 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2192 break;
2193 }
2194 return nullify_end(ctx);
2195 #endif
2196 }
2197
2198 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2199 {
2200 TCGv_reg tmp = tcg_temp_new();
2201
2202 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2203 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2204 save_or_nullify(ctx, cpu_sar, tmp);
2205
2206 cond_free(&ctx->null_cond);
2207 return true;
2208 }
2209
2210 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2211 {
2212 TCGv_reg dest = dest_gpr(ctx, a->t);
2213
2214 #ifdef CONFIG_USER_ONLY
2215 /* We don't implement space registers in user mode. */
2216 tcg_gen_movi_reg(dest, 0);
2217 #else
2218 TCGv_i64 t0 = tcg_temp_new_i64();
2219
2220 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2221 tcg_gen_shri_i64(t0, t0, 32);
2222 tcg_gen_trunc_i64_reg(dest, t0);
2223 #endif
2224 save_gpr(ctx, a->t, dest);
2225
2226 cond_free(&ctx->null_cond);
2227 return true;
2228 }
2229
2230 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2231 {
2232 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2233 #ifndef CONFIG_USER_ONLY
2234 TCGv_reg tmp;
2235
2236 nullify_over(ctx);
2237
2238 tmp = tcg_temp_new();
2239 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2240 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2241 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2242 save_gpr(ctx, a->t, tmp);
2243
2244 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2245 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2246 return nullify_end(ctx);
2247 #endif
2248 }
2249
2250 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2251 {
2252 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2253 #ifndef CONFIG_USER_ONLY
2254 TCGv_reg tmp;
2255
2256 nullify_over(ctx);
2257
2258 tmp = tcg_temp_new();
2259 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2260 tcg_gen_ori_reg(tmp, tmp, a->i);
2261 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2262 save_gpr(ctx, a->t, tmp);
2263
2264 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2265 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2266 return nullify_end(ctx);
2267 #endif
2268 }
2269
2270 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2271 {
2272 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2273 #ifndef CONFIG_USER_ONLY
2274 TCGv_reg tmp, reg;
2275 nullify_over(ctx);
2276
2277 reg = load_gpr(ctx, a->r);
2278 tmp = tcg_temp_new();
2279 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2280
2281 /* Exit the TB to recognize new interrupts. */
2282 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2283 return nullify_end(ctx);
2284 #endif
2285 }
2286
2287 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2288 {
2289 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2290 #ifndef CONFIG_USER_ONLY
2291 nullify_over(ctx);
2292
2293 if (rfi_r) {
2294 gen_helper_rfi_r(tcg_env);
2295 } else {
2296 gen_helper_rfi(tcg_env);
2297 }
2298 /* Exit the TB to recognize new interrupts. */
2299 tcg_gen_exit_tb(NULL, 0);
2300 ctx->base.is_jmp = DISAS_NORETURN;
2301
2302 return nullify_end(ctx);
2303 #endif
2304 }
2305
2306 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2307 {
2308 return do_rfi(ctx, false);
2309 }
2310
2311 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2312 {
2313 return do_rfi(ctx, true);
2314 }
2315
2316 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2317 {
2318 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2319 #ifndef CONFIG_USER_ONLY
2320 nullify_over(ctx);
2321 gen_helper_halt(tcg_env);
2322 ctx->base.is_jmp = DISAS_NORETURN;
2323 return nullify_end(ctx);
2324 #endif
2325 }
2326
2327 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2328 {
2329 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2330 #ifndef CONFIG_USER_ONLY
2331 nullify_over(ctx);
2332 gen_helper_reset(tcg_env);
2333 ctx->base.is_jmp = DISAS_NORETURN;
2334 return nullify_end(ctx);
2335 #endif
2336 }
2337
2338 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2339 {
2340 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2341 #ifndef CONFIG_USER_ONLY
2342 nullify_over(ctx);
2343 gen_helper_getshadowregs(tcg_env);
2344 return nullify_end(ctx);
2345 #endif
2346 }
2347
2348 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2349 {
2350 if (a->m) {
2351 TCGv_reg dest = dest_gpr(ctx, a->b);
2352 TCGv_reg src1 = load_gpr(ctx, a->b);
2353 TCGv_reg src2 = load_gpr(ctx, a->x);
2354
2355 /* The only thing we need to do is the base register modification. */
2356 tcg_gen_add_reg(dest, src1, src2);
2357 save_gpr(ctx, a->b, dest);
2358 }
2359 cond_free(&ctx->null_cond);
2360 return true;
2361 }
2362
2363 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2364 {
2365 TCGv_reg dest, ofs;
2366 TCGv_i32 level, want;
2367 TCGv_tl addr;
2368
2369 nullify_over(ctx);
2370
2371 dest = dest_gpr(ctx, a->t);
2372 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2373
2374 if (a->imm) {
2375 level = tcg_constant_i32(a->ri);
2376 } else {
2377 level = tcg_temp_new_i32();
2378 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2379 tcg_gen_andi_i32(level, level, 3);
2380 }
2381 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2382
2383 gen_helper_probe(dest, tcg_env, addr, level, want);
2384
2385 save_gpr(ctx, a->t, dest);
2386 return nullify_end(ctx);
2387 }
2388
2389 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2390 {
2391 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2392 #ifndef CONFIG_USER_ONLY
2393 TCGv_tl addr;
2394 TCGv_reg ofs, reg;
2395
2396 nullify_over(ctx);
2397
2398 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2399 reg = load_gpr(ctx, a->r);
2400 if (a->addr) {
2401 gen_helper_itlba(tcg_env, addr, reg);
2402 } else {
2403 gen_helper_itlbp(tcg_env, addr, reg);
2404 }
2405
2406 /* Exit TB for TLB change if mmu is enabled. */
2407 if (ctx->tb_flags & PSW_C) {
2408 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2409 }
2410 return nullify_end(ctx);
2411 #endif
2412 }
2413
2414 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2415 {
2416 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2417 #ifndef CONFIG_USER_ONLY
2418 TCGv_tl addr;
2419 TCGv_reg ofs;
2420
2421 nullify_over(ctx);
2422
2423 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2424 if (a->m) {
2425 save_gpr(ctx, a->b, ofs);
2426 }
2427 if (a->local) {
2428 gen_helper_ptlbe(tcg_env);
2429 } else {
2430 gen_helper_ptlb(tcg_env, addr);
2431 }
2432
2433 /* Exit TB for TLB change if mmu is enabled. */
2434 if (ctx->tb_flags & PSW_C) {
2435 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2436 }
2437 return nullify_end(ctx);
2438 #endif
2439 }
2440
2441 /*
2442 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2443 * See
2444 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2445 * page 13-9 (195/206)
2446 */
2447 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2448 {
2449 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2450 #ifndef CONFIG_USER_ONLY
2451 TCGv_tl addr, atl, stl;
2452 TCGv_reg reg;
2453
2454 nullify_over(ctx);
2455
2456 /*
2457 * FIXME:
2458 * if (not (pcxl or pcxl2))
2459 * return gen_illegal(ctx);
2460 *
2461 * Note for future: these are 32-bit systems; no hppa64.
2462 */
2463
2464 atl = tcg_temp_new_tl();
2465 stl = tcg_temp_new_tl();
2466 addr = tcg_temp_new_tl();
2467
2468 tcg_gen_ld32u_i64(stl, tcg_env,
2469 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2470 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2471 tcg_gen_ld32u_i64(atl, tcg_env,
2472 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2473 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2474 tcg_gen_shli_i64(stl, stl, 32);
2475 tcg_gen_or_tl(addr, atl, stl);
2476
2477 reg = load_gpr(ctx, a->r);
2478 if (a->addr) {
2479 gen_helper_itlba(tcg_env, addr, reg);
2480 } else {
2481 gen_helper_itlbp(tcg_env, addr, reg);
2482 }
2483
2484 /* Exit TB for TLB change if mmu is enabled. */
2485 if (ctx->tb_flags & PSW_C) {
2486 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2487 }
2488 return nullify_end(ctx);
2489 #endif
2490 }
2491
2492 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2493 {
2494 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2495 #ifndef CONFIG_USER_ONLY
2496 TCGv_tl vaddr;
2497 TCGv_reg ofs, paddr;
2498
2499 nullify_over(ctx);
2500
2501 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2502
2503 paddr = tcg_temp_new();
2504 gen_helper_lpa(paddr, tcg_env, vaddr);
2505
2506 /* Note that physical address result overrides base modification. */
2507 if (a->m) {
2508 save_gpr(ctx, a->b, ofs);
2509 }
2510 save_gpr(ctx, a->t, paddr);
2511
2512 return nullify_end(ctx);
2513 #endif
2514 }
2515
2516 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2517 {
2518 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2519
2520 /* The Coherence Index is an implementation-defined function of the
2521 physical address. Two addresses with the same CI have a coherent
2522 view of the cache. Our implementation is to return 0 for all,
2523 since the entire address space is coherent. */
2524 save_gpr(ctx, a->t, tcg_constant_reg(0));
2525
2526 cond_free(&ctx->null_cond);
2527 return true;
2528 }
2529
2530 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2531 {
2532 return do_add_reg(ctx, a, false, false, false, false);
2533 }
2534
2535 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2536 {
2537 return do_add_reg(ctx, a, true, false, false, false);
2538 }
2539
2540 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2541 {
2542 return do_add_reg(ctx, a, false, true, false, false);
2543 }
2544
2545 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2546 {
2547 return do_add_reg(ctx, a, false, false, false, true);
2548 }
2549
2550 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2551 {
2552 return do_add_reg(ctx, a, false, true, false, true);
2553 }
2554
2555 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2556 {
2557 return do_sub_reg(ctx, a, false, false, false);
2558 }
2559
2560 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2561 {
2562 return do_sub_reg(ctx, a, true, false, false);
2563 }
2564
2565 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2566 {
2567 return do_sub_reg(ctx, a, false, false, true);
2568 }
2569
2570 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2571 {
2572 return do_sub_reg(ctx, a, true, false, true);
2573 }
2574
2575 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2576 {
2577 return do_sub_reg(ctx, a, false, true, false);
2578 }
2579
2580 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2581 {
2582 return do_sub_reg(ctx, a, true, true, false);
2583 }
2584
2585 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2586 {
2587 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2588 }
2589
2590 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2591 {
2592 return do_log_reg(ctx, a, tcg_gen_and_reg);
2593 }
2594
2595 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2596 {
2597 if (a->cf == 0) {
2598 unsigned r2 = a->r2;
2599 unsigned r1 = a->r1;
2600 unsigned rt = a->t;
2601
2602 if (rt == 0) { /* NOP */
2603 cond_free(&ctx->null_cond);
2604 return true;
2605 }
2606 if (r2 == 0) { /* COPY */
2607 if (r1 == 0) {
2608 TCGv_reg dest = dest_gpr(ctx, rt);
2609 tcg_gen_movi_reg(dest, 0);
2610 save_gpr(ctx, rt, dest);
2611 } else {
2612 save_gpr(ctx, rt, cpu_gr[r1]);
2613 }
2614 cond_free(&ctx->null_cond);
2615 return true;
2616 }
2617 #ifndef CONFIG_USER_ONLY
2618 /* These are QEMU extensions and are nops in the real architecture:
2619 *
2620 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2621 * or %r31,%r31,%r31 -- death loop; offline cpu
2622 * currently implemented as idle.
2623 */
2624 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2625 /* No need to check for supervisor, as userland can only pause
2626 until the next timer interrupt. */
2627 nullify_over(ctx);
2628
2629 /* Advance the instruction queue. */
2630 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2631 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2632 nullify_set(ctx, 0);
2633
2634 /* Tell the qemu main loop to halt until this cpu has work. */
2635 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2636 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2637 gen_excp_1(EXCP_HALTED);
2638 ctx->base.is_jmp = DISAS_NORETURN;
2639
2640 return nullify_end(ctx);
2641 }
2642 #endif
2643 }
2644 return do_log_reg(ctx, a, tcg_gen_or_reg);
2645 }
2646
2647 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2648 {
2649 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2650 }
2651
2652 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2653 {
2654 TCGv_reg tcg_r1, tcg_r2;
2655
2656 if (a->cf) {
2657 nullify_over(ctx);
2658 }
2659 tcg_r1 = load_gpr(ctx, a->r1);
2660 tcg_r2 = load_gpr(ctx, a->r2);
2661 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2662 return nullify_end(ctx);
2663 }
2664
2665 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2666 {
2667 TCGv_reg tcg_r1, tcg_r2;
2668
2669 if (a->cf) {
2670 nullify_over(ctx);
2671 }
2672 tcg_r1 = load_gpr(ctx, a->r1);
2673 tcg_r2 = load_gpr(ctx, a->r2);
2674 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2675 return nullify_end(ctx);
2676 }
2677
2678 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2679 {
2680 TCGv_reg tcg_r1, tcg_r2, tmp;
2681
2682 if (a->cf) {
2683 nullify_over(ctx);
2684 }
2685 tcg_r1 = load_gpr(ctx, a->r1);
2686 tcg_r2 = load_gpr(ctx, a->r2);
2687 tmp = tcg_temp_new();
2688 tcg_gen_not_reg(tmp, tcg_r2);
2689 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2690 return nullify_end(ctx);
2691 }
2692
2693 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2694 {
2695 return do_uaddcm(ctx, a, false);
2696 }
2697
2698 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2699 {
2700 return do_uaddcm(ctx, a, true);
2701 }
2702
2703 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2704 {
2705 TCGv_reg tmp;
2706
2707 nullify_over(ctx);
2708
2709 tmp = tcg_temp_new();
2710 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2711 if (!is_i) {
2712 tcg_gen_not_reg(tmp, tmp);
2713 }
2714 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2715 tcg_gen_muli_reg(tmp, tmp, 6);
2716 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2717 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2718 return nullify_end(ctx);
2719 }
2720
2721 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2722 {
2723 return do_dcor(ctx, a, false);
2724 }
2725
2726 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2727 {
2728 return do_dcor(ctx, a, true);
2729 }
2730
2731 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2732 {
2733 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2734
2735 nullify_over(ctx);
2736
2737 in1 = load_gpr(ctx, a->r1);
2738 in2 = load_gpr(ctx, a->r2);
2739
2740 add1 = tcg_temp_new();
2741 add2 = tcg_temp_new();
2742 addc = tcg_temp_new();
2743 dest = tcg_temp_new();
2744 zero = tcg_constant_reg(0);
2745
2746 /* Form R1 << 1 | PSW[CB]{8}. */
2747 tcg_gen_add_reg(add1, in1, in1);
2748 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2749
2750 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2751 carry{8} requires that we subtract via + ~R2 + 1, as described in
2752 the manual. By extracting and masking V, we can produce the
2753 proper inputs to the addition without movcond. */
2754 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2755 tcg_gen_xor_reg(add2, in2, addc);
2756 tcg_gen_andi_reg(addc, addc, 1);
2757 /* ??? This is only correct for 32-bit. */
2758 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2759 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2760
2761 /* Write back the result register. */
2762 save_gpr(ctx, a->t, dest);
2763
2764 /* Write back PSW[CB]. */
2765 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2766 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2767
2768 /* Write back PSW[V] for the division step. */
2769 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2770 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2771
2772 /* Install the new nullification. */
2773 if (a->cf) {
2774 TCGv_reg sv = NULL;
2775 if (cond_need_sv(a->cf >> 1)) {
2776 /* ??? The lshift is supposed to contribute to overflow. */
2777 sv = do_add_sv(ctx, dest, add1, add2);
2778 }
2779 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2780 }
2781
2782 return nullify_end(ctx);
2783 }
2784
2785 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2786 {
2787 return do_add_imm(ctx, a, false, false);
2788 }
2789
2790 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2791 {
2792 return do_add_imm(ctx, a, true, false);
2793 }
2794
2795 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2796 {
2797 return do_add_imm(ctx, a, false, true);
2798 }
2799
2800 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2801 {
2802 return do_add_imm(ctx, a, true, true);
2803 }
2804
2805 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2806 {
2807 return do_sub_imm(ctx, a, false);
2808 }
2809
2810 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2811 {
2812 return do_sub_imm(ctx, a, true);
2813 }
2814
2815 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2816 {
2817 TCGv_reg tcg_im, tcg_r2;
2818
2819 if (a->cf) {
2820 nullify_over(ctx);
2821 }
2822
2823 tcg_im = load_const(ctx, a->i);
2824 tcg_r2 = load_gpr(ctx, a->r);
2825 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2826
2827 return nullify_end(ctx);
2828 }
2829
2830 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2831 {
2832 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2833 return gen_illegal(ctx);
2834 } else {
2835 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2836 a->disp, a->sp, a->m, a->size | MO_TE);
2837 }
2838 }
2839
2840 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2841 {
2842 assert(a->x == 0 && a->scale == 0);
2843 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2844 return gen_illegal(ctx);
2845 } else {
2846 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2847 }
2848 }
2849
2850 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2851 {
2852 MemOp mop = MO_TE | MO_ALIGN | a->size;
2853 TCGv_reg zero, dest, ofs;
2854 TCGv_tl addr;
2855
2856 nullify_over(ctx);
2857
2858 if (a->m) {
2859 /* Base register modification. Make sure if RT == RB,
2860 we see the result of the load. */
2861 dest = tcg_temp_new();
2862 } else {
2863 dest = dest_gpr(ctx, a->t);
2864 }
2865
2866 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2867 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2868
2869 /*
2870 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2871 * However actual hardware succeeds with aligned mod 4.
2872 * Detect this case and log a GUEST_ERROR.
2873 *
2874 * TODO: HPPA64 relaxes the over-alignment requirement
2875 * with the ,co completer.
2876 */
2877 gen_helper_ldc_check(addr);
2878
2879 zero = tcg_constant_reg(0);
2880 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2881
2882 if (a->m) {
2883 save_gpr(ctx, a->b, ofs);
2884 }
2885 save_gpr(ctx, a->t, dest);
2886
2887 return nullify_end(ctx);
2888 }
2889
2890 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2891 {
2892 TCGv_reg ofs, val;
2893 TCGv_tl addr;
2894
2895 nullify_over(ctx);
2896
2897 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2898 ctx->mmu_idx == MMU_PHYS_IDX);
2899 val = load_gpr(ctx, a->r);
2900 if (a->a) {
2901 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2902 gen_helper_stby_e_parallel(tcg_env, addr, val);
2903 } else {
2904 gen_helper_stby_e(tcg_env, addr, val);
2905 }
2906 } else {
2907 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2908 gen_helper_stby_b_parallel(tcg_env, addr, val);
2909 } else {
2910 gen_helper_stby_b(tcg_env, addr, val);
2911 }
2912 }
2913 if (a->m) {
2914 tcg_gen_andi_reg(ofs, ofs, ~3);
2915 save_gpr(ctx, a->b, ofs);
2916 }
2917
2918 return nullify_end(ctx);
2919 }
2920
2921 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2922 {
2923 int hold_mmu_idx = ctx->mmu_idx;
2924
2925 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2926 ctx->mmu_idx = MMU_PHYS_IDX;
2927 trans_ld(ctx, a);
2928 ctx->mmu_idx = hold_mmu_idx;
2929 return true;
2930 }
2931
2932 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2933 {
2934 int hold_mmu_idx = ctx->mmu_idx;
2935
2936 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2937 ctx->mmu_idx = MMU_PHYS_IDX;
2938 trans_st(ctx, a);
2939 ctx->mmu_idx = hold_mmu_idx;
2940 return true;
2941 }
2942
2943 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2944 {
2945 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2946
2947 tcg_gen_movi_reg(tcg_rt, a->i);
2948 save_gpr(ctx, a->t, tcg_rt);
2949 cond_free(&ctx->null_cond);
2950 return true;
2951 }
2952
2953 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2954 {
2955 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2956 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2957
2958 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2959 save_gpr(ctx, 1, tcg_r1);
2960 cond_free(&ctx->null_cond);
2961 return true;
2962 }
2963
2964 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2965 {
2966 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2967
2968 /* Special case rb == 0, for the LDI pseudo-op.
2969 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2970 if (a->b == 0) {
2971 tcg_gen_movi_reg(tcg_rt, a->i);
2972 } else {
2973 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2974 }
2975 save_gpr(ctx, a->t, tcg_rt);
2976 cond_free(&ctx->null_cond);
2977 return true;
2978 }
2979
2980 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2981 unsigned c, unsigned f, unsigned n, int disp)
2982 {
2983 TCGv_reg dest, in2, sv;
2984 DisasCond cond;
2985
2986 in2 = load_gpr(ctx, r);
2987 dest = tcg_temp_new();
2988
2989 tcg_gen_sub_reg(dest, in1, in2);
2990
2991 sv = NULL;
2992 if (cond_need_sv(c)) {
2993 sv = do_sub_sv(ctx, dest, in1, in2);
2994 }
2995
2996 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
2997 return do_cbranch(ctx, disp, n, &cond);
2998 }
2999
3000 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3001 {
3002 nullify_over(ctx);
3003 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3004 }
3005
3006 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3007 {
3008 nullify_over(ctx);
3009 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3010 }
3011
3012 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3013 unsigned c, unsigned f, unsigned n, int disp)
3014 {
3015 TCGv_reg dest, in2, sv, cb_msb;
3016 DisasCond cond;
3017
3018 in2 = load_gpr(ctx, r);
3019 dest = tcg_temp_new();
3020 sv = NULL;
3021 cb_msb = NULL;
3022
3023 if (cond_need_cb(c)) {
3024 cb_msb = tcg_temp_new();
3025 tcg_gen_movi_reg(cb_msb, 0);
3026 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3027 } else {
3028 tcg_gen_add_reg(dest, in1, in2);
3029 }
3030 if (cond_need_sv(c)) {
3031 sv = do_add_sv(ctx, dest, in1, in2);
3032 }
3033
3034 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3035 save_gpr(ctx, r, dest);
3036 return do_cbranch(ctx, disp, n, &cond);
3037 }
3038
3039 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3040 {
3041 nullify_over(ctx);
3042 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3043 }
3044
3045 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3046 {
3047 nullify_over(ctx);
3048 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3049 }
3050
3051 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3052 {
3053 TCGv_reg tmp, tcg_r;
3054 DisasCond cond;
3055
3056 nullify_over(ctx);
3057
3058 tmp = tcg_temp_new();
3059 tcg_r = load_gpr(ctx, a->r);
3060 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3061
3062 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3063 return do_cbranch(ctx, a->disp, a->n, &cond);
3064 }
3065
3066 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3067 {
3068 TCGv_reg tmp, tcg_r;
3069 DisasCond cond;
3070
3071 nullify_over(ctx);
3072
3073 tmp = tcg_temp_new();
3074 tcg_r = load_gpr(ctx, a->r);
3075 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3076
3077 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3078 return do_cbranch(ctx, a->disp, a->n, &cond);
3079 }
3080
3081 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3082 {
3083 TCGv_reg dest;
3084 DisasCond cond;
3085
3086 nullify_over(ctx);
3087
3088 dest = dest_gpr(ctx, a->r2);
3089 if (a->r1 == 0) {
3090 tcg_gen_movi_reg(dest, 0);
3091 } else {
3092 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3093 }
3094
3095 cond = do_sed_cond(a->c, dest);
3096 return do_cbranch(ctx, a->disp, a->n, &cond);
3097 }
3098
3099 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3100 {
3101 TCGv_reg dest;
3102 DisasCond cond;
3103
3104 nullify_over(ctx);
3105
3106 dest = dest_gpr(ctx, a->r);
3107 tcg_gen_movi_reg(dest, a->i);
3108
3109 cond = do_sed_cond(a->c, dest);
3110 return do_cbranch(ctx, a->disp, a->n, &cond);
3111 }
3112
3113 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3114 {
3115 TCGv_reg dest;
3116
3117 if (a->c) {
3118 nullify_over(ctx);
3119 }
3120
3121 dest = dest_gpr(ctx, a->t);
3122 if (a->r1 == 0) {
3123 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3124 tcg_gen_shr_reg(dest, dest, cpu_sar);
3125 } else if (a->r1 == a->r2) {
3126 TCGv_i32 t32 = tcg_temp_new_i32();
3127 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3128 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3129 tcg_gen_extu_i32_reg(dest, t32);
3130 } else {
3131 TCGv_i64 t = tcg_temp_new_i64();
3132 TCGv_i64 s = tcg_temp_new_i64();
3133
3134 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3135 tcg_gen_extu_reg_i64(s, cpu_sar);
3136 tcg_gen_shr_i64(t, t, s);
3137 tcg_gen_trunc_i64_reg(dest, t);
3138 }
3139 save_gpr(ctx, a->t, dest);
3140
3141 /* Install the new nullification. */
3142 cond_free(&ctx->null_cond);
3143 if (a->c) {
3144 ctx->null_cond = do_sed_cond(a->c, dest);
3145 }
3146 return nullify_end(ctx);
3147 }
3148
3149 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3150 {
3151 unsigned sa = 31 - a->cpos;
3152 TCGv_reg dest, t2;
3153
3154 if (a->c) {
3155 nullify_over(ctx);
3156 }
3157
3158 dest = dest_gpr(ctx, a->t);
3159 t2 = load_gpr(ctx, a->r2);
3160 if (a->r1 == 0) {
3161 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3162 } else if (TARGET_REGISTER_BITS == 32) {
3163 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3164 } else if (a->r1 == a->r2) {
3165 TCGv_i32 t32 = tcg_temp_new_i32();
3166 tcg_gen_trunc_reg_i32(t32, t2);
3167 tcg_gen_rotri_i32(t32, t32, sa);
3168 tcg_gen_extu_i32_reg(dest, t32);
3169 } else {
3170 TCGv_i64 t64 = tcg_temp_new_i64();
3171 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3172 tcg_gen_shri_i64(t64, t64, sa);
3173 tcg_gen_trunc_i64_reg(dest, t64);
3174 }
3175 save_gpr(ctx, a->t, dest);
3176
3177 /* Install the new nullification. */
3178 cond_free(&ctx->null_cond);
3179 if (a->c) {
3180 ctx->null_cond = do_sed_cond(a->c, dest);
3181 }
3182 return nullify_end(ctx);
3183 }
3184
3185 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3186 {
3187 unsigned len = 32 - a->clen;
3188 TCGv_reg dest, src, tmp;
3189
3190 if (a->c) {
3191 nullify_over(ctx);
3192 }
3193
3194 dest = dest_gpr(ctx, a->t);
3195 src = load_gpr(ctx, a->r);
3196 tmp = tcg_temp_new();
3197
3198 /* Recall that SAR is using big-endian bit numbering. */
3199 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3200 if (a->se) {
3201 tcg_gen_sar_reg(dest, src, tmp);
3202 tcg_gen_sextract_reg(dest, dest, 0, len);
3203 } else {
3204 tcg_gen_shr_reg(dest, src, tmp);
3205 tcg_gen_extract_reg(dest, dest, 0, len);
3206 }
3207 save_gpr(ctx, a->t, dest);
3208
3209 /* Install the new nullification. */
3210 cond_free(&ctx->null_cond);
3211 if (a->c) {
3212 ctx->null_cond = do_sed_cond(a->c, dest);
3213 }
3214 return nullify_end(ctx);
3215 }
3216
3217 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3218 {
3219 unsigned len = 32 - a->clen;
3220 unsigned cpos = 31 - a->pos;
3221 TCGv_reg dest, src;
3222
3223 if (a->c) {
3224 nullify_over(ctx);
3225 }
3226
3227 dest = dest_gpr(ctx, a->t);
3228 src = load_gpr(ctx, a->r);
3229 if (a->se) {
3230 tcg_gen_sextract_reg(dest, src, cpos, len);
3231 } else {
3232 tcg_gen_extract_reg(dest, src, cpos, len);
3233 }
3234 save_gpr(ctx, a->t, dest);
3235
3236 /* Install the new nullification. */
3237 cond_free(&ctx->null_cond);
3238 if (a->c) {
3239 ctx->null_cond = do_sed_cond(a->c, dest);
3240 }
3241 return nullify_end(ctx);
3242 }
3243
3244 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3245 {
3246 unsigned len = 32 - a->clen;
3247 target_sreg mask0, mask1;
3248 TCGv_reg dest;
3249
3250 if (a->c) {
3251 nullify_over(ctx);
3252 }
3253 if (a->cpos + len > 32) {
3254 len = 32 - a->cpos;
3255 }
3256
3257 dest = dest_gpr(ctx, a->t);
3258 mask0 = deposit64(0, a->cpos, len, a->i);
3259 mask1 = deposit64(-1, a->cpos, len, a->i);
3260
3261 if (a->nz) {
3262 TCGv_reg src = load_gpr(ctx, a->t);
3263 if (mask1 != -1) {
3264 tcg_gen_andi_reg(dest, src, mask1);
3265 src = dest;
3266 }
3267 tcg_gen_ori_reg(dest, src, mask0);
3268 } else {
3269 tcg_gen_movi_reg(dest, mask0);
3270 }
3271 save_gpr(ctx, a->t, dest);
3272
3273 /* Install the new nullification. */
3274 cond_free(&ctx->null_cond);
3275 if (a->c) {
3276 ctx->null_cond = do_sed_cond(a->c, dest);
3277 }
3278 return nullify_end(ctx);
3279 }
3280
3281 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3282 {
3283 unsigned rs = a->nz ? a->t : 0;
3284 unsigned len = 32 - a->clen;
3285 TCGv_reg dest, val;
3286
3287 if (a->c) {
3288 nullify_over(ctx);
3289 }
3290 if (a->cpos + len > 32) {
3291 len = 32 - a->cpos;
3292 }
3293
3294 dest = dest_gpr(ctx, a->t);
3295 val = load_gpr(ctx, a->r);
3296 if (rs == 0) {
3297 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3298 } else {
3299 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3300 }
3301 save_gpr(ctx, a->t, dest);
3302
3303 /* Install the new nullification. */
3304 cond_free(&ctx->null_cond);
3305 if (a->c) {
3306 ctx->null_cond = do_sed_cond(a->c, dest);
3307 }
3308 return nullify_end(ctx);
3309 }
3310
3311 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3312 unsigned nz, unsigned clen, TCGv_reg val)
3313 {
3314 unsigned rs = nz ? rt : 0;
3315 unsigned len = 32 - clen;
3316 TCGv_reg mask, tmp, shift, dest;
3317 unsigned msb = 1U << (len - 1);
3318
3319 dest = dest_gpr(ctx, rt);
3320 shift = tcg_temp_new();
3321 tmp = tcg_temp_new();
3322
3323 /* Convert big-endian bit numbering in SAR to left-shift. */
3324 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3325
3326 mask = tcg_temp_new();
3327 tcg_gen_movi_reg(mask, msb + (msb - 1));
3328 tcg_gen_and_reg(tmp, val, mask);
3329 if (rs) {
3330 tcg_gen_shl_reg(mask, mask, shift);
3331 tcg_gen_shl_reg(tmp, tmp, shift);
3332 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3333 tcg_gen_or_reg(dest, dest, tmp);
3334 } else {
3335 tcg_gen_shl_reg(dest, tmp, shift);
3336 }
3337 save_gpr(ctx, rt, dest);
3338
3339 /* Install the new nullification. */
3340 cond_free(&ctx->null_cond);
3341 if (c) {
3342 ctx->null_cond = do_sed_cond(c, dest);
3343 }
3344 return nullify_end(ctx);
3345 }
3346
3347 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3348 {
3349 if (a->c) {
3350 nullify_over(ctx);
3351 }
3352 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3353 }
3354
3355 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3356 {
3357 if (a->c) {
3358 nullify_over(ctx);
3359 }
3360 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3361 }
3362
3363 static bool trans_be(DisasContext *ctx, arg_be *a)
3364 {
3365 TCGv_reg tmp;
3366
3367 #ifdef CONFIG_USER_ONLY
3368 /* ??? It seems like there should be a good way of using
3369 "be disp(sr2, r0)", the canonical gateway entry mechanism
3370 to our advantage. But that appears to be inconvenient to
3371 manage along side branch delay slots. Therefore we handle
3372 entry into the gateway page via absolute address. */
3373 /* Since we don't implement spaces, just branch. Do notice the special
3374 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3375 goto_tb to the TB containing the syscall. */
3376 if (a->b == 0) {
3377 return do_dbranch(ctx, a->disp, a->l, a->n);
3378 }
3379 #else
3380 nullify_over(ctx);
3381 #endif
3382
3383 tmp = tcg_temp_new();
3384 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3385 tmp = do_ibranch_priv(ctx, tmp);
3386
3387 #ifdef CONFIG_USER_ONLY
3388 return do_ibranch(ctx, tmp, a->l, a->n);
3389 #else
3390 TCGv_i64 new_spc = tcg_temp_new_i64();
3391
3392 load_spr(ctx, new_spc, a->sp);
3393 if (a->l) {
3394 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3395 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3396 }
3397 if (a->n && use_nullify_skip(ctx)) {
3398 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3399 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3400 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3401 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3402 } else {
3403 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3404 if (ctx->iaoq_b == -1) {
3405 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3406 }
3407 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3408 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3409 nullify_set(ctx, a->n);
3410 }
3411 tcg_gen_lookup_and_goto_ptr();
3412 ctx->base.is_jmp = DISAS_NORETURN;
3413 return nullify_end(ctx);
3414 #endif
3415 }
3416
3417 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3418 {
3419 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3420 }
3421
3422 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3423 {
3424 target_ureg dest = iaoq_dest(ctx, a->disp);
3425
3426 nullify_over(ctx);
3427
3428 /* Make sure the caller hasn't done something weird with the queue.
3429 * ??? This is not quite the same as the PSW[B] bit, which would be
3430 * expensive to track. Real hardware will trap for
3431 * b gateway
3432 * b gateway+4 (in delay slot of first branch)
3433 * However, checking for a non-sequential instruction queue *will*
3434 * diagnose the security hole
3435 * b gateway
3436 * b evil
3437 * in which instructions at evil would run with increased privs.
3438 */
3439 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3440 return gen_illegal(ctx);
3441 }
3442
3443 #ifndef CONFIG_USER_ONLY
3444 if (ctx->tb_flags & PSW_C) {
3445 CPUHPPAState *env = cpu_env(ctx->cs);
3446 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3447 /* If we could not find a TLB entry, then we need to generate an
3448 ITLB miss exception so the kernel will provide it.
3449 The resulting TLB fill operation will invalidate this TB and
3450 we will re-translate, at which point we *will* be able to find
3451 the TLB entry and determine if this is in fact a gateway page. */
3452 if (type < 0) {
3453 gen_excp(ctx, EXCP_ITLB_MISS);
3454 return true;
3455 }
3456 /* No change for non-gateway pages or for priv decrease. */
3457 if (type >= 4 && type - 4 < ctx->privilege) {
3458 dest = deposit32(dest, 0, 2, type - 4);
3459 }
3460 } else {
3461 dest &= -4; /* priv = 0 */
3462 }
3463 #endif
3464
3465 if (a->l) {
3466 TCGv_reg tmp = dest_gpr(ctx, a->l);
3467 if (ctx->privilege < 3) {
3468 tcg_gen_andi_reg(tmp, tmp, -4);
3469 }
3470 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3471 save_gpr(ctx, a->l, tmp);
3472 }
3473
3474 return do_dbranch(ctx, dest, 0, a->n);
3475 }
3476
3477 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3478 {
3479 if (a->x) {
3480 TCGv_reg tmp = tcg_temp_new();
3481 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3482 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3483 /* The computation here never changes privilege level. */
3484 return do_ibranch(ctx, tmp, a->l, a->n);
3485 } else {
3486 /* BLR R0,RX is a good way to load PC+8 into RX. */
3487 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3488 }
3489 }
3490
3491 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3492 {
3493 TCGv_reg dest;
3494
3495 if (a->x == 0) {
3496 dest = load_gpr(ctx, a->b);
3497 } else {
3498 dest = tcg_temp_new();
3499 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3500 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3501 }
3502 dest = do_ibranch_priv(ctx, dest);
3503 return do_ibranch(ctx, dest, 0, a->n);
3504 }
3505
3506 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3507 {
3508 TCGv_reg dest;
3509
3510 #ifdef CONFIG_USER_ONLY
3511 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3512 return do_ibranch(ctx, dest, a->l, a->n);
3513 #else
3514 nullify_over(ctx);
3515 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3516
3517 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3518 if (ctx->iaoq_b == -1) {
3519 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3520 }
3521 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3522 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3523 if (a->l) {
3524 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3525 }
3526 nullify_set(ctx, a->n);
3527 tcg_gen_lookup_and_goto_ptr();
3528 ctx->base.is_jmp = DISAS_NORETURN;
3529 return nullify_end(ctx);
3530 #endif
3531 }
3532
3533 /*
3534 * Float class 0
3535 */
3536
3537 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3538 {
3539 tcg_gen_mov_i32(dst, src);
3540 }
3541
3542 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3543 {
3544 uint64_t ret;
3545
3546 if (TARGET_REGISTER_BITS == 64) {
3547 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3548 } else {
3549 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3550 }
3551
3552 nullify_over(ctx);
3553 save_frd(0, tcg_constant_i64(ret));
3554 return nullify_end(ctx);
3555 }
3556
3557 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3558 {
3559 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3560 }
3561
3562 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3563 {
3564 tcg_gen_mov_i64(dst, src);
3565 }
3566
3567 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3568 {
3569 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3570 }
3571
3572 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3573 {
3574 tcg_gen_andi_i32(dst, src, INT32_MAX);
3575 }
3576
3577 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3578 {
3579 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3580 }
3581
3582 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3583 {
3584 tcg_gen_andi_i64(dst, src, INT64_MAX);
3585 }
3586
3587 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3588 {
3589 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3590 }
3591
3592 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3593 {
3594 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3595 }
3596
3597 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3598 {
3599 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3600 }
3601
3602 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3603 {
3604 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3605 }
3606
3607 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3608 {
3609 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3610 }
3611
3612 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3613 {
3614 tcg_gen_xori_i32(dst, src, INT32_MIN);
3615 }
3616
3617 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3618 {
3619 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3620 }
3621
3622 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3623 {
3624 tcg_gen_xori_i64(dst, src, INT64_MIN);
3625 }
3626
3627 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3628 {
3629 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3630 }
3631
3632 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3633 {
3634 tcg_gen_ori_i32(dst, src, INT32_MIN);
3635 }
3636
3637 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3638 {
3639 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3640 }
3641
3642 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3643 {
3644 tcg_gen_ori_i64(dst, src, INT64_MIN);
3645 }
3646
3647 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3648 {
3649 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3650 }
3651
3652 /*
3653 * Float class 1
3654 */
3655
3656 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3657 {
3658 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3659 }
3660
3661 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3662 {
3663 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3664 }
3665
3666 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3667 {
3668 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3669 }
3670
3671 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3672 {
3673 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3674 }
3675
3676 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3677 {
3678 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3679 }
3680
3681 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3682 {
3683 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3684 }
3685
3686 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3687 {
3688 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3689 }
3690
3691 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3692 {
3693 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3694 }
3695
3696 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3697 {
3698 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3699 }
3700
3701 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3702 {
3703 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3704 }
3705
3706 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3707 {
3708 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3709 }
3710
3711 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3712 {
3713 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3714 }
3715
3716 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3717 {
3718 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3719 }
3720
3721 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3722 {
3723 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3724 }
3725
3726 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3727 {
3728 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3729 }
3730
3731 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3734 }
3735
3736 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3737 {
3738 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3739 }
3740
3741 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3744 }
3745
3746 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3747 {
3748 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3749 }
3750
3751 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3752 {
3753 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3754 }
3755
3756 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3757 {
3758 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3759 }
3760
3761 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3762 {
3763 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3764 }
3765
3766 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3767 {
3768 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3769 }
3770
3771 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3772 {
3773 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3774 }
3775
3776 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3777 {
3778 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3779 }
3780
3781 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3782 {
3783 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3784 }
3785
3786 /*
3787 * Float class 2
3788 */
3789
3790 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3791 {
3792 TCGv_i32 ta, tb, tc, ty;
3793
3794 nullify_over(ctx);
3795
3796 ta = load_frw0_i32(a->r1);
3797 tb = load_frw0_i32(a->r2);
3798 ty = tcg_constant_i32(a->y);
3799 tc = tcg_constant_i32(a->c);
3800
3801 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3802
3803 return nullify_end(ctx);
3804 }
3805
3806 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3807 {
3808 TCGv_i64 ta, tb;
3809 TCGv_i32 tc, ty;
3810
3811 nullify_over(ctx);
3812
3813 ta = load_frd0(a->r1);
3814 tb = load_frd0(a->r2);
3815 ty = tcg_constant_i32(a->y);
3816 tc = tcg_constant_i32(a->c);
3817
3818 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3819
3820 return nullify_end(ctx);
3821 }
3822
3823 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3824 {
3825 TCGv_reg t;
3826
3827 nullify_over(ctx);
3828
3829 t = tcg_temp_new();
3830 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3831
3832 if (a->y == 1) {
3833 int mask;
3834 bool inv = false;
3835
3836 switch (a->c) {
3837 case 0: /* simple */
3838 tcg_gen_andi_reg(t, t, 0x4000000);
3839 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3840 goto done;
3841 case 2: /* rej */
3842 inv = true;
3843 /* fallthru */
3844 case 1: /* acc */
3845 mask = 0x43ff800;
3846 break;
3847 case 6: /* rej8 */
3848 inv = true;
3849 /* fallthru */
3850 case 5: /* acc8 */
3851 mask = 0x43f8000;
3852 break;
3853 case 9: /* acc6 */
3854 mask = 0x43e0000;
3855 break;
3856 case 13: /* acc4 */
3857 mask = 0x4380000;
3858 break;
3859 case 17: /* acc2 */
3860 mask = 0x4200000;
3861 break;
3862 default:
3863 gen_illegal(ctx);
3864 return true;
3865 }
3866 if (inv) {
3867 TCGv_reg c = load_const(ctx, mask);
3868 tcg_gen_or_reg(t, t, c);
3869 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3870 } else {
3871 tcg_gen_andi_reg(t, t, mask);
3872 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3873 }
3874 } else {
3875 unsigned cbit = (a->y ^ 1) - 1;
3876
3877 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3878 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3879 }
3880
3881 done:
3882 return nullify_end(ctx);
3883 }
3884
3885 /*
3886 * Float class 2
3887 */
3888
3889 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3890 {
3891 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3892 }
3893
3894 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3895 {
3896 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3897 }
3898
3899 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3900 {
3901 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3902 }
3903
3904 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3905 {
3906 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3907 }
3908
3909 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3910 {
3911 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3912 }
3913
3914 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3915 {
3916 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3917 }
3918
3919 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3920 {
3921 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3922 }
3923
3924 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3925 {
3926 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3927 }
3928
3929 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3930 {
3931 TCGv_i64 x, y;
3932
3933 nullify_over(ctx);
3934
3935 x = load_frw0_i64(a->r1);
3936 y = load_frw0_i64(a->r2);
3937 tcg_gen_mul_i64(x, x, y);
3938 save_frd(a->t, x);
3939
3940 return nullify_end(ctx);
3941 }
3942
3943 /* Convert the fmpyadd single-precision register encodings to standard. */
3944 static inline int fmpyadd_s_reg(unsigned r)
3945 {
3946 return (r & 16) * 2 + 16 + (r & 15);
3947 }
3948
3949 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3950 {
3951 int tm = fmpyadd_s_reg(a->tm);
3952 int ra = fmpyadd_s_reg(a->ra);
3953 int ta = fmpyadd_s_reg(a->ta);
3954 int rm2 = fmpyadd_s_reg(a->rm2);
3955 int rm1 = fmpyadd_s_reg(a->rm1);
3956
3957 nullify_over(ctx);
3958
3959 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3960 do_fop_weww(ctx, ta, ta, ra,
3961 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3962
3963 return nullify_end(ctx);
3964 }
3965
3966 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3967 {
3968 return do_fmpyadd_s(ctx, a, false);
3969 }
3970
3971 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3972 {
3973 return do_fmpyadd_s(ctx, a, true);
3974 }
3975
3976 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3977 {
3978 nullify_over(ctx);
3979
3980 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3981 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3982 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3983
3984 return nullify_end(ctx);
3985 }
3986
3987 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3988 {
3989 return do_fmpyadd_d(ctx, a, false);
3990 }
3991
3992 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
3993 {
3994 return do_fmpyadd_d(ctx, a, true);
3995 }
3996
3997 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
3998 {
3999 TCGv_i32 x, y, z;
4000
4001 nullify_over(ctx);
4002 x = load_frw0_i32(a->rm1);
4003 y = load_frw0_i32(a->rm2);
4004 z = load_frw0_i32(a->ra3);
4005
4006 if (a->neg) {
4007 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4008 } else {
4009 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4010 }
4011
4012 save_frw_i32(a->t, x);
4013 return nullify_end(ctx);
4014 }
4015
4016 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4017 {
4018 TCGv_i64 x, y, z;
4019
4020 nullify_over(ctx);
4021 x = load_frd0(a->rm1);
4022 y = load_frd0(a->rm2);
4023 z = load_frd0(a->ra3);
4024
4025 if (a->neg) {
4026 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4027 } else {
4028 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4029 }
4030
4031 save_frd(a->t, x);
4032 return nullify_end(ctx);
4033 }
4034
4035 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4036 {
4037 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4038 #ifndef CONFIG_USER_ONLY
4039 if (a->i == 0x100) {
4040 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4041 nullify_over(ctx);
4042 gen_helper_diag_btlb(tcg_env);
4043 return nullify_end(ctx);
4044 }
4045 #endif
4046 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4047 return true;
4048 }
4049
4050 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4051 {
4052 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4053 int bound;
4054
4055 ctx->cs = cs;
4056 ctx->tb_flags = ctx->base.tb->flags;
4057
4058 #ifdef CONFIG_USER_ONLY
4059 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4060 ctx->mmu_idx = MMU_USER_IDX;
4061 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4062 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4063 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4064 #else
4065 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4066 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4067 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4068 : MMU_PHYS_IDX);
4069
4070 /* Recover the IAOQ values from the GVA + PRIV. */
4071 uint64_t cs_base = ctx->base.tb->cs_base;
4072 uint64_t iasq_f = cs_base & ~0xffffffffull;
4073 int32_t diff = cs_base;
4074
4075 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4076 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4077 #endif
4078 ctx->iaoq_n = -1;
4079 ctx->iaoq_n_var = NULL;
4080
4081 /* Bound the number of instructions by those left on the page. */
4082 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4083 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4084
4085 ctx->ntempl = 0;
4086 memset(ctx->templ, 0, sizeof(ctx->templ));
4087 }
4088
4089 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4090 {
4091 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4092
4093 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4094 ctx->null_cond = cond_make_f();
4095 ctx->psw_n_nonzero = false;
4096 if (ctx->tb_flags & PSW_N) {
4097 ctx->null_cond.c = TCG_COND_ALWAYS;
4098 ctx->psw_n_nonzero = true;
4099 }
4100 ctx->null_lab = NULL;
4101 }
4102
4103 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4104 {
4105 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4106
4107 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4108 }
4109
4110 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4111 {
4112 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4113 CPUHPPAState *env = cpu_env(cs);
4114 DisasJumpType ret;
4115 int i, n;
4116
4117 /* Execute one insn. */
4118 #ifdef CONFIG_USER_ONLY
4119 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4120 do_page_zero(ctx);
4121 ret = ctx->base.is_jmp;
4122 assert(ret != DISAS_NEXT);
4123 } else
4124 #endif
4125 {
4126 /* Always fetch the insn, even if nullified, so that we check
4127 the page permissions for execute. */
4128 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4129
4130 /* Set up the IA queue for the next insn.
4131 This will be overwritten by a branch. */
4132 if (ctx->iaoq_b == -1) {
4133 ctx->iaoq_n = -1;
4134 ctx->iaoq_n_var = tcg_temp_new();
4135 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4136 } else {
4137 ctx->iaoq_n = ctx->iaoq_b + 4;
4138 ctx->iaoq_n_var = NULL;
4139 }
4140
4141 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4142 ctx->null_cond.c = TCG_COND_NEVER;
4143 ret = DISAS_NEXT;
4144 } else {
4145 ctx->insn = insn;
4146 if (!decode(ctx, insn)) {
4147 gen_illegal(ctx);
4148 }
4149 ret = ctx->base.is_jmp;
4150 assert(ctx->null_lab == NULL);
4151 }
4152 }
4153
4154 /* Forget any temporaries allocated. */
4155 for (i = 0, n = ctx->ntempl; i < n; ++i) {
4156 ctx->templ[i] = NULL;
4157 }
4158 ctx->ntempl = 0;
4159
4160 /* Advance the insn queue. Note that this check also detects
4161 a priority change within the instruction queue. */
4162 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4163 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4164 && use_goto_tb(ctx, ctx->iaoq_b)
4165 && (ctx->null_cond.c == TCG_COND_NEVER
4166 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4167 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4168 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4169 ctx->base.is_jmp = ret = DISAS_NORETURN;
4170 } else {
4171 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4172 }
4173 }
4174 ctx->iaoq_f = ctx->iaoq_b;
4175 ctx->iaoq_b = ctx->iaoq_n;
4176 ctx->base.pc_next += 4;
4177
4178 switch (ret) {
4179 case DISAS_NORETURN:
4180 case DISAS_IAQ_N_UPDATED:
4181 break;
4182
4183 case DISAS_NEXT:
4184 case DISAS_IAQ_N_STALE:
4185 case DISAS_IAQ_N_STALE_EXIT:
4186 if (ctx->iaoq_f == -1) {
4187 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4188 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4189 #ifndef CONFIG_USER_ONLY
4190 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4191 #endif
4192 nullify_save(ctx);
4193 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4194 ? DISAS_EXIT
4195 : DISAS_IAQ_N_UPDATED);
4196 } else if (ctx->iaoq_b == -1) {
4197 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4198 }
4199 break;
4200
4201 default:
4202 g_assert_not_reached();
4203 }
4204 }
4205
4206 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4207 {
4208 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4209 DisasJumpType is_jmp = ctx->base.is_jmp;
4210
4211 switch (is_jmp) {
4212 case DISAS_NORETURN:
4213 break;
4214 case DISAS_TOO_MANY:
4215 case DISAS_IAQ_N_STALE:
4216 case DISAS_IAQ_N_STALE_EXIT:
4217 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4218 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4219 nullify_save(ctx);
4220 /* FALLTHRU */
4221 case DISAS_IAQ_N_UPDATED:
4222 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4223 tcg_gen_lookup_and_goto_ptr();
4224 break;
4225 }
4226 /* FALLTHRU */
4227 case DISAS_EXIT:
4228 tcg_gen_exit_tb(NULL, 0);
4229 break;
4230 default:
4231 g_assert_not_reached();
4232 }
4233 }
4234
4235 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4236 CPUState *cs, FILE *logfile)
4237 {
4238 target_ulong pc = dcbase->pc_first;
4239
4240 #ifdef CONFIG_USER_ONLY
4241 switch (pc) {
4242 case 0x00:
4243 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4244 return;
4245 case 0xb0:
4246 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4247 return;
4248 case 0xe0:
4249 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4250 return;
4251 case 0x100:
4252 fprintf(logfile, "IN:\n0x00000100: syscall\n");
4253 return;
4254 }
4255 #endif
4256
4257 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4258 target_disas(logfile, cs, pc, dcbase->tb->size);
4259 }
4260
4261 static const TranslatorOps hppa_tr_ops = {
4262 .init_disas_context = hppa_tr_init_disas_context,
4263 .tb_start = hppa_tr_tb_start,
4264 .insn_start = hppa_tr_insn_start,
4265 .translate_insn = hppa_tr_translate_insn,
4266 .tb_stop = hppa_tr_tb_stop,
4267 .disas_log = hppa_tr_disas_log,
4268 };
4269
4270 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4271 target_ulong pc, void *host_pc)
4272 {
4273 DisasContext ctx;
4274 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4275 }