/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
#define UNALIGN(C) MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}
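
/*
 * Illustrative summary of the tri-state modify value, derived from the
 * code above rather than from the manual:
 *
 *   ma_to_m:  M bit clear -> 0 (no update); M:A = 1:0 -> 1; M:A = 1:1 -> -1
 *   pos_to_m: nonzero disp -> 1,  zero disp -> -1
 *   neg_to_m: nonzero disp -> -1, zero disp -> 1
 *
 * form_gva() below consumes this: modify <= 0 addresses with the updated
 * offset (pre-modify), modify > 0 with the unmodified base (post-modify).
 */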

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops. */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
#define DISAS_EXIT DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
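
/*
 * Illustrative sketch (not a real decoder entry) of how the pair above
 * is used by the trans_* functions later in this file:
 *
 *     static bool trans_foo(DisasContext *ctx, arg_foo *a)
 *     {
 *         nullify_over(ctx);        // branch around the body if nullified
 *         ... emit the operation ...
 *         return nullify_end(ctx);  // resolve label, reinstall null_cond
 *     }
 *
 * Simple insns instead skip the branch entirely and rely on the
 * conditional move in save_or_nullify().
 */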

static target_ureg gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}
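
/*
 * E.g. with PSW_W set (wide addressing) the offset keeps the low 62 bits,
 * 0x3fffffffffffffff; otherwise only the low 32 bits survive.  The space
 * id, when present, occupies bits above this mask (see form_gva below).
 */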

static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest,
                            target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}
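
/*
 * Throughout this file an IAOQ value of -1 is a sentinel meaning "not
 * known at translation time; use the run-time TCG value instead".  E.g.
 * gen_goto_tb() below may only emit a direct goto_tb when both queue
 * entries are compile-time constants, i.e. neither is -1.
 */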

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_reg. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return TARGET_REGISTER_BITS == 64 && !d;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
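
/*
 * Example of the cf encoding handled above: cf is a 4-bit field in which
 * bit 0 negates the condition selected by bits 3:1.  So cf = 2 (0b0010)
 * is "=", while cf = 3 (0b0011) is its negation "<>"; cf = 0 never
 * nullifies and cf = 1 always does.
 */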

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions. */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
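
/*
 * Worked example of the hasless(v, 1) trick used for SBZ/NBZ above,
 * with res = 0x12003456 (byte 2 is zero):
 *
 *   res - 0x01010101  = 0x10FF3355
 *   ... & ~res        = 0x00FF0301
 *   ... & 0x80808080  = 0x00800000   != 0, so SBZ is satisfied
 *
 * Only a byte that was zero can both borrow into its msb and survive
 * the andc with the original value.
 */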

static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction. */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}
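
/*
 * The two helpers above implement the usual sign rules: for addition,
 * overflow occurs iff the operands have the same sign and the result
 * differs, so (res ^ in1) & ~(in1 ^ in2) has its msb set exactly then.
 * For subtraction the operand signs must differ instead, hence the
 * and rather than andc.  The msb of SV is the V bit tested by do_cond.
 */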

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(cf, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_tl();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_tl();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif
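
/*
 * E.g. for a narrow access with sp == 0 and base = 0xC0001234, the top
 * two bits (0b11) index sr[4 + 3], i.e. sr[7], matching the PA-RISC
 * short-pointer convention of selecting among SR4-SR7.
 */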

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Note that RX is mutually exclusive with DISP. */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_tl();
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
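
/*
 * Sketch of a post-modify case (modify > 0) with disp = 4 and no index:
 * ofs = rb + 4 is handed back for writeback to rb, while the access
 * itself uses the old base, masked by gva_offset_mask() and OR'd with
 * the selected space id in the high bits (system mode only).
 */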

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = tcg_temp_new();
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches. */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches. */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified. */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new();
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_reg(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip executing N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1894 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1895 next = tcg_temp_new();
1896 tcg_gen_addi_reg(next, dest, 4);
1897 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1898
1899 nullify_over(ctx);
1900 if (link != 0) {
1901 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1902 }
1903 tcg_gen_lookup_and_goto_ptr();
1904 return nullify_end(ctx);
1905 } else {
1906 c = ctx->null_cond.c;
1907 a0 = ctx->null_cond.a0;
1908 a1 = ctx->null_cond.a1;
1909
1910 tmp = tcg_temp_new();
1911 next = tcg_temp_new();
1912
1913 copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1914 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1915 ctx->iaoq_n = -1;
1916 ctx->iaoq_n_var = next;
1917
1918 if (link != 0) {
1919 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1920 }
1921
1922 if (is_n) {
1923 /* The branch nullifies the next insn, which means the state of N
1924 after the branch is the inverse of the state of N that applied
1925 to the branch. */
1926 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1927 cond_free(&ctx->null_cond);
1928 ctx->null_cond = cond_make_n();
1929 ctx->psw_n_nonzero = true;
1930 } else {
1931 cond_free(&ctx->null_cond);
1932 }
1933 }
1934 return true;
1935 }
1936
1937 /* Implement
1938 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1939 * IAOQ_Next{30..31} ← GR[b]{30..31};
1940 * else
1941 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1942 * which keeps the privilege level from being increased.
1943 */
1944 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1945 {
1946 TCGv_reg dest;
1947 switch (ctx->privilege) {
1948 case 0:
1949 /* Privilege 0 is maximum and is allowed to decrease. */
1950 return offset;
1951 case 3:
1952 /* Privilege 3 is minimum and is never allowed to increase. */
1953 dest = tcg_temp_new();
1954 tcg_gen_ori_reg(dest, offset, 3);
1955 break;
1956 default:
1957 dest = tcg_temp_new();
1958 tcg_gen_andi_reg(dest, offset, -4);
1959 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1960 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1961 break;
1962 }
1963 return dest;
1964 }
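/*
 * Editor's note (illustrative): for the default case above, with PRIV
 * being the current privilege (1 or 2) and lower numbers more
 * privileged, the generated code computes, in effect:
 *
 *     target_ureg t = (offset & -4) | PRIV;
 *     return t > offset ? t : offset;       // unsigned, TCG_COND_GTU
 *
 * E.g. at PRIV = 2, targets requesting privilege 0 or 1 are clamped to
 * 2, while a target requesting privilege 3 passes through unchanged.
 */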
1965
1966 #ifdef CONFIG_USER_ONLY
1967 /* On Linux, page zero is normally marked execute only + gateway.
1968 Therefore normal read or write is supposed to fail, but specific
1969 offsets have kernel code mapped to raise permissions to implement
1970 system calls. Handling this via an explicit check here, rather
1971 than in the "be disp(sr2,r0)" instruction that probably sent us
1972 here, is the easiest way to handle the branch delay slot on the
1973 aforementioned BE. */
1974 static void do_page_zero(DisasContext *ctx)
1975 {
1976 TCGv_reg tmp;
1977
1978 /* If by some means we get here with PSW[N]=1, that implies that
1979 the B,GATE instruction would be skipped, and we'd fault on the
1980 next insn within the privileged page. */
1981 switch (ctx->null_cond.c) {
1982 case TCG_COND_NEVER:
1983 break;
1984 case TCG_COND_ALWAYS:
1985 tcg_gen_movi_reg(cpu_psw_n, 0);
1986 goto do_sigill;
1987 default:
1988 /* Since this is always the first (and only) insn within the
1989 TB, we should know the state of PSW[N] from TB->FLAGS. */
1990 g_assert_not_reached();
1991 }
1992
1993 /* Check that we didn't arrive here via some means that allowed
1994 non-sequential instruction execution. Normally the PSW[B] bit
1995 detects this by disallowing the B,GATE instruction to execute
1996 under such conditions. */
1997 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1998 goto do_sigill;
1999 }
2000
2001 switch (ctx->iaoq_f & -4) {
2002 case 0x00: /* Null pointer call */
2003 gen_excp_1(EXCP_IMP);
2004 ctx->base.is_jmp = DISAS_NORETURN;
2005 break;
2006
2007 case 0xb0: /* LWS */
2008 gen_excp_1(EXCP_SYSCALL_LWS);
2009 ctx->base.is_jmp = DISAS_NORETURN;
2010 break;
2011
2012 case 0xe0: /* SET_THREAD_POINTER */
2013 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
2014 tmp = tcg_temp_new();
2015 tcg_gen_ori_reg(tmp, cpu_gr[31], 3);
2016 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
2017 tcg_gen_addi_reg(tmp, tmp, 4);
2018 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
2019 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2020 break;
2021
2022 case 0x100: /* SYSCALL */
2023 gen_excp_1(EXCP_SYSCALL);
2024 ctx->base.is_jmp = DISAS_NORETURN;
2025 break;
2026
2027 default:
2028 do_sigill:
2029 gen_excp_1(EXCP_ILL);
2030 ctx->base.is_jmp = DISAS_NORETURN;
2031 break;
2032 }
2033 }
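/*
 * Editor's note: an illustrative guest-side entry sequence, assuming
 * the usual parisc-linux convention (syscall number in %r20, return
 * address left in %r31 by BLE):
 *
 *     ble  0x100(%sr2, %r0)     ; branch to the SYSCALL gateway offset
 *     ldi  __NR_write, %r20     ; delay slot: select the syscall
 *
 * which arrives here with IAOQ_Front == 0x100 and takes the
 * EXCP_SYSCALL path above.
 */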
2034 #endif
2035
2036 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2037 {
2038 cond_free(&ctx->null_cond);
2039 return true;
2040 }
2041
2042 static bool trans_break(DisasContext *ctx, arg_break *a)
2043 {
2044 return gen_excp_iir(ctx, EXCP_BREAK);
2045 }
2046
2047 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2048 {
2049 /* No point in nullifying the memory barrier. */
2050 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2051
2052 cond_free(&ctx->null_cond);
2053 return true;
2054 }
2055
2056 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2057 {
2058 unsigned rt = a->t;
2059 TCGv_reg tmp = dest_gpr(ctx, rt);
2060 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2061 save_gpr(ctx, rt, tmp);
2062
2063 cond_free(&ctx->null_cond);
2064 return true;
2065 }
2066
2067 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2068 {
2069 unsigned rt = a->t;
2070 unsigned rs = a->sp;
2071 TCGv_i64 t0 = tcg_temp_new_i64();
2072 TCGv_reg t1 = tcg_temp_new();
2073
2074 load_spr(ctx, t0, rs);
2075 tcg_gen_shri_i64(t0, t0, 32);
2076 tcg_gen_trunc_i64_reg(t1, t0);
2077
2078 save_gpr(ctx, rt, t1);
2079
2080 cond_free(&ctx->null_cond);
2081 return true;
2082 }
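/*
 * Editor's note: the space identifier is kept in the high half of the
 * 64-bit sr[] slot, which is why MFSP shifts right by 32 here and MTSP
 * (below) shifts left by 32 on the way in.  Storing it pre-shifted lets
 * a global virtual address be formed with a plain OR, as trans_ixtlbxf
 * does later:
 *
 *     gva = (space << 32) | offset;   // sketch of the tcg_gen_or_tl there
 */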
2083
2084 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2085 {
2086 unsigned rt = a->t;
2087 unsigned ctl = a->r;
2088 TCGv_reg tmp;
2089
2090 switch (ctl) {
2091 case CR_SAR:
2092 #ifdef TARGET_HPPA64
2093 if (a->e == 0) {
2094 /* MFSAR without ,W masks low 5 bits. */
2095 tmp = dest_gpr(ctx, rt);
2096 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2097 save_gpr(ctx, rt, tmp);
2098 goto done;
2099 }
2100 #endif
2101 save_gpr(ctx, rt, cpu_sar);
2102 goto done;
2103 case CR_IT: /* Interval Timer */
2104 /* FIXME: Respect PSW_S bit. */
2105 nullify_over(ctx);
2106 tmp = dest_gpr(ctx, rt);
2107 if (translator_io_start(&ctx->base)) {
2108 gen_helper_read_interval_timer(tmp);
2109 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2110 } else {
2111 gen_helper_read_interval_timer(tmp);
2112 }
2113 save_gpr(ctx, rt, tmp);
2114 return nullify_end(ctx);
2115 case 26:
2116 case 27:
2117 break;
2118 default:
2119 /* All other control registers are privileged. */
2120 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2121 break;
2122 }
2123
2124 tmp = tcg_temp_new();
2125 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2126 save_gpr(ctx, rt, tmp);
2127
2128 done:
2129 cond_free(&ctx->null_cond);
2130 return true;
2131 }
2132
2133 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2134 {
2135 unsigned rr = a->r;
2136 unsigned rs = a->sp;
2137 TCGv_i64 t64;
2138
2139 if (rs >= 5) {
2140 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2141 }
2142 nullify_over(ctx);
2143
2144 t64 = tcg_temp_new_i64();
2145 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2146 tcg_gen_shli_i64(t64, t64, 32);
2147
2148 if (rs >= 4) {
2149 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2150 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2151 } else {
2152 tcg_gen_mov_i64(cpu_sr[rs], t64);
2153 }
2154
2155 return nullify_end(ctx);
2156 }
2157
2158 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2159 {
2160 unsigned ctl = a->t;
2161 TCGv_reg reg;
2162 TCGv_reg tmp;
2163
2164 if (ctl == CR_SAR) {
2165 reg = load_gpr(ctx, a->r);
2166 tmp = tcg_temp_new();
2167 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2168 save_or_nullify(ctx, cpu_sar, tmp);
2169
2170 cond_free(&ctx->null_cond);
2171 return true;
2172 }
2173
2174 /* All other control registers are privileged or read-only. */
2175 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2176
2177 #ifndef CONFIG_USER_ONLY
2178 nullify_over(ctx);
2179 reg = load_gpr(ctx, a->r);
2180
2181 switch (ctl) {
2182 case CR_IT:
2183 gen_helper_write_interval_timer(tcg_env, reg);
2184 break;
2185 case CR_EIRR:
2186 gen_helper_write_eirr(tcg_env, reg);
2187 break;
2188 case CR_EIEM:
2189 gen_helper_write_eiem(tcg_env, reg);
2190 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2191 break;
2192
2193 case CR_IIASQ:
2194 case CR_IIAOQ:
2195 /* FIXME: Respect PSW_Q bit */
2196 /* The write advances the queue and stores to the back element. */
2197 tmp = tcg_temp_new();
2198 tcg_gen_ld_reg(tmp, tcg_env,
2199 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2200 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2201 tcg_gen_st_reg(reg, tcg_env,
2202 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2203 break;
2204
2205 case CR_PID1:
2206 case CR_PID2:
2207 case CR_PID3:
2208 case CR_PID4:
2209 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2210 #ifndef CONFIG_USER_ONLY
2211 gen_helper_change_prot_id(tcg_env);
2212 #endif
2213 break;
2214
2215 default:
2216 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2217 break;
2218 }
2219 return nullify_end(ctx);
2220 #endif
2221 }
2222
2223 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2224 {
2225 TCGv_reg tmp = tcg_temp_new();
2226
2227 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2228 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2229 save_or_nullify(ctx, cpu_sar, tmp);
2230
2231 cond_free(&ctx->null_cond);
2232 return true;
2233 }
2234
2235 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2236 {
2237 TCGv_reg dest = dest_gpr(ctx, a->t);
2238
2239 #ifdef CONFIG_USER_ONLY
2240 /* We don't implement space registers in user mode. */
2241 tcg_gen_movi_reg(dest, 0);
2242 #else
2243 TCGv_i64 t0 = tcg_temp_new_i64();
2244
2245 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2246 tcg_gen_shri_i64(t0, t0, 32);
2247 tcg_gen_trunc_i64_reg(dest, t0);
2248 #endif
2249 save_gpr(ctx, a->t, dest);
2250
2251 cond_free(&ctx->null_cond);
2252 return true;
2253 }
2254
2255 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2256 {
2257 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2258 #ifndef CONFIG_USER_ONLY
2259 TCGv_reg tmp;
2260
2261 nullify_over(ctx);
2262
2263 tmp = tcg_temp_new();
2264 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2265 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2266 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2267 save_gpr(ctx, a->t, tmp);
2268
2269 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2270 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2271 return nullify_end(ctx);
2272 #endif
2273 }
2274
2275 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2276 {
2277 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2278 #ifndef CONFIG_USER_ONLY
2279 TCGv_reg tmp;
2280
2281 nullify_over(ctx);
2282
2283 tmp = tcg_temp_new();
2284 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2285 tcg_gen_ori_reg(tmp, tmp, a->i);
2286 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2287 save_gpr(ctx, a->t, tmp);
2288
2289 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2290 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2291 return nullify_end(ctx);
2292 #endif
2293 }
2294
2295 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2296 {
2297 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2298 #ifndef CONFIG_USER_ONLY
2299 TCGv_reg tmp, reg;
2300 nullify_over(ctx);
2301
2302 reg = load_gpr(ctx, a->r);
2303 tmp = tcg_temp_new();
2304 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2305
2306 /* Exit the TB to recognize new interrupts. */
2307 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2308 return nullify_end(ctx);
2309 #endif
2310 }
2311
2312 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2313 {
2314 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2315 #ifndef CONFIG_USER_ONLY
2316 nullify_over(ctx);
2317
2318 if (rfi_r) {
2319 gen_helper_rfi_r(tcg_env);
2320 } else {
2321 gen_helper_rfi(tcg_env);
2322 }
2323 /* Exit the TB to recognize new interrupts. */
2324 tcg_gen_exit_tb(NULL, 0);
2325 ctx->base.is_jmp = DISAS_NORETURN;
2326
2327 return nullify_end(ctx);
2328 #endif
2329 }
2330
2331 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2332 {
2333 return do_rfi(ctx, false);
2334 }
2335
2336 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2337 {
2338 return do_rfi(ctx, true);
2339 }
2340
2341 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2342 {
2343 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2344 #ifndef CONFIG_USER_ONLY
2345 nullify_over(ctx);
2346 gen_helper_halt(tcg_env);
2347 ctx->base.is_jmp = DISAS_NORETURN;
2348 return nullify_end(ctx);
2349 #endif
2350 }
2351
2352 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2353 {
2354 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2355 #ifndef CONFIG_USER_ONLY
2356 nullify_over(ctx);
2357 gen_helper_reset(tcg_env);
2358 ctx->base.is_jmp = DISAS_NORETURN;
2359 return nullify_end(ctx);
2360 #endif
2361 }
2362
2363 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2364 {
2365 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2366 #ifndef CONFIG_USER_ONLY
2367 nullify_over(ctx);
2368 gen_helper_getshadowregs(tcg_env);
2369 return nullify_end(ctx);
2370 #endif
2371 }
2372
2373 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2374 {
2375 if (a->m) {
2376 TCGv_reg dest = dest_gpr(ctx, a->b);
2377 TCGv_reg src1 = load_gpr(ctx, a->b);
2378 TCGv_reg src2 = load_gpr(ctx, a->x);
2379
2380 /* The only thing we need to do is the base register modification. */
2381 tcg_gen_add_reg(dest, src1, src2);
2382 save_gpr(ctx, a->b, dest);
2383 }
2384 cond_free(&ctx->null_cond);
2385 return true;
2386 }
2387
2388 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2389 {
2390 TCGv_reg dest, ofs;
2391 TCGv_i32 level, want;
2392 TCGv_tl addr;
2393
2394 nullify_over(ctx);
2395
2396 dest = dest_gpr(ctx, a->t);
2397 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2398
2399 if (a->imm) {
2400 level = tcg_constant_i32(a->ri);
2401 } else {
2402 level = tcg_temp_new_i32();
2403 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2404 tcg_gen_andi_i32(level, level, 3);
2405 }
2406 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2407
2408 gen_helper_probe(dest, tcg_env, addr, level, want);
2409
2410 save_gpr(ctx, a->t, dest);
2411 return nullify_end(ctx);
2412 }
2413
2414 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2415 {
2416 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2417 #ifndef CONFIG_USER_ONLY
2418 TCGv_tl addr;
2419 TCGv_reg ofs, reg;
2420
2421 nullify_over(ctx);
2422
2423 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2424 reg = load_gpr(ctx, a->r);
2425 if (a->addr) {
2426 gen_helper_itlba(tcg_env, addr, reg);
2427 } else {
2428 gen_helper_itlbp(tcg_env, addr, reg);
2429 }
2430
2431 /* Exit TB for TLB change if mmu is enabled. */
2432 if (ctx->tb_flags & PSW_C) {
2433 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2434 }
2435 return nullify_end(ctx);
2436 #endif
2437 }
2438
2439 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2440 {
2441 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2442 #ifndef CONFIG_USER_ONLY
2443 TCGv_tl addr;
2444 TCGv_reg ofs;
2445
2446 nullify_over(ctx);
2447
2448 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2449 if (a->m) {
2450 save_gpr(ctx, a->b, ofs);
2451 }
2452 if (a->local) {
2453 gen_helper_ptlbe(tcg_env);
2454 } else {
2455 gen_helper_ptlb(tcg_env, addr);
2456 }
2457
2458 /* Exit TB for TLB change if mmu is enabled. */
2459 if (ctx->tb_flags & PSW_C) {
2460 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2461 }
2462 return nullify_end(ctx);
2463 #endif
2464 }
2465
2466 /*
2467 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2468 * See
2469 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2470 * page 13-9 (195/206)
2471 */
2472 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2473 {
2474 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2475 #ifndef CONFIG_USER_ONLY
2476 TCGv_tl addr, atl, stl;
2477 TCGv_reg reg;
2478
2479 nullify_over(ctx);
2480
2481 /*
2482 * FIXME:
2483 * if (not (pcxl or pcxl2))
2484 * return gen_illegal(ctx);
2485 *
2486 * Note for future: these are 32-bit systems; no hppa64.
2487 */
2488
2489 atl = tcg_temp_new_tl();
2490 stl = tcg_temp_new_tl();
2491 addr = tcg_temp_new_tl();
2492
2493 tcg_gen_ld32u_i64(stl, tcg_env,
2494 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2495 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2496 tcg_gen_ld32u_i64(atl, tcg_env,
2497 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2498 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2499 tcg_gen_shli_i64(stl, stl, 32);
2500 tcg_gen_or_tl(addr, atl, stl);
2501
2502 reg = load_gpr(ctx, a->r);
2503 if (a->addr) {
2504 gen_helper_itlba(tcg_env, addr, reg);
2505 } else {
2506 gen_helper_itlbp(tcg_env, addr, reg);
2507 }
2508
2509 /* Exit TB for TLB change if mmu is enabled. */
2510 if (ctx->tb_flags & PSW_C) {
2511 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2512 }
2513 return nullify_end(ctx);
2514 #endif
2515 }
2516
2517 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2518 {
2519 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2520 #ifndef CONFIG_USER_ONLY
2521 TCGv_tl vaddr;
2522 TCGv_reg ofs, paddr;
2523
2524 nullify_over(ctx);
2525
2526 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2527
2528 paddr = tcg_temp_new();
2529 gen_helper_lpa(paddr, tcg_env, vaddr);
2530
2531 /* Note that physical address result overrides base modification. */
2532 if (a->m) {
2533 save_gpr(ctx, a->b, ofs);
2534 }
2535 save_gpr(ctx, a->t, paddr);
2536
2537 return nullify_end(ctx);
2538 #endif
2539 }
2540
2541 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2542 {
2543 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2544
2545 /* The Coherence Index is an implementation-defined function of the
2546 physical address. Two addresses with the same CI have a coherent
2547 view of the cache. Our implementation is to return 0 for all,
2548 since the entire address space is coherent. */
2549 save_gpr(ctx, a->t, tcg_constant_reg(0));
2550
2551 cond_free(&ctx->null_cond);
2552 return true;
2553 }
2554
2555 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2556 {
2557 return do_add_reg(ctx, a, false, false, false, false);
2558 }
2559
2560 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2561 {
2562 return do_add_reg(ctx, a, true, false, false, false);
2563 }
2564
2565 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2566 {
2567 return do_add_reg(ctx, a, false, true, false, false);
2568 }
2569
2570 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2571 {
2572 return do_add_reg(ctx, a, false, false, false, true);
2573 }
2574
2575 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2576 {
2577 return do_add_reg(ctx, a, false, true, false, true);
2578 }
2579
2580 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2581 {
2582 return do_sub_reg(ctx, a, false, false, false);
2583 }
2584
2585 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2586 {
2587 return do_sub_reg(ctx, a, true, false, false);
2588 }
2589
2590 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2591 {
2592 return do_sub_reg(ctx, a, false, false, true);
2593 }
2594
2595 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2596 {
2597 return do_sub_reg(ctx, a, true, false, true);
2598 }
2599
2600 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2601 {
2602 return do_sub_reg(ctx, a, false, true, false);
2603 }
2604
2605 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2606 {
2607 return do_sub_reg(ctx, a, true, true, false);
2608 }
2609
2610 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2611 {
2612 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2613 }
2614
2615 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2616 {
2617 return do_log_reg(ctx, a, tcg_gen_and_reg);
2618 }
2619
2620 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2621 {
2622 if (a->cf == 0) {
2623 unsigned r2 = a->r2;
2624 unsigned r1 = a->r1;
2625 unsigned rt = a->t;
2626
2627 if (rt == 0) { /* NOP */
2628 cond_free(&ctx->null_cond);
2629 return true;
2630 }
2631 if (r2 == 0) { /* COPY */
2632 if (r1 == 0) {
2633 TCGv_reg dest = dest_gpr(ctx, rt);
2634 tcg_gen_movi_reg(dest, 0);
2635 save_gpr(ctx, rt, dest);
2636 } else {
2637 save_gpr(ctx, rt, cpu_gr[r1]);
2638 }
2639 cond_free(&ctx->null_cond);
2640 return true;
2641 }
2642 #ifndef CONFIG_USER_ONLY
2643 /* These are QEMU extensions and are nops in the real architecture:
2644 *
2645 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2646 * or %r31,%r31,%r31 -- death loop; offline cpu
2647 * currently implemented as idle.
2648 */
2649 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2650 /* No need to check for supervisor, as userland can only pause
2651 until the next timer interrupt. */
2652 nullify_over(ctx);
2653
2654 /* Advance the instruction queue. */
2655 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2656 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2657 nullify_set(ctx, 0);
2658
2659 /* Tell the qemu main loop to halt until this cpu has work. */
2660 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2661 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2662 gen_excp_1(EXCP_HALTED);
2663 ctx->base.is_jmp = DISAS_NORETURN;
2664
2665 return nullify_end(ctx);
2666 }
2667 #endif
2668 }
2669 return do_log_reg(ctx, a, tcg_gen_or_reg);
2670 }
2671
2672 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2673 {
2674 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2675 }
2676
2677 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2678 {
2679 TCGv_reg tcg_r1, tcg_r2;
2680
2681 if (a->cf) {
2682 nullify_over(ctx);
2683 }
2684 tcg_r1 = load_gpr(ctx, a->r1);
2685 tcg_r2 = load_gpr(ctx, a->r2);
2686 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2687 return nullify_end(ctx);
2688 }
2689
2690 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2691 {
2692 TCGv_reg tcg_r1, tcg_r2;
2693
2694 if (a->cf) {
2695 nullify_over(ctx);
2696 }
2697 tcg_r1 = load_gpr(ctx, a->r1);
2698 tcg_r2 = load_gpr(ctx, a->r2);
2699 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2700 return nullify_end(ctx);
2701 }
2702
2703 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2704 {
2705 TCGv_reg tcg_r1, tcg_r2, tmp;
2706
2707 if (a->cf) {
2708 nullify_over(ctx);
2709 }
2710 tcg_r1 = load_gpr(ctx, a->r1);
2711 tcg_r2 = load_gpr(ctx, a->r2);
2712 tmp = tcg_temp_new();
2713 tcg_gen_not_reg(tmp, tcg_r2);
2714 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2715 return nullify_end(ctx);
2716 }
2717
2718 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2719 {
2720 return do_uaddcm(ctx, a, false);
2721 }
2722
2723 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2724 {
2725 return do_uaddcm(ctx, a, true);
2726 }
2727
2728 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2729 {
2730 TCGv_reg tmp;
2731
2732 nullify_over(ctx);
2733
2734 tmp = tcg_temp_new();
2735 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2736 if (!is_i) {
2737 tcg_gen_not_reg(tmp, tmp);
2738 }
2739 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2740 tcg_gen_muli_reg(tmp, tmp, 6);
2741 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2742 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2743 return nullify_end(ctx);
2744 }
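/*
 * Editor's note: a worked example of the correction above.  To add the
 * packed decimals 28 + 46, guest code pre-biases with 0x66666666 and
 * lets DCOR undo the bias in every digit that did not carry:
 *
 *     0x00000028 + 0x66666666 = 0x6666668E
 *     0x6666668E + 0x00000046 = 0x666666D4      ; only digit 0 carries
 *     DCOR:  0x666666D4 - 0x66666660 = 0x00000074  -> packed 74
 *
 * The shift and 0x11111111 mask of PSW[CB] extract exactly those
 * per-digit carry bits.
 */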
2745
2746 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2747 {
2748 return do_dcor(ctx, a, false);
2749 }
2750
2751 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2752 {
2753 return do_dcor(ctx, a, true);
2754 }
2755
2756 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2757 {
2758 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2759 TCGv_reg cout;
2760
2761 nullify_over(ctx);
2762
2763 in1 = load_gpr(ctx, a->r1);
2764 in2 = load_gpr(ctx, a->r2);
2765
2766 add1 = tcg_temp_new();
2767 add2 = tcg_temp_new();
2768 addc = tcg_temp_new();
2769 dest = tcg_temp_new();
2770 zero = tcg_constant_reg(0);
2771
2772 /* Form R1 << 1 | PSW[CB]{8}. */
2773 tcg_gen_add_reg(add1, in1, in1);
2774 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2775
2776 /*
2777 * Add or subtract R2, depending on PSW[V]. Proper computation of
2778 * carry requires that we subtract via + ~R2 + 1, as described in
2779 * the manual. By extracting and masking V, we can produce the
2780 * proper inputs to the addition without movcond.
2781 */
2782 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2783 tcg_gen_xor_reg(add2, in2, addc);
2784 tcg_gen_andi_reg(addc, addc, 1);
2785
2786 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2787 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2788
2789 /* Write back the result register. */
2790 save_gpr(ctx, a->t, dest);
2791
2792 /* Write back PSW[CB]. */
2793 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2794 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2795
2796 /* Write back PSW[V] for the division step. */
2797 cout = get_psw_carry(ctx, false);
2798 tcg_gen_neg_reg(cpu_psw_v, cout);
2799 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2800
2801 /* Install the new nullification. */
2802 if (a->cf) {
2803 TCGv_reg sv = NULL;
2804 if (cond_need_sv(a->cf >> 1)) {
2805 /* ??? The lshift is supposed to contribute to overflow. */
2806 sv = do_add_sv(ctx, dest, add1, add2);
2807 }
2808 ctx->null_cond = do_cond(a->cf, dest, cout, sv);
2809 }
2810
2811 return nullify_end(ctx);
2812 }
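/*
 * Editor's note: DS is not a complete divide.  The $$divI/$$divU
 * milli-code routines use it as the inner step of a non-restoring
 * division, producing one quotient bit per iteration, in the spirit of
 * (sketch only; initialization, sign and remainder fixups omitted):
 *
 *     ds   %r0, %r_div, %r0         ; prime PSW[V] from the divisor
 *     ds   %r_rem, %r_div, %r_rem   ; repeated 32 times
 */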
2813
2814 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2815 {
2816 return do_add_imm(ctx, a, false, false);
2817 }
2818
2819 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2820 {
2821 return do_add_imm(ctx, a, true, false);
2822 }
2823
2824 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2825 {
2826 return do_add_imm(ctx, a, false, true);
2827 }
2828
2829 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2830 {
2831 return do_add_imm(ctx, a, true, true);
2832 }
2833
2834 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2835 {
2836 return do_sub_imm(ctx, a, false);
2837 }
2838
2839 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2840 {
2841 return do_sub_imm(ctx, a, true);
2842 }
2843
2844 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2845 {
2846 TCGv_reg tcg_im, tcg_r2;
2847
2848 if (a->cf) {
2849 nullify_over(ctx);
2850 }
2851
2852 tcg_im = tcg_constant_reg(a->i);
2853 tcg_r2 = load_gpr(ctx, a->r);
2854 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2855
2856 return nullify_end(ctx);
2857 }
2858
2859 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2860 {
2861 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2862 return gen_illegal(ctx);
2863 } else {
2864 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2865 a->disp, a->sp, a->m, a->size | MO_TE);
2866 }
2867 }
2868
2869 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2870 {
2871 assert(a->x == 0 && a->scale == 0);
2872 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2873 return gen_illegal(ctx);
2874 } else {
2875 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2876 }
2877 }
2878
2879 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2880 {
2881 MemOp mop = MO_TE | MO_ALIGN | a->size;
2882 TCGv_reg zero, dest, ofs;
2883 TCGv_tl addr;
2884
2885 nullify_over(ctx);
2886
2887 if (a->m) {
2888 /* Base register modification. Make sure that if RT == RB,
2889 we see the result of the load. */
2890 dest = tcg_temp_new();
2891 } else {
2892 dest = dest_gpr(ctx, a->t);
2893 }
2894
2895 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2896 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2897
2898 /*
2899 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2900 * However, actual hardware succeeds when aligned mod 4.
2901 * Detect this case and log a GUEST_ERROR.
2902 *
2903 * TODO: HPPA64 relaxes the over-alignment requirement
2904 * with the ,co completer.
2905 */
2906 gen_helper_ldc_check(addr);
2907
2908 zero = tcg_constant_reg(0);
2909 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2910
2911 if (a->m) {
2912 save_gpr(ctx, a->b, ofs);
2913 }
2914 save_gpr(ctx, a->t, dest);
2915
2916 return nullify_end(ctx);
2917 }
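/*
 * Editor's note: LDCW is the architecture's one atomic read-modify-write
 * primitive and underlies parisc spinlocks: a lock word is free when
 * non-zero and is taken by swapping in zero,
 *
 *     ldcw  0(%r_lock), %r_ret      ; %r_ret != 0 -> lock acquired
 *
 * which is why it maps directly onto the atomic xchg-with-zero above.
 */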
2918
2919 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2920 {
2921 TCGv_reg ofs, val;
2922 TCGv_tl addr;
2923
2924 nullify_over(ctx);
2925
2926 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2927 ctx->mmu_idx == MMU_PHYS_IDX);
2928 val = load_gpr(ctx, a->r);
2929 if (a->a) {
2930 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2931 gen_helper_stby_e_parallel(tcg_env, addr, val);
2932 } else {
2933 gen_helper_stby_e(tcg_env, addr, val);
2934 }
2935 } else {
2936 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2937 gen_helper_stby_b_parallel(tcg_env, addr, val);
2938 } else {
2939 gen_helper_stby_b(tcg_env, addr, val);
2940 }
2941 }
2942 if (a->m) {
2943 tcg_gen_andi_reg(ofs, ofs, ~3);
2944 save_gpr(ctx, a->b, ofs);
2945 }
2946
2947 return nullify_end(ctx);
2948 }
2949
2950 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2951 {
2952 int hold_mmu_idx = ctx->mmu_idx;
2953
2954 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2955 ctx->mmu_idx = MMU_PHYS_IDX;
2956 trans_ld(ctx, a);
2957 ctx->mmu_idx = hold_mmu_idx;
2958 return true;
2959 }
2960
2961 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2962 {
2963 int hold_mmu_idx = ctx->mmu_idx;
2964
2965 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2966 ctx->mmu_idx = MMU_PHYS_IDX;
2967 trans_st(ctx, a);
2968 ctx->mmu_idx = hold_mmu_idx;
2969 return true;
2970 }
2971
2972 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2973 {
2974 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2975
2976 tcg_gen_movi_reg(tcg_rt, a->i);
2977 save_gpr(ctx, a->t, tcg_rt);
2978 cond_free(&ctx->null_cond);
2979 return true;
2980 }
2981
2982 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2983 {
2984 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2985 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2986
2987 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2988 save_gpr(ctx, 1, tcg_r1);
2989 cond_free(&ctx->null_cond);
2990 return true;
2991 }
2992
2993 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2994 {
2995 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2996
2997 /* Special case rb == 0, for the LDI pseudo-op.
2998 The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
2999 if (a->b == 0) {
3000 tcg_gen_movi_reg(tcg_rt, a->i);
3001 } else {
3002 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3003 }
3004 save_gpr(ctx, a->t, tcg_rt);
3005 cond_free(&ctx->null_cond);
3006 return true;
3007 }
3008
3009 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3010 unsigned c, unsigned f, unsigned n, int disp)
3011 {
3012 TCGv_reg dest, in2, sv;
3013 DisasCond cond;
3014
3015 in2 = load_gpr(ctx, r);
3016 dest = tcg_temp_new();
3017
3018 tcg_gen_sub_reg(dest, in1, in2);
3019
3020 sv = NULL;
3021 if (cond_need_sv(c)) {
3022 sv = do_sub_sv(ctx, dest, in1, in2);
3023 }
3024
3025 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3026 return do_cbranch(ctx, disp, n, &cond);
3027 }
3028
3029 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3030 {
3031 nullify_over(ctx);
3032 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3033 }
3034
3035 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3036 {
3037 nullify_over(ctx);
3038 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3039 }
3040
3041 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3042 unsigned c, unsigned f, unsigned n, int disp)
3043 {
3044 TCGv_reg dest, in2, sv, cb_cond;
3045 DisasCond cond;
3046 bool d = false;
3047
3048 in2 = load_gpr(ctx, r);
3049 dest = tcg_temp_new();
3050 sv = NULL;
3051 cb_cond = NULL;
3052
3053 if (cond_need_cb(c)) {
3054 TCGv_reg cb = tcg_temp_new();
3055 TCGv_reg cb_msb = tcg_temp_new();
3056
3057 tcg_gen_movi_reg(cb_msb, 0);
3058 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3059 tcg_gen_xor_reg(cb, in1, in2);
3060 tcg_gen_xor_reg(cb, cb, dest);
3061 cb_cond = get_carry(ctx, d, cb, cb_msb);
3062 } else {
3063 tcg_gen_add_reg(dest, in1, in2);
3064 }
3065 if (cond_need_sv(c)) {
3066 sv = do_add_sv(ctx, dest, in1, in2);
3067 }
3068
3069 cond = do_cond(c * 2 + f, dest, cb_cond, sv);
3070 save_gpr(ctx, r, dest);
3071 return do_cbranch(ctx, disp, n, &cond);
3072 }
3073
3074 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3075 {
3076 nullify_over(ctx);
3077 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3078 }
3079
3080 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3081 {
3082 nullify_over(ctx);
3083 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3084 }
3085
3086 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3087 {
3088 TCGv_reg tmp, tcg_r;
3089 DisasCond cond;
3090 bool d = false;
3091
3092 nullify_over(ctx);
3093
3094 tmp = tcg_temp_new();
3095 tcg_r = load_gpr(ctx, a->r);
3096 if (cond_need_ext(ctx, d)) {
3097 /* Force shift into [32,63] */
3098 tcg_gen_ori_reg(tmp, cpu_sar, 32);
3099 tcg_gen_shl_reg(tmp, tcg_r, tmp);
3100 } else {
3101 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3102 }
3103
3104 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3105 return do_cbranch(ctx, a->disp, a->n, &cond);
3106 }
3107
3108 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3109 {
3110 TCGv_reg tmp, tcg_r;
3111 DisasCond cond;
3112 bool d = false;
3113 int p;
3114
3115 nullify_over(ctx);
3116
3117 tmp = tcg_temp_new();
3118 tcg_r = load_gpr(ctx, a->r);
3119 p = a->p | (cond_need_ext(ctx, d) ? 32 : 0);
3120 tcg_gen_shli_reg(tmp, tcg_r, p);
3121
3122 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3123 return do_cbranch(ctx, a->disp, a->n, &cond);
3124 }
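/*
 * Editor's note: PA bit numbering counts from the MSB, so BB's position
 * P names machine bit 31 - P.  The left shift by P above parks the
 * selected bit in the sign position, where the signed LT/GE condition
 * reads it out.  E.g. with P = 4:
 *
 *     (r << 4) < 0  (signed)  <=>  machine bit 27 (PA bit 4) is set
 */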
3125
3126 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3127 {
3128 TCGv_reg dest;
3129 DisasCond cond;
3130
3131 nullify_over(ctx);
3132
3133 dest = dest_gpr(ctx, a->r2);
3134 if (a->r1 == 0) {
3135 tcg_gen_movi_reg(dest, 0);
3136 } else {
3137 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3138 }
3139
3140 cond = do_sed_cond(a->c, dest);
3141 return do_cbranch(ctx, a->disp, a->n, &cond);
3142 }
3143
3144 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3145 {
3146 TCGv_reg dest;
3147 DisasCond cond;
3148
3149 nullify_over(ctx);
3150
3151 dest = dest_gpr(ctx, a->r);
3152 tcg_gen_movi_reg(dest, a->i);
3153
3154 cond = do_sed_cond(a->c, dest);
3155 return do_cbranch(ctx, a->disp, a->n, &cond);
3156 }
3157
3158 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3159 {
3160 TCGv_reg dest;
3161
3162 if (a->c) {
3163 nullify_over(ctx);
3164 }
3165
3166 dest = dest_gpr(ctx, a->t);
3167 if (a->r1 == 0) {
3168 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3169 tcg_gen_shr_reg(dest, dest, cpu_sar);
3170 } else if (a->r1 == a->r2) {
3171 TCGv_i32 t32 = tcg_temp_new_i32();
3172 TCGv_i32 s32 = tcg_temp_new_i32();
3173
3174 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3175 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3176 tcg_gen_rotr_i32(t32, t32, s32);
3177 tcg_gen_extu_i32_reg(dest, t32);
3178 } else {
3179 TCGv_i64 t = tcg_temp_new_i64();
3180 TCGv_i64 s = tcg_temp_new_i64();
3181
3182 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3183 tcg_gen_extu_reg_i64(s, cpu_sar);
3184 tcg_gen_shr_i64(t, t, s);
3185 tcg_gen_trunc_i64_reg(dest, t);
3186 }
3187 save_gpr(ctx, a->t, dest);
3188
3189 /* Install the new nullification. */
3190 cond_free(&ctx->null_cond);
3191 if (a->c) {
3192 ctx->null_cond = do_sed_cond(a->c, dest);
3193 }
3194 return nullify_end(ctx);
3195 }
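/*
 * Editor's note: SHRPW is a funnel shift.  Conceptually the result is
 *
 *     t = (uint32_t)((((uint64_t)r1 << 32) | (uint32_t)r2) >> sar);
 *
 * and the three cases above are just cheaper forms of the same thing
 * when R1 is zero (a plain shift) or R1 == R2 (a rotate).
 */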
3196
3197 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3198 {
3199 unsigned sa = 31 - a->cpos;
3200 TCGv_reg dest, t2;
3201
3202 if (a->c) {
3203 nullify_over(ctx);
3204 }
3205
3206 dest = dest_gpr(ctx, a->t);
3207 t2 = load_gpr(ctx, a->r2);
3208 if (a->r1 == 0) {
3209 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3210 } else if (TARGET_REGISTER_BITS == 32) {
3211 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3212 } else if (a->r1 == a->r2) {
3213 TCGv_i32 t32 = tcg_temp_new_i32();
3214 tcg_gen_trunc_reg_i32(t32, t2);
3215 tcg_gen_rotri_i32(t32, t32, sa);
3216 tcg_gen_extu_i32_reg(dest, t32);
3217 } else {
3218 TCGv_i64 t64 = tcg_temp_new_i64();
3219 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3220 tcg_gen_shri_i64(t64, t64, sa);
3221 tcg_gen_trunc_i64_reg(dest, t64);
3222 }
3223 save_gpr(ctx, a->t, dest);
3224
3225 /* Install the new nullification. */
3226 cond_free(&ctx->null_cond);
3227 if (a->c) {
3228 ctx->null_cond = do_sed_cond(a->c, dest);
3229 }
3230 return nullify_end(ctx);
3231 }
3232
3233 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3234 {
3235 unsigned len = 32 - a->clen;
3236 TCGv_reg dest, src, tmp;
3237
3238 if (a->c) {
3239 nullify_over(ctx);
3240 }
3241
3242 dest = dest_gpr(ctx, a->t);
3243 src = load_gpr(ctx, a->r);
3244 tmp = tcg_temp_new();
3245
3246 /* Recall that SAR is using big-endian bit numbering. */
3247 tcg_gen_andi_reg(tmp, cpu_sar, 31);
3248 tcg_gen_xori_reg(tmp, tmp, 31);
3249
3250 if (a->se) {
3251 tcg_gen_sar_reg(dest, src, tmp);
3252 tcg_gen_sextract_reg(dest, dest, 0, len);
3253 } else {
3254 tcg_gen_shr_reg(dest, src, tmp);
3255 tcg_gen_extract_reg(dest, dest, 0, len);
3256 }
3257 save_gpr(ctx, a->t, dest);
3258
3259 /* Install the new nullification. */
3260 cond_free(&ctx->null_cond);
3261 if (a->c) {
3262 ctx->null_cond = do_sed_cond(a->c, dest);
3263 }
3264 return nullify_end(ctx);
3265 }
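/*
 * Editor's note: the and/xor pair above turns SAR's big-endian bit
 * position into a right-shift count without a subtract, since for
 * x in [0, 31]:
 *
 *     (x & 31) ^ 31 == 31 - x
 *
 * E.g. SAR = 5 shifts right by 26, so the extracted field ends at
 * PA bit 5.
 */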
3266
3267 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3268 {
3269 unsigned len = 32 - a->clen;
3270 unsigned cpos = 31 - a->pos;
3271 TCGv_reg dest, src;
3272
3273 if (a->c) {
3274 nullify_over(ctx);
3275 }
3276
3277 dest = dest_gpr(ctx, a->t);
3278 src = load_gpr(ctx, a->r);
3279 if (a->se) {
3280 tcg_gen_sextract_reg(dest, src, cpos, len);
3281 } else {
3282 tcg_gen_extract_reg(dest, src, cpos, len);
3283 }
3284 save_gpr(ctx, a->t, dest);
3285
3286 /* Install the new nullification. */
3287 cond_free(&ctx->null_cond);
3288 if (a->c) {
3289 ctx->null_cond = do_sed_cond(a->c, dest);
3290 }
3291 return nullify_end(ctx);
3292 }
3293
3294 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3295 {
3296 unsigned len = 32 - a->clen;
3297 target_sreg mask0, mask1;
3298 TCGv_reg dest;
3299
3300 if (a->c) {
3301 nullify_over(ctx);
3302 }
3303 if (a->cpos + len > 32) {
3304 len = 32 - a->cpos;
3305 }
3306
3307 dest = dest_gpr(ctx, a->t);
3308 mask0 = deposit64(0, a->cpos, len, a->i);
3309 mask1 = deposit64(-1, a->cpos, len, a->i);
3310
3311 if (a->nz) {
3312 TCGv_reg src = load_gpr(ctx, a->t);
3313 if (mask1 != -1) {
3314 tcg_gen_andi_reg(dest, src, mask1);
3315 src = dest;
3316 }
3317 tcg_gen_ori_reg(dest, src, mask0);
3318 } else {
3319 tcg_gen_movi_reg(dest, mask0);
3320 }
3321 save_gpr(ctx, a->t, dest);
3322
3323 /* Install the new nullification. */
3324 cond_free(&ctx->null_cond);
3325 if (a->c) {
3326 ctx->null_cond = do_sed_cond(a->c, dest);
3327 }
3328 return nullify_end(ctx);
3329 }
3330
3331 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3332 {
3333 unsigned rs = a->nz ? a->t : 0;
3334 unsigned len = 32 - a->clen;
3335 TCGv_reg dest, val;
3336
3337 if (a->c) {
3338 nullify_over(ctx);
3339 }
3340 if (a->cpos + len > 32) {
3341 len = 32 - a->cpos;
3342 }
3343
3344 dest = dest_gpr(ctx, a->t);
3345 val = load_gpr(ctx, a->r);
3346 if (rs == 0) {
3347 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3348 } else {
3349 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3350 }
3351 save_gpr(ctx, a->t, dest);
3352
3353 /* Install the new nullification. */
3354 cond_free(&ctx->null_cond);
3355 if (a->c) {
3356 ctx->null_cond = do_sed_cond(a->c, dest);
3357 }
3358 return nullify_end(ctx);
3359 }
3360
3361 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3362 unsigned nz, unsigned clen, TCGv_reg val)
3363 {
3364 unsigned rs = nz ? rt : 0;
3365 unsigned len = 32 - clen;
3366 TCGv_reg mask, tmp, shift, dest;
3367 unsigned msb = 1U << (len - 1);
3368
3369 dest = dest_gpr(ctx, rt);
3370 shift = tcg_temp_new();
3371 tmp = tcg_temp_new();
3372
3373 /* Convert big-endian bit numbering in SAR to left-shift. */
3374 tcg_gen_andi_reg(shift, cpu_sar, 31);
3375 tcg_gen_xori_reg(shift, shift, 31);
3376
3377 mask = tcg_temp_new();
3378 tcg_gen_movi_reg(mask, msb + (msb - 1));
3379 tcg_gen_and_reg(tmp, val, mask);
3380 if (rs) {
3381 tcg_gen_shl_reg(mask, mask, shift);
3382 tcg_gen_shl_reg(tmp, tmp, shift);
3383 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3384 tcg_gen_or_reg(dest, dest, tmp);
3385 } else {
3386 tcg_gen_shl_reg(dest, tmp, shift);
3387 }
3388 save_gpr(ctx, rt, dest);
3389
3390 /* Install the new nullification. */
3391 cond_free(&ctx->null_cond);
3392 if (c) {
3393 ctx->null_cond = do_sed_cond(c, dest);
3394 }
3395 return nullify_end(ctx);
3396 }
3397
3398 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3399 {
3400 if (a->c) {
3401 nullify_over(ctx);
3402 }
3403 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3404 }
3405
3406 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3407 {
3408 if (a->c) {
3409 nullify_over(ctx);
3410 }
3411 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
3412 }
3413
3414 static bool trans_be(DisasContext *ctx, arg_be *a)
3415 {
3416 TCGv_reg tmp;
3417
3418 #ifdef CONFIG_USER_ONLY
3419 /* ??? It seems like there should be a good way of using
3420 "be disp(sr2, r0)", the canonical gateway entry mechanism
3421 to our advantage. But that appears to be inconvenient to
3422 manage alongside branch delay slots. Therefore we handle
3423 entry into the gateway page via absolute address. */
3424 /* Since we don't implement spaces, just branch. Do notice the special
3425 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3426 goto_tb to the TB containing the syscall. */
3427 if (a->b == 0) {
3428 return do_dbranch(ctx, a->disp, a->l, a->n);
3429 }
3430 #else
3431 nullify_over(ctx);
3432 #endif
3433
3434 tmp = tcg_temp_new();
3435 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3436 tmp = do_ibranch_priv(ctx, tmp);
3437
3438 #ifdef CONFIG_USER_ONLY
3439 return do_ibranch(ctx, tmp, a->l, a->n);
3440 #else
3441 TCGv_i64 new_spc = tcg_temp_new_i64();
3442
3443 load_spr(ctx, new_spc, a->sp);
3444 if (a->l) {
3445 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3446 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3447 }
3448 if (a->n && use_nullify_skip(ctx)) {
3449 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3450 tcg_gen_addi_reg(tmp, tmp, 4);
3451 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3452 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3453 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3454 } else {
3455 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3456 if (ctx->iaoq_b == -1) {
3457 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3458 }
3459 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3460 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3461 nullify_set(ctx, a->n);
3462 }
3463 tcg_gen_lookup_and_goto_ptr();
3464 ctx->base.is_jmp = DISAS_NORETURN;
3465 return nullify_end(ctx);
3466 #endif
3467 }
3468
3469 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3470 {
3471 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3472 }
3473
3474 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3475 {
3476 target_ureg dest = iaoq_dest(ctx, a->disp);
3477
3478 nullify_over(ctx);
3479
3480 /* Make sure the caller hasn't done something weird with the queue.
3481 * ??? This is not quite the same as the PSW[B] bit, which would be
3482 * expensive to track. Real hardware will trap for
3483 * b gateway
3484 * b gateway+4 (in delay slot of first branch)
3485 * However, checking for a non-sequential instruction queue *will*
3486 * diagnose the security hole
3487 * b gateway
3488 * b evil
3489 * in which instructions at evil would run with increased privs.
3490 */
3491 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3492 return gen_illegal(ctx);
3493 }
3494
3495 #ifndef CONFIG_USER_ONLY
3496 if (ctx->tb_flags & PSW_C) {
3497 CPUHPPAState *env = cpu_env(ctx->cs);
3498 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3499 /* If we could not find a TLB entry, then we need to generate an
3500 ITLB miss exception so the kernel will provide it.
3501 The resulting TLB fill operation will invalidate this TB and
3502 we will re-translate, at which point we *will* be able to find
3503 the TLB entry and determine if this is in fact a gateway page. */
3504 if (type < 0) {
3505 gen_excp(ctx, EXCP_ITLB_MISS);
3506 return true;
3507 }
3508 /* No change for non-gateway pages or for priv decrease. */
3509 if (type >= 4 && type - 4 < ctx->privilege) {
3510 dest = deposit32(dest, 0, 2, type - 4);
3511 }
3512 } else {
3513 dest &= -4; /* priv = 0 */
3514 }
3515 #endif
3516
3517 if (a->l) {
3518 TCGv_reg tmp = dest_gpr(ctx, a->l);
3519 if (ctx->privilege < 3) {
3520 tcg_gen_andi_reg(tmp, tmp, -4);
3521 }
3522 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3523 save_gpr(ctx, a->l, tmp);
3524 }
3525
3526 return do_dbranch(ctx, dest, 0, a->n);
3527 }
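/*
 * Editor's note: a promotion example for the check above.  Executing a
 * gateway page of type 4 ("gate to privilege 0") at privilege 3
 * deposits 0 into the two low bits of DEST, so the target runs fully
 * privileged; a non-gateway page, or one that would only lower
 * privilege, leaves DEST unchanged.
 */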
3528
3529 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3530 {
3531 if (a->x) {
3532 TCGv_reg tmp = tcg_temp_new();
3533 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3534 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3535 /* The computation here never changes privilege level. */
3536 return do_ibranch(ctx, tmp, a->l, a->n);
3537 } else {
3538 /* BLR R0,RX is a good way to load PC+8 into RX. */
3539 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3540 }
3541 }
3542
3543 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3544 {
3545 TCGv_reg dest;
3546
3547 if (a->x == 0) {
3548 dest = load_gpr(ctx, a->b);
3549 } else {
3550 dest = tcg_temp_new();
3551 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3552 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3553 }
3554 dest = do_ibranch_priv(ctx, dest);
3555 return do_ibranch(ctx, dest, 0, a->n);
3556 }
3557
3558 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3559 {
3560 TCGv_reg dest;
3561
3562 #ifdef CONFIG_USER_ONLY
3563 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3564 return do_ibranch(ctx, dest, a->l, a->n);
3565 #else
3566 nullify_over(ctx);
3567 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3568
3569 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3570 if (ctx->iaoq_b == -1) {
3571 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3572 }
3573 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3574 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3575 if (a->l) {
3576 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3577 }
3578 nullify_set(ctx, a->n);
3579 tcg_gen_lookup_and_goto_ptr();
3580 ctx->base.is_jmp = DISAS_NORETURN;
3581 return nullify_end(ctx);
3582 #endif
3583 }
3584
3585 /*
3586 * Float class 0
3587 */
3588
3589 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3590 {
3591 tcg_gen_mov_i32(dst, src);
3592 }
3593
3594 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3595 {
3596 uint64_t ret;
3597
3598 if (TARGET_REGISTER_BITS == 64) {
3599 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3600 } else {
3601 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3602 }
3603
3604 nullify_over(ctx);
3605 save_frd(0, tcg_constant_i64(ret));
3606 return nullify_end(ctx);
3607 }
3608
3609 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3610 {
3611 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3612 }
3613
3614 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3615 {
3616 tcg_gen_mov_i64(dst, src);
3617 }
3618
3619 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3620 {
3621 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3622 }
3623
3624 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3625 {
3626 tcg_gen_andi_i32(dst, src, INT32_MAX);
3627 }
3628
3629 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3630 {
3631 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3632 }
3633
3634 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3635 {
3636 tcg_gen_andi_i64(dst, src, INT64_MAX);
3637 }
3638
3639 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3640 {
3641 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3642 }
3643
3644 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3645 {
3646 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3647 }
3648
3649 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3650 {
3651 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3652 }
3653
3654 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3655 {
3656 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3657 }
3658
3659 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3660 {
3661 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3662 }
3663
3664 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3665 {
3666 tcg_gen_xori_i32(dst, src, INT32_MIN);
3667 }
3668
3669 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3670 {
3671 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3672 }
3673
3674 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3675 {
3676 tcg_gen_xori_i64(dst, src, INT64_MIN);
3677 }
3678
3679 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3680 {
3681 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3682 }
3683
3684 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3685 {
3686 tcg_gen_ori_i32(dst, src, INT32_MIN);
3687 }
3688
3689 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3690 {
3691 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3692 }
3693
3694 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3695 {
3696 tcg_gen_ori_i64(dst, src, INT64_MIN);
3697 }
3698
3699 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3700 {
3701 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3702 }
3703
3704 /*
3705 * Float class 1
3706 */
3707
3708 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3709 {
3710 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3711 }
3712
3713 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3714 {
3715 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3716 }
3717
3718 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3719 {
3720 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3721 }
3722
3723 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3724 {
3725 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3726 }
3727
3728 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3729 {
3730 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3731 }
3732
3733 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3734 {
3735 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3736 }
3737
3738 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3739 {
3740 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3741 }
3742
3743 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3744 {
3745 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3746 }
3747
3748 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3749 {
3750 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3751 }
3752
3753 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3754 {
3755 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3756 }
3757
3758 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3759 {
3760 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3761 }
3762
3763 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3764 {
3765 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3766 }
3767
3768 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3769 {
3770 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3771 }
3772
3773 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3774 {
3775 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3776 }
3777
3778 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3779 {
3780 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3781 }
3782
3783 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3784 {
3785 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3786 }
3787
3788 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3789 {
3790 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3791 }
3792
3793 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3794 {
3795 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3796 }
3797
3798 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3799 {
3800 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3801 }
3802
3803 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3804 {
3805 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3806 }
3807
3808 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3809 {
3810 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3811 }
3812
3813 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3814 {
3815 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3816 }
3817
3818 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3819 {
3820 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3821 }
3822
3823 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3824 {
3825 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3826 }
3827
3828 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3829 {
3830 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3831 }
3832
3833 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3834 {
3835 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3836 }
3837
3838 /*
3839 * Float class 2
3840 */
3841
3842 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3843 {
3844 TCGv_i32 ta, tb, tc, ty;
3845
3846 nullify_over(ctx);
3847
3848 ta = load_frw0_i32(a->r1);
3849 tb = load_frw0_i32(a->r2);
3850 ty = tcg_constant_i32(a->y);
3851 tc = tcg_constant_i32(a->c);
3852
3853 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3854
3855 return nullify_end(ctx);
3856 }
3857
3858 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3859 {
3860 TCGv_i64 ta, tb;
3861 TCGv_i32 tc, ty;
3862
3863 nullify_over(ctx);
3864
3865 ta = load_frd0(a->r1);
3866 tb = load_frd0(a->r2);
3867 ty = tcg_constant_i32(a->y);
3868 tc = tcg_constant_i32(a->c);
3869
3870 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3871
3872 return nullify_end(ctx);
3873 }
3874
3875 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3876 {
3877 TCGv_reg t;
3878
3879 nullify_over(ctx);
3880
3881 t = tcg_temp_new();
3882 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3883
3884 if (a->y == 1) {
3885 int mask;
3886 bool inv = false;
3887
3888 switch (a->c) {
3889 case 0: /* simple */
3890 tcg_gen_andi_reg(t, t, 0x4000000);
3891 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3892 goto done;
3893 case 2: /* rej */
3894 inv = true;
3895 /* fallthru */
3896 case 1: /* acc */
3897 mask = 0x43ff800;
3898 break;
3899 case 6: /* rej8 */
3900 inv = true;
3901 /* fallthru */
3902 case 5: /* acc8 */
3903 mask = 0x43f8000;
3904 break;
3905 case 9: /* acc6 */
3906 mask = 0x43e0000;
3907 break;
3908 case 13: /* acc4 */
3909 mask = 0x4380000;
3910 break;
3911 case 17: /* acc2 */
3912 mask = 0x4200000;
3913 break;
3914 default:
3915 gen_illegal(ctx);
3916 return true;
3917 }
3918 if (inv) {
3919 TCGv_reg c = tcg_constant_reg(mask);
3920 tcg_gen_or_reg(t, t, c);
3921 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3922 } else {
3923 tcg_gen_andi_reg(t, t, mask);
3924 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3925 }
3926 } else {
3927 unsigned cbit = (a->y ^ 1) - 1;
3928
3929 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3930 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3931 }
3932
3933 done:
3934 return nullify_end(ctx);
3935 }
3936
3937 /*
3938 * Float class 3
3939 */
3940
3941 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3942 {
3943 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3944 }
3945
3946 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3947 {
3948 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3949 }
3950
3951 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3952 {
3953 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3954 }
3955
3956 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3957 {
3958 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3959 }
3960
3961 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3962 {
3963 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3964 }
3965
3966 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3967 {
3968 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3969 }
3970
3971 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3972 {
3973 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3974 }
3975
3976 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3977 {
3978 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3979 }
3980
3981 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3982 {
3983 TCGv_i64 x, y;
3984
3985 nullify_over(ctx);
3986
3987 x = load_frw0_i64(a->r1);
3988 y = load_frw0_i64(a->r2);
3989 tcg_gen_mul_i64(x, x, y);
3990 save_frd(a->t, x);
3991
3992 return nullify_end(ctx);
3993 }
3994
3995 /* Convert the fmpyadd single-precision register encodings to standard. */
3996 static inline int fmpyadd_s_reg(unsigned r)
3997 {
3998 return (r & 16) * 2 + 16 + (r & 15);
3999 }
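/*
 * Editor's note: two sample values of the mapping above:
 *
 *     fmpyadd_s_reg(5)  == 21      ; (5 & 16) * 2  + 16 + (5 & 15)
 *     fmpyadd_s_reg(21) == 53      ; (21 & 16) * 2 + 16 + (21 & 15)
 *
 * i.e. the 5-bit field selects one of the single-precision halves of
 * fr16..fr31 in the flattened register numbering.
 */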
4000
4001 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4002 {
4003 int tm = fmpyadd_s_reg(a->tm);
4004 int ra = fmpyadd_s_reg(a->ra);
4005 int ta = fmpyadd_s_reg(a->ta);
4006 int rm2 = fmpyadd_s_reg(a->rm2);
4007 int rm1 = fmpyadd_s_reg(a->rm1);
4008
4009 nullify_over(ctx);
4010
4011 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4012 do_fop_weww(ctx, ta, ta, ra,
4013 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4014
4015 return nullify_end(ctx);
4016 }
4017
4018 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4019 {
4020 return do_fmpyadd_s(ctx, a, false);
4021 }
4022
4023 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4024 {
4025 return do_fmpyadd_s(ctx, a, true);
4026 }
4027
4028 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4029 {
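/* Unlike the single forms, the 5-bit fields here address fr0-fr31 directly. */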
4030 nullify_over(ctx);
4031
4032 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4033 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4034 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4035
4036 return nullify_end(ctx);
4037 }
4038
4039 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4040 {
4041 return do_fmpyadd_d(ctx, a, false);
4042 }
4043
4044 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4045 {
4046 return do_fmpyadd_d(ctx, a, true);
4047 }
4048
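/*
 * fmpyfadd/fmpynfadd are the PA 2.0 fused multiply-add ops,
 * t = +/-(rm1 * rm2) + ra3 with a single rounding step, hence the
 * dedicated helpers instead of a multiply/add pair.
 */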
4049 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4050 {
4051 TCGv_i32 x, y, z;
4052
4053 nullify_over(ctx);
4054 x = load_frw0_i32(a->rm1);
4055 y = load_frw0_i32(a->rm2);
4056 z = load_frw0_i32(a->ra3);
4057
4058 if (a->neg) {
4059 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4060 } else {
4061 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4062 }
4063
4064 save_frw_i32(a->t, x);
4065 return nullify_end(ctx);
4066 }
4067
4068 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4069 {
4070 TCGv_i64 x, y, z;
4071
4072 nullify_over(ctx);
4073 x = load_frd0(a->rm1);
4074 y = load_frd0(a->rm2);
4075 z = load_frd0(a->ra3);
4076
4077 if (a->neg) {
4078 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4079 } else {
4080 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4081 }
4082
4083 save_frd(a->t, x);
4084 return nullify_end(ctx);
4085 }
4086
4087 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4088 {
4089 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4090 #ifndef CONFIG_USER_ONLY
4091 if (a->i == 0x100) {
4092 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4093 nullify_over(ctx);
4094 gen_helper_diag_btlb(tcg_env);
4095 return nullify_end(ctx);
4096 }
4097 #endif
4098 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4099 return true;
4100 }
4101
4102 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4103 {
4104 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4105 int bound;
4106
4107 ctx->cs = cs;
4108 ctx->tb_flags = ctx->base.tb->flags;
4109 ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4110
4111 #ifdef CONFIG_USER_ONLY
4112 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4113 ctx->mmu_idx = MMU_USER_IDX;
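/* IAOQ addresses carry the privilege level in their low two bits. */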
4114 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4115 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4116 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4117 #else
4118 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4119 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4120 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4121 : MMU_PHYS_IDX);
4122
4123 /* Recover the IAOQ values from the GVA + PRIV. */
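/*
 * cs_base packs the front space into its high 32 bits and the
 * signed iaoq_b - iaoq_f offset into its low 32 bits, with zero
 * meaning the back queue is unknown at translation time.
 */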
4124 uint64_t cs_base = ctx->base.tb->cs_base;
4125 uint64_t iasq_f = cs_base & ~0xffffffffull;
4126 int32_t diff = cs_base;
4127
4128 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4129 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4130 #endif
4131 ctx->iaoq_n = -1;
4132 ctx->iaoq_n_var = NULL;
4133
4134 /* Bound the number of instructions by those left on the page. */
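/* -(pc | TARGET_PAGE_MASK) is the byte count left in the page; insns are 4 bytes. */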
4135 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4136 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4137 }
4138
4139 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4140 {
4141 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4142
4143 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4144 ctx->null_cond = cond_make_f();
4145 ctx->psw_n_nonzero = false;
4146 if (ctx->tb_flags & PSW_N) {
4147 ctx->null_cond.c = TCG_COND_ALWAYS;
4148 ctx->psw_n_nonzero = true;
4149 }
4150 ctx->null_lab = NULL;
4151 }
4152
4153 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4154 {
4155 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4156
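/* Record both IAOQ values (possibly -1 for "variable") for restore_state_to_opc. */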
4157 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4158 }
4159
4160 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4161 {
4162 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4163 CPUHPPAState *env = cpu_env(cs);
4164 DisasJumpType ret;
4165
4166 /* Execute one insn. */
4167 #ifdef CONFIG_USER_ONLY
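/*
 * User-mode page zero holds the Linux gateway entry points
 * (0xb0 lws, 0xe0 set-thread-pointer, 0x100 syscall), which
 * do_page_zero emulates directly.
 */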
4168 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4169 do_page_zero(ctx);
4170 ret = ctx->base.is_jmp;
4171 assert(ret != DISAS_NEXT);
4172 } else
4173 #endif
4174 {
4175 /* Always fetch the insn, even if nullified, so that we check
4176 the page permissions for execute. */
4177 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4178
4179 /* Set up the IA queue for the next insn.
4180 This will be overwritten by a branch. */
4181 if (ctx->iaoq_b == -1) {
4182 ctx->iaoq_n = -1;
4183 ctx->iaoq_n_var = tcg_temp_new();
4184 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4185 } else {
4186 ctx->iaoq_n = ctx->iaoq_b + 4;
4187 ctx->iaoq_n_var = NULL;
4188 }
4189
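/*
 * If the insn is statically nullified, consume the nullification
 * and skip decode; the IA queue still advances below.
 */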
4190 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4191 ctx->null_cond.c = TCG_COND_NEVER;
4192 ret = DISAS_NEXT;
4193 } else {
4194 ctx->insn = insn;
4195 if (!decode(ctx, insn)) {
4196 gen_illegal(ctx);
4197 }
4198 ret = ctx->base.is_jmp;
4199 assert(ctx->null_lab == NULL);
4200 }
4201 }
4202
4203 /* Advance the insn queue.  Note that this check also detects
4204 a privilege change within the instruction queue. */
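/*
 * When both queue values are known constants, the target is usable
 * with goto_tb, and nullification is statically known, chain straight
 * to the next TB; otherwise mark the queue stale and resolve at
 * run time.
 */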
4205 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4206 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4207 && use_goto_tb(ctx, ctx->iaoq_b)
4208 && (ctx->null_cond.c == TCG_COND_NEVER
4209 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4210 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4211 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4212 ctx->base.is_jmp = ret = DISAS_NORETURN;
4213 } else {
4214 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4215 }
4216 }
4217 ctx->iaoq_f = ctx->iaoq_b;
4218 ctx->iaoq_b = ctx->iaoq_n;
4219 ctx->base.pc_next += 4;
4220
4221 switch (ret) {
4222 case DISAS_NORETURN:
4223 case DISAS_IAQ_N_UPDATED:
4224 break;
4225
4226 case DISAS_NEXT:
4227 case DISAS_IAQ_N_STALE:
4228 case DISAS_IAQ_N_STALE_EXIT:
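/*
 * iaoq_f == -1 means the front of the queue is only known at run
 * time (e.g. after an indirect branch): commit the queue registers
 * and nullification state before ending the TB.
 */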
4229 if (ctx->iaoq_f == -1) {
4230 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4231 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4232 #ifndef CONFIG_USER_ONLY
4233 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4234 #endif
4235 nullify_save(ctx);
4236 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4237 ? DISAS_EXIT
4238 : DISAS_IAQ_N_UPDATED);
4239 } else if (ctx->iaoq_b == -1) {
4240 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4241 }
4242 break;
4243
4244 default:
4245 g_assert_not_reached();
4246 }
4247 }
4248
4249 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4250 {
4251 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4252 DisasJumpType is_jmp = ctx->base.is_jmp;
4253
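/*
 * The stale cases must write back the IA queue and PSW[N] first;
 * STALE_EXIT then leaves via exit_tb so the main loop re-examines
 * cpu state, while the rest may chain via lookup_and_goto_ptr.
 */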
4254 switch (is_jmp) {
4255 case DISAS_NORETURN:
4256 break;
4257 case DISAS_TOO_MANY:
4258 case DISAS_IAQ_N_STALE:
4259 case DISAS_IAQ_N_STALE_EXIT:
4260 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4261 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4262 nullify_save(ctx);
4263 /* FALLTHRU */
4264 case DISAS_IAQ_N_UPDATED:
4265 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4266 tcg_gen_lookup_and_goto_ptr();
4267 break;
4268 }
4269 /* FALLTHRU */
4270 case DISAS_EXIT:
4271 tcg_gen_exit_tb(NULL, 0);
4272 break;
4273 default:
4274 g_assert_not_reached();
4275 }
4276 }
4277
4278 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4279 CPUState *cs, FILE *logfile)
4280 {
4281 target_ulong pc = dcbase->pc_first;
4282
4283 #ifdef CONFIG_USER_ONLY
4284 switch (pc) {
4285 case 0x00:
4286 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4287 return;
4288 case 0xb0:
4289 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4290 return;
4291 case 0xe0:
4292 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4293 return;
4294 case 0x100:
4295 fprintf(logfile, "IN:\n0x00000100: syscall\n");
4296 return;
4297 }
4298 #endif
4299
4300 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4301 target_disas(logfile, cs, pc, dcbase->tb->size);
4302 }
4303
4304 static const TranslatorOps hppa_tr_ops = {
4305 .init_disas_context = hppa_tr_init_disas_context,
4306 .tb_start = hppa_tr_tb_start,
4307 .insn_start = hppa_tr_insn_start,
4308 .translate_insn = hppa_tr_translate_insn,
4309 .tb_stop = hppa_tr_tb_stop,
4310 .disas_log = hppa_tr_disas_log,
4311 };
4312
4313 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4314 target_ulong pc, void *host_pc)
4315 {
4316 DisasContext ctx;
4317 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4318 }