/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these. */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

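/* A condition C applied to operands A0 and A1; used both for branch
   conditions and for the pending-nullification state below. */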
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
#define UNALIGN(C) MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted, so that 0 means an
   explicit sr0 rather than a space inferred from the base register. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M. */
static int ma_to_m(DisasContext *ctx, int val)
{
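    /*
     * With the convention used by the memory ops below
     * (-1 pre-modify, +1 post-modify, 0 no base update):
     *   m=0        -> 0
     *   m=1, a=0   -> +1 (post-modify)
     *   m=1, a=1   -> -1 (pre-modify)
     */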
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops. */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
#define DISAS_EXIT DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    return cond_make_tmp(c, a0, tcg_constant_reg(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    TCGv_reg t0 = tcg_temp_new();
    TCGv_reg t1 = tcg_temp_new();

    tcg_gen_mov_reg(t0, a0);
    tcg_gen_mov_reg(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
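    /* GR0 always reads as zero; hand callers a fresh zero temp. */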
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
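    /* While nullification is pending we must not clobber the real
       register; write a temp and let save_gpr resolve the condition
       with a movcond. */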
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS 0
# define LO_OFS 4
#else
# define HI_OFS 4
# define LO_OFS 0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid. */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
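    /* If the condition already is "PSW[N] != 0", there is nothing
       to recompute or store. */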
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X. The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB. Therefore we do not update psw_n_nonzero. */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over. Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path. */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn. */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that. */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction. Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place. */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

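/* With PSW_W set, the GVA offset field is 62 bits; otherwise 32 bits. */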
static target_ureg gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest,
                            target_ureg ival, TCGv_reg vval)
{
    target_ureg mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_reg(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_andi_reg(dest, vval, mask);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do { \
        if (ctx->privilege != 0) { \
            return gen_excp_iir(ctx, EXCP); \
        } \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn. This avoids creating and
   executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

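/* Conditions 2 (< / >=), 3 (<= / >) and 6 (SV / NSV) are defined in
   terms of signed overflow. */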
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

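/* Conditions 4 (NUV / UV) and 5 (ZNV / VNZ) are defined in terms of
   the carry bit. */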
static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_reg. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return TARGET_REGISTER_BITS == 64 && !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_reg res, TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <> (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32u_reg(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >= (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_reg(tmp, tmp, 31, 1);
            tcg_gen_and_reg(tmp, tmp, res);
            tcg_gen_ext32u_reg(tmp, tmp);
        } else {
            tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
            tcg_gen_and_reg(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32s_reg(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly. This can allow other computation to be
   deleted as unused. */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res, TCGv_reg in1,
                             TCGv_reg in2, TCGv_reg sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t1 = tcg_temp_new();
        TCGv_reg t2 = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(t1, in1);
            tcg_gen_ext32u_reg(t2, in2);
        } else {
            tcg_gen_ext32s_reg(t1, in1);
            tcg_gen_ext32s_reg(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap. It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2: /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3: /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4: /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5: /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6: /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7: /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_reg tmp = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(tmp, res);
        } else {
            tcg_gen_ext32s_reg(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3. */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions. */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;
    target_ureg d_repl = d ? 0x0000000100000001ull : 1;
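    /* When D is set, the 32-bit masks below are replicated into both
       halves of the doubleword. */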

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
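        /* Carry-out of each bit: (in1 & in2) | ((in1 | in2) & ~res). */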
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
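        /* For a 32-bit condition, the carry out of bit 31 is found
           in bit 32 of the full carry vector. */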
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

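    /* Overflow occurs when the inputs have the same sign and the
       result's sign differs: sv = (res ^ in1) & ~(in1 ^ in2),
       leaving the answer in the sign bit of SV. */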
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction. */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

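    /* For subtraction, overflow occurs when the inputs differ in sign
       and the result's sign differs from in1:
       sv = (res ^ in1) & (in1 ^ in2). */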
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
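        /* in1 ^ in2 ^ dest recovers the per-bit carry-in vector,
           so the carry out of bit i is bit i + 1 of CB. */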
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
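        /* eqv(in1, in2) == in1 ^ ~in2, so the xor with dest below
           yields the carry vector of IN1 + ~IN2 + 1. */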
1316 tcg_gen_eqv_reg(cb, in1, in2);
1317 tcg_gen_xor_reg(cb, cb, dest);
1318 }
1319
1320 /* Compute signed overflow if required. */
1321 sv = NULL;
1322 if (is_tsv || cond_need_sv(c)) {
1323 sv = do_sub_sv(ctx, dest, in1, in2);
1324 if (is_tsv) {
1325 gen_helper_tsv(tcg_env, sv);
1326 }
1327 }
1328
1329 /* Compute the condition. We cannot use the special case for borrow. */
1330 if (!is_b) {
1331 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1332 } else {
1333 cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
1334 }
1335
1336 /* Emit any conditional trap before any writeback. */
1337 if (is_tc) {
1338 tmp = tcg_temp_new();
1339 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1340 gen_helper_tcond(tcg_env, tmp);
1341 }
1342
1343 /* Write back the result. */
1344 save_or_nullify(ctx, cpu_psw_cb, cb);
1345 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1346 save_gpr(ctx, rt, dest);
1347
1348 /* Install the new nullification. */
1349 cond_free(&ctx->null_cond);
1350 ctx->null_cond = cond;
1351 }
1352
1353 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1354 bool is_tsv, bool is_b, bool is_tc)
1355 {
1356 TCGv_reg tcg_r1, tcg_r2;
1357
1358 if (a->cf) {
1359 nullify_over(ctx);
1360 }
1361 tcg_r1 = load_gpr(ctx, a->r1);
1362 tcg_r2 = load_gpr(ctx, a->r2);
1363 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1364 return nullify_end(ctx);
1365 }
1366
1367 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1368 {
1369 TCGv_reg tcg_im, tcg_r2;
1370
1371 if (a->cf) {
1372 nullify_over(ctx);
1373 }
1374 tcg_im = tcg_constant_reg(a->i);
1375 tcg_r2 = load_gpr(ctx, a->r);
1376 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1377 return nullify_end(ctx);
1378 }
1379
1380 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1381 TCGv_reg in2, unsigned cf, bool d)
1382 {
1383 TCGv_reg dest, sv;
1384 DisasCond cond;
1385
1386 dest = tcg_temp_new();
1387 tcg_gen_sub_reg(dest, in1, in2);
1388
1389 /* Compute signed overflow if required. */
1390 sv = NULL;
1391 if (cond_need_sv(cf >> 1)) {
1392 sv = do_sub_sv(ctx, dest, in1, in2);
1393 }
1394
1395 /* Form the condition for the compare. */
1396 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1397
1398 /* Clear. */
1399 tcg_gen_movi_reg(dest, 0);
1400 save_gpr(ctx, rt, dest);
1401
1402 /* Install the new nullification. */
1403 cond_free(&ctx->null_cond);
1404 ctx->null_cond = cond;
1405 }
1406
1407 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1408 TCGv_reg in2, unsigned cf, bool d,
1409 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1410 {
1411 TCGv_reg dest = dest_gpr(ctx, rt);
1412
1413 /* Perform the operation, and writeback. */
1414 fn(dest, in1, in2);
1415 save_gpr(ctx, rt, dest);
1416
1417 /* Install the new nullification. */
1418 cond_free(&ctx->null_cond);
1419 if (cf) {
1420 ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1421 }
1422 }
1423
1424 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1425 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1426 {
1427 TCGv_reg tcg_r1, tcg_r2;
1428
1429 if (a->cf) {
1430 nullify_over(ctx);
1431 }
1432 tcg_r1 = load_gpr(ctx, a->r1);
1433 tcg_r2 = load_gpr(ctx, a->r2);
1434 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1435 return nullify_end(ctx);
1436 }
1437
1438 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1439 TCGv_reg in2, unsigned cf, bool d, bool is_tc,
1440 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1441 {
1442 TCGv_reg dest;
1443 DisasCond cond;
1444
1445 if (cf == 0) {
1446 dest = dest_gpr(ctx, rt);
1447 fn(dest, in1, in2);
1448 save_gpr(ctx, rt, dest);
1449 cond_free(&ctx->null_cond);
1450 } else {
1451 dest = tcg_temp_new();
1452 fn(dest, in1, in2);
1453
1454 cond = do_unit_cond(cf, d, dest, in1, in2);
1455
1456 if (is_tc) {
1457 TCGv_reg tmp = tcg_temp_new();
1458 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1459 gen_helper_tcond(tcg_env, tmp);
1460 }
1461 save_gpr(ctx, rt, dest);
1462
1463 cond_free(&ctx->null_cond);
1464 ctx->null_cond = cond;
1465 }
1466 }
1467
1468 #ifndef CONFIG_USER_ONLY
1469 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1470 from the top 2 bits of the base register. There are a few system
1471 instructions that have a 3-bit space specifier, for which SR0 is
1472 not special. To handle this, pass ~SP. */
1473 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1474 {
1475 TCGv_ptr ptr;
1476 TCGv_reg tmp;
1477 TCGv_i64 spc;
1478
1479 if (sp != 0) {
1480 if (sp < 0) {
1481 sp = ~sp;
1482 }
1483 spc = tcg_temp_new_tl();
1484 load_spr(ctx, spc, sp);
1485 return spc;
1486 }
1487 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1488 return cpu_srH;
1489 }
1490
1491 ptr = tcg_temp_new_ptr();
1492 tmp = tcg_temp_new();
1493 spc = tcg_temp_new_tl();
1494
1495 /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1496 tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1497 tcg_gen_andi_reg(tmp, tmp, 030);
1498 tcg_gen_trunc_reg_ptr(ptr, tmp);
1499
1500 tcg_gen_add_ptr(ptr, ptr, tcg_env);
1501 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1502
1503 return spc;
1504 }
1505 #endif
1506
1507 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1508 unsigned rb, unsigned rx, int scale, target_sreg disp,
1509 unsigned sp, int modify, bool is_phys)
1510 {
1511 TCGv_reg base = load_gpr(ctx, rb);
1512 TCGv_reg ofs;
1513 TCGv_tl addr;
1514
1515 /* Note that RX is mutually exclusive with DISP. */
1516 if (rx) {
1517 ofs = tcg_temp_new();
1518 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1519 tcg_gen_add_reg(ofs, ofs, base);
1520 } else if (disp || modify) {
1521 ofs = tcg_temp_new();
1522 tcg_gen_addi_reg(ofs, base, disp);
1523 } else {
1524 ofs = base;
1525 }
1526
1527 *pofs = ofs;
1528 *pgva = addr = tcg_temp_new_tl();
1529 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1530 tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
1531 #ifndef CONFIG_USER_ONLY
1532 if (!is_phys) {
1533 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1534 }
1535 #endif
1536 }
1537
1538 /* Emit a memory load. The modify parameter should be
1539 * < 0 for pre-modify,
1540 * > 0 for post-modify,
1541 * = 0 for no base register update.
1542 */
1543 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1544 unsigned rx, int scale, target_sreg disp,
1545 unsigned sp, int modify, MemOp mop)
1546 {
1547 TCGv_reg ofs;
1548 TCGv_tl addr;
1549
1550 /* Caller uses nullify_over/nullify_end. */
1551 assert(ctx->null_cond.c == TCG_COND_NEVER);
1552
1553 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1554 ctx->mmu_idx == MMU_PHYS_IDX);
1555 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1556 if (modify) {
1557 save_gpr(ctx, rb, ofs);
1558 }
1559 }
1560
1561 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1562 unsigned rx, int scale, target_sreg disp,
1563 unsigned sp, int modify, MemOp mop)
1564 {
1565 TCGv_reg ofs;
1566 TCGv_tl addr;
1567
1568 /* Caller uses nullify_over/nullify_end. */
1569 assert(ctx->null_cond.c == TCG_COND_NEVER);
1570
1571 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1572 ctx->mmu_idx == MMU_PHYS_IDX);
1573 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1574 if (modify) {
1575 save_gpr(ctx, rb, ofs);
1576 }
1577 }
1578
1579 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1580 unsigned rx, int scale, target_sreg disp,
1581 unsigned sp, int modify, MemOp mop)
1582 {
1583 TCGv_reg ofs;
1584 TCGv_tl addr;
1585
1586 /* Caller uses nullify_over/nullify_end. */
1587 assert(ctx->null_cond.c == TCG_COND_NEVER);
1588
1589 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1590 ctx->mmu_idx == MMU_PHYS_IDX);
1591 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1592 if (modify) {
1593 save_gpr(ctx, rb, ofs);
1594 }
1595 }
1596
1597 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1598 unsigned rx, int scale, target_sreg disp,
1599 unsigned sp, int modify, MemOp mop)
1600 {
1601 TCGv_reg ofs;
1602 TCGv_tl addr;
1603
1604 /* Caller uses nullify_over/nullify_end. */
1605 assert(ctx->null_cond.c == TCG_COND_NEVER);
1606
1607 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1608 ctx->mmu_idx == MMU_PHYS_IDX);
1609 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1610 if (modify) {
1611 save_gpr(ctx, rb, ofs);
1612 }
1613 }
1614
1615 #if TARGET_REGISTER_BITS == 64
1616 #define do_load_reg do_load_64
1617 #define do_store_reg do_store_64
1618 #else
1619 #define do_load_reg do_load_32
1620 #define do_store_reg do_store_32
1621 #endif
1622
1623 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1624 unsigned rx, int scale, target_sreg disp,
1625 unsigned sp, int modify, MemOp mop)
1626 {
1627 TCGv_reg dest;
1628
1629 nullify_over(ctx);
1630
1631 if (modify == 0) {
1632 /* No base register update. */
1633 dest = dest_gpr(ctx, rt);
1634 } else {
1635 /* Make sure if RT == RB, we see the result of the load. */
1636 dest = tcg_temp_new();
1637 }
1638 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1639 save_gpr(ctx, rt, dest);
1640
1641 return nullify_end(ctx);
1642 }
1643
1644 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1645 unsigned rx, int scale, target_sreg disp,
1646 unsigned sp, int modify)
1647 {
1648 TCGv_i32 tmp;
1649
1650 nullify_over(ctx);
1651
1652 tmp = tcg_temp_new_i32();
1653 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1654 save_frw_i32(rt, tmp);
1655
1656 if (rt == 0) {
1657 gen_helper_loaded_fr0(tcg_env);
1658 }
1659
1660 return nullify_end(ctx);
1661 }
1662
1663 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1664 {
1665 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1666 a->disp, a->sp, a->m);
1667 }
1668
1669 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1670 unsigned rx, int scale, target_sreg disp,
1671 unsigned sp, int modify)
1672 {
1673 TCGv_i64 tmp;
1674
1675 nullify_over(ctx);
1676
1677 tmp = tcg_temp_new_i64();
1678 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1679 save_frd(rt, tmp);
1680
1681 if (rt == 0) {
1682 gen_helper_loaded_fr0(tcg_env);
1683 }
1684
1685 return nullify_end(ctx);
1686 }
1687
1688 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1689 {
1690 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1691 a->disp, a->sp, a->m);
1692 }
1693
1694 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1695 target_sreg disp, unsigned sp,
1696 int modify, MemOp mop)
1697 {
1698 nullify_over(ctx);
1699 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1700 return nullify_end(ctx);
1701 }
1702
1703 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1704 unsigned rx, int scale, target_sreg disp,
1705 unsigned sp, int modify)
1706 {
1707 TCGv_i32 tmp;
1708
1709 nullify_over(ctx);
1710
1711 tmp = load_frw_i32(rt);
1712 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1713
1714 return nullify_end(ctx);
1715 }
1716
1717 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1718 {
1719 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1720 a->disp, a->sp, a->m);
1721 }
1722
1723 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1724 unsigned rx, int scale, target_sreg disp,
1725 unsigned sp, int modify)
1726 {
1727 TCGv_i64 tmp;
1728
1729 nullify_over(ctx);
1730
1731 tmp = load_frd(rt);
1732 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1733
1734 return nullify_end(ctx);
1735 }
1736
1737 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1738 {
1739 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1740 a->disp, a->sp, a->m);
1741 }
1742
1743 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1744 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1745 {
1746 TCGv_i32 tmp;
1747
1748 nullify_over(ctx);
1749 tmp = load_frw0_i32(ra);
1750
1751 func(tmp, tcg_env, tmp);
1752
1753 save_frw_i32(rt, tmp);
1754 return nullify_end(ctx);
1755 }
1756
1757 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1758 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1759 {
1760 TCGv_i32 dst;
1761 TCGv_i64 src;
1762
1763 nullify_over(ctx);
1764 src = load_frd(ra);
1765 dst = tcg_temp_new_i32();
1766
1767 func(dst, tcg_env, src);
1768
1769 save_frw_i32(rt, dst);
1770 return nullify_end(ctx);
1771 }
1772
1773 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1774 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1775 {
1776 TCGv_i64 tmp;
1777
1778 nullify_over(ctx);
1779 tmp = load_frd0(ra);
1780
1781 func(tmp, tcg_env, tmp);
1782
1783 save_frd(rt, tmp);
1784 return nullify_end(ctx);
1785 }
1786
1787 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1788 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1789 {
1790 TCGv_i32 src;
1791 TCGv_i64 dst;
1792
1793 nullify_over(ctx);
1794 src = load_frw0_i32(ra);
1795 dst = tcg_temp_new_i64();
1796
1797 func(dst, tcg_env, src);
1798
1799 save_frd(rt, dst);
1800 return nullify_end(ctx);
1801 }
1802
1803 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1804 unsigned ra, unsigned rb,
1805 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1806 {
1807 TCGv_i32 a, b;
1808
1809 nullify_over(ctx);
1810 a = load_frw0_i32(ra);
1811 b = load_frw0_i32(rb);
1812
1813 func(a, tcg_env, a, b);
1814
1815 save_frw_i32(rt, a);
1816 return nullify_end(ctx);
1817 }
1818
1819 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1820 unsigned ra, unsigned rb,
1821 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1822 {
1823 TCGv_i64 a, b;
1824
1825 nullify_over(ctx);
1826 a = load_frd0(ra);
1827 b = load_frd0(rb);
1828
1829 func(a, tcg_env, a, b);
1830
1831 save_frd(rt, a);
1832 return nullify_end(ctx);
1833 }
1834
1835 /* Emit an unconditional branch to a direct target, which may or may not
1836 have already had nullification handled. */
1837 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1838 unsigned link, bool is_n)
1839 {
1840 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1841 if (link != 0) {
1842 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1843 }
1844 ctx->iaoq_n = dest;
1845 if (is_n) {
1846 ctx->null_cond.c = TCG_COND_ALWAYS;
1847 }
1848 } else {
1849 nullify_over(ctx);
1850
1851 if (link != 0) {
1852 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1853 }
1854
1855 if (is_n && use_nullify_skip(ctx)) {
1856 nullify_set(ctx, 0);
1857 gen_goto_tb(ctx, 0, dest, dest + 4);
1858 } else {
1859 nullify_set(ctx, is_n);
1860 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1861 }
1862
1863 nullify_end(ctx);
1864
1865 nullify_set(ctx, 0);
1866 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1867 ctx->base.is_jmp = DISAS_NORETURN;
1868 }
1869 return true;
1870 }
1871
1872 /* Emit a conditional branch to a direct target. If the branch itself
1873 is nullified, we should have already used nullify_over. */
1874 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1875 DisasCond *cond)
1876 {
1877 target_ureg dest = iaoq_dest(ctx, disp);
1878 TCGLabel *taken = NULL;
1879 TCGCond c = cond->c;
1880 bool n;
1881
1882 assert(ctx->null_cond.c == TCG_COND_NEVER);
1883
1884 /* Handle TRUE and NEVER as direct branches. */
1885 if (c == TCG_COND_ALWAYS) {
1886 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1887 }
1888 if (c == TCG_COND_NEVER) {
1889 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1890 }
1891
1892 taken = gen_new_label();
1893 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1894 cond_free(cond);
1895
1896 /* Not taken: Condition not satisfied; nullify on backward branches. */
1897 n = is_n && disp < 0;
1898 if (n && use_nullify_skip(ctx)) {
1899 nullify_set(ctx, 0);
1900 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1901 } else {
1902 if (!n && ctx->null_lab) {
1903 gen_set_label(ctx->null_lab);
1904 ctx->null_lab = NULL;
1905 }
1906 nullify_set(ctx, n);
1907 if (ctx->iaoq_n == -1) {
1908 /* The temporary iaoq_n_var died at the branch above.
1909 Regenerate it here instead of saving it. */
1910 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1911 }
1912 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1913 }
1914
1915 gen_set_label(taken);
1916
1917 /* Taken: Condition satisfied; nullify on forward branches. */
1918 n = is_n && disp >= 0;
1919 if (n && use_nullify_skip(ctx)) {
1920 nullify_set(ctx, 0);
1921 gen_goto_tb(ctx, 1, dest, dest + 4);
1922 } else {
1923 nullify_set(ctx, n);
1924 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1925 }
1926
1927 /* Not taken: the branch itself was nullified. */
1928 if (ctx->null_lab) {
1929 gen_set_label(ctx->null_lab);
1930 ctx->null_lab = NULL;
1931 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1932 } else {
1933 ctx->base.is_jmp = DISAS_NORETURN;
1934 }
1935 return true;
1936 }
1937
1938 /* Emit an unconditional branch to an indirect target. This handles
1939 nullification of the branch itself. */
1940 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1941 unsigned link, bool is_n)
1942 {
1943 TCGv_reg a0, a1, next, tmp;
1944 TCGCond c;
1945
1946 assert(ctx->null_lab == NULL);
1947
1948 if (ctx->null_cond.c == TCG_COND_NEVER) {
1949 if (link != 0) {
1950 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1951 }
1952 next = tcg_temp_new();
1953 tcg_gen_mov_reg(next, dest);
1954 if (is_n) {
1955 if (use_nullify_skip(ctx)) {
1956 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1957 tcg_gen_addi_reg(next, next, 4);
1958 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1959 nullify_set(ctx, 0);
1960 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1961 return true;
1962 }
1963 ctx->null_cond.c = TCG_COND_ALWAYS;
1964 }
1965 ctx->iaoq_n = -1;
1966 ctx->iaoq_n_var = next;
1967 } else if (is_n && use_nullify_skip(ctx)) {
1968 /* The (conditional) branch, B, nullifies the next insn, N,
1969 and we're allowed to skip execution of N (no single-step or
1970 tracepoint in effect). Since the goto_ptr that we must use
1971 for the indirect branch consumes no special resources, we
1972 can (conditionally) skip B and continue execution. */
1973 /* The use_nullify_skip test implies we have a known control path. */
1974 tcg_debug_assert(ctx->iaoq_b != -1);
1975 tcg_debug_assert(ctx->iaoq_n != -1);
1976
1977 /* We do have to handle the non-local temporary, DEST, before
1978 branching. Since IAOQ_F is not really live at this point, we
1979 can simply store DEST optimistically. Similarly with IAOQ_B. */
1980 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1981 next = tcg_temp_new();
1982 tcg_gen_addi_reg(next, dest, 4);
1983 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1984
1985 nullify_over(ctx);
1986 if (link != 0) {
1987 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1988 }
1989 tcg_gen_lookup_and_goto_ptr();
1990 return nullify_end(ctx);
1991 } else {
1992 c = ctx->null_cond.c;
1993 a0 = ctx->null_cond.a0;
1994 a1 = ctx->null_cond.a1;
1995
1996 tmp = tcg_temp_new();
1997 next = tcg_temp_new();
1998
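/* Select the next front-of-queue value: the fall-through address
   when the branch itself is nullified, otherwise the branch target.
   The link register below is likewise updated only when the branch
   actually executes. */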
1999 copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
2000 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
2001 ctx->iaoq_n = -1;
2002 ctx->iaoq_n_var = next;
2003
2004 if (link != 0) {
2005 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
2006 }
2007
2008 if (is_n) {
2009 /* The branch nullifies the next insn, which means the state of N
2010 after the branch is the inverse of the state of N that applied
2011 to the branch. */
2012 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
2013 cond_free(&ctx->null_cond);
2014 ctx->null_cond = cond_make_n();
2015 ctx->psw_n_nonzero = true;
2016 } else {
2017 cond_free(&ctx->null_cond);
2018 }
2019 }
2020 return true;
2021 }
2022
2023 /* Implement
2024 * if (IAOQ_Front{30..31} < GR[b]{30..31})
2025 * IAOQ_Next{30..31} ← GR[b]{30..31};
2026 * else
2027 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2028 * which keeps the privilege level from being increased.
2029 */
2030 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
2031 {
2032 TCGv_reg dest;
2033 switch (ctx->privilege) {
2034 case 0:
2035 /* Privilege 0 is maximum and is allowed to decrease. */
2036 return offset;
2037 case 3:
2038 /* Privilege 3 is minimum and is never allowed to increase. */
2039 dest = tcg_temp_new();
2040 tcg_gen_ori_reg(dest, offset, 3);
2041 break;
2042 default:
2043 dest = tcg_temp_new();
2044 tcg_gen_andi_reg(dest, offset, -4);
2045 tcg_gen_ori_reg(dest, dest, ctx->privilege);
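/* DEST now carries the current privilege in its low two bits; keep
   whichever of DEST and OFFSET is numerically larger, i.e. the
   lower privilege level. */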
2046 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
2047 break;
2048 }
2049 return dest;
2050 }
2051
2052 #ifdef CONFIG_USER_ONLY
2053 /* On Linux, page zero is normally marked execute only + gateway.
2054 Therefore normal read or write is supposed to fail, but specific
2055 offsets have kernel code mapped to raise permissions to implement
2056 system calls. Handling this via an explicit check here, rather
2057 than in the "be disp(sr2,r0)" instruction that probably sent us
2058 here, is the easiest way to handle the branch delay slot on the
2059 aforementioned BE. */
2060 static void do_page_zero(DisasContext *ctx)
2061 {
2062 TCGv_reg tmp;
2063
2064 /* If by some means we get here with PSW[N]=1, that implies that
2065 the B,GATE instruction would be skipped, and we'd fault on the
2066 next insn within the privileged page. */
2067 switch (ctx->null_cond.c) {
2068 case TCG_COND_NEVER:
2069 break;
2070 case TCG_COND_ALWAYS:
2071 tcg_gen_movi_reg(cpu_psw_n, 0);
2072 goto do_sigill;
2073 default:
2074 /* Since this is always the first (and only) insn within the
2075 TB, we should know the state of PSW[N] from TB->FLAGS. */
2076 g_assert_not_reached();
2077 }
2078
2079 /* Check that we didn't arrive here via some means that allowed
2080 non-sequential instruction execution. Normally the PSW[B] bit
2081 detects this by disallowing execution of the B,GATE instruction
2082 under such conditions.
2083 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2084 goto do_sigill;
2085 }
2086
2087 switch (ctx->iaoq_f & -4) {
2088 case 0x00: /* Null pointer call */
2089 gen_excp_1(EXCP_IMP);
2090 ctx->base.is_jmp = DISAS_NORETURN;
2091 break;
2092
2093 case 0xb0: /* LWS */
2094 gen_excp_1(EXCP_SYSCALL_LWS);
2095 ctx->base.is_jmp = DISAS_NORETURN;
2096 break;
2097
2098 case 0xe0: /* SET_THREAD_POINTER */
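/* Linux keeps the TLS pointer in cr27: store the argument from
   %r26 there, then return to the address in %r31 at privilege 3. */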
2099 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
2100 tmp = tcg_temp_new();
2101 tcg_gen_ori_reg(tmp, cpu_gr[31], 3);
2102 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
2103 tcg_gen_addi_reg(tmp, tmp, 4);
2104 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
2105 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2106 break;
2107
2108 case 0x100: /* SYSCALL */
2109 gen_excp_1(EXCP_SYSCALL);
2110 ctx->base.is_jmp = DISAS_NORETURN;
2111 break;
2112
2113 default:
2114 do_sigill:
2115 gen_excp_1(EXCP_ILL);
2116 ctx->base.is_jmp = DISAS_NORETURN;
2117 break;
2118 }
2119 }
2120 #endif
2121
2122 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2123 {
2124 cond_free(&ctx->null_cond);
2125 return true;
2126 }
2127
2128 static bool trans_break(DisasContext *ctx, arg_break *a)
2129 {
2130 return gen_excp_iir(ctx, EXCP_BREAK);
2131 }
2132
2133 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2134 {
2135 /* No point in nullifying the memory barrier. */
2136 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2137
2138 cond_free(&ctx->null_cond);
2139 return true;
2140 }
2141
2142 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2143 {
2144 unsigned rt = a->t;
2145 TCGv_reg tmp = dest_gpr(ctx, rt);
2146 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2147 save_gpr(ctx, rt, tmp);
2148
2149 cond_free(&ctx->null_cond);
2150 return true;
2151 }
2152
2153 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2154 {
2155 unsigned rt = a->t;
2156 unsigned rs = a->sp;
2157 TCGv_i64 t0 = tcg_temp_new_i64();
2158 TCGv_reg t1 = tcg_temp_new();
2159
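/* Space registers live in the high 32 bits of their 64-bit slots
   (see trans_mtsp below), so shift down for the GPR result. */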
2160 load_spr(ctx, t0, rs);
2161 tcg_gen_shri_i64(t0, t0, 32);
2162 tcg_gen_trunc_i64_reg(t1, t0);
2163
2164 save_gpr(ctx, rt, t1);
2165
2166 cond_free(&ctx->null_cond);
2167 return true;
2168 }
2169
2170 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2171 {
2172 unsigned rt = a->t;
2173 unsigned ctl = a->r;
2174 TCGv_reg tmp;
2175
2176 switch (ctl) {
2177 case CR_SAR:
2178 if (a->e == 0) {
2179 /* MFSAR without ,W masks low 5 bits. */
2180 tmp = dest_gpr(ctx, rt);
2181 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2182 save_gpr(ctx, rt, tmp);
2183 goto done;
2184 }
2185 save_gpr(ctx, rt, cpu_sar);
2186 goto done;
2187 case CR_IT: /* Interval Timer */
2188 /* FIXME: Respect PSW_S bit. */
2189 nullify_over(ctx);
2190 tmp = dest_gpr(ctx, rt);
2191 if (translator_io_start(&ctx->base)) {
2192 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2193 }
2194 gen_helper_read_interval_timer(tmp);
2197 save_gpr(ctx, rt, tmp);
2198 return nullify_end(ctx);
2199 case 26:
2200 case 27:
2201 break;
2202 default:
2203 /* All other control registers are privileged. */
2204 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2205 break;
2206 }
2207
2208 tmp = tcg_temp_new();
2209 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2210 save_gpr(ctx, rt, tmp);
2211
2212 done:
2213 cond_free(&ctx->null_cond);
2214 return true;
2215 }
2216
2217 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2218 {
2219 unsigned rr = a->r;
2220 unsigned rs = a->sp;
2221 TCGv_i64 t64;
2222
2223 if (rs >= 5) {
2224 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2225 }
2226 nullify_over(ctx);
2227
2228 t64 = tcg_temp_new_i64();
2229 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2230 tcg_gen_shli_i64(t64, t64, 32);
2231
2232 if (rs >= 4) {
2233 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2234 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2235 } else {
2236 tcg_gen_mov_i64(cpu_sr[rs], t64);
2237 }
2238
2239 return nullify_end(ctx);
2240 }
2241
2242 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2243 {
2244 unsigned ctl = a->t;
2245 TCGv_reg reg;
2246 TCGv_reg tmp;
2247
2248 if (ctl == CR_SAR) {
2249 reg = load_gpr(ctx, a->r);
2250 tmp = tcg_temp_new();
2251 tcg_gen_andi_reg(tmp, reg, ctx->is_pa20 ? 63 : 31);
2252 save_or_nullify(ctx, cpu_sar, tmp);
2253
2254 cond_free(&ctx->null_cond);
2255 return true;
2256 }
2257
2258 /* All other control registers are privileged or read-only. */
2259 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2260
2261 #ifndef CONFIG_USER_ONLY
2262 nullify_over(ctx);
2263 reg = load_gpr(ctx, a->r);
2264
2265 switch (ctl) {
2266 case CR_IT:
2267 gen_helper_write_interval_timer(tcg_env, reg);
2268 break;
2269 case CR_EIRR:
2270 gen_helper_write_eirr(tcg_env, reg);
2271 break;
2272 case CR_EIEM:
2273 gen_helper_write_eiem(tcg_env, reg);
2274 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2275 break;
2276
2277 case CR_IIASQ:
2278 case CR_IIAOQ:
2279 /* FIXME: Respect PSW_Q bit */
2280 /* The write advances the queue and stores to the back element. */
2281 tmp = tcg_temp_new();
2282 tcg_gen_ld_reg(tmp, tcg_env,
2283 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2284 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2285 tcg_gen_st_reg(reg, tcg_env,
2286 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2287 break;
2288
2289 case CR_PID1:
2290 case CR_PID2:
2291 case CR_PID3:
2292 case CR_PID4:
2293 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2294 #ifndef CONFIG_USER_ONLY
2295 gen_helper_change_prot_id(tcg_env);
2296 #endif
2297 break;
2298
2299 default:
2300 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2301 break;
2302 }
2303 return nullify_end(ctx);
2304 #endif
2305 }
2306
2307 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2308 {
2309 TCGv_reg tmp = tcg_temp_new();
2310
2311 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2312 tcg_gen_andi_reg(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2313 save_or_nullify(ctx, cpu_sar, tmp);
2314
2315 cond_free(&ctx->null_cond);
2316 return true;
2317 }
2318
2319 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2320 {
2321 TCGv_reg dest = dest_gpr(ctx, a->t);
2322
2323 #ifdef CONFIG_USER_ONLY
2324 /* We don't implement space registers in user mode. */
2325 tcg_gen_movi_reg(dest, 0);
2326 #else
2327 TCGv_i64 t0 = tcg_temp_new_i64();
2328
2329 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330 tcg_gen_shri_i64(t0, t0, 32);
2331 tcg_gen_trunc_i64_reg(dest, t0);
2332 #endif
2333 save_gpr(ctx, a->t, dest);
2334
2335 cond_free(&ctx->null_cond);
2336 return true;
2337 }
2338
2339 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2340 {
2341 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2342 #ifndef CONFIG_USER_ONLY
2343 TCGv_reg tmp;
2344
2345 nullify_over(ctx);
2346
2347 tmp = tcg_temp_new();
2348 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2349 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2350 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2351 save_gpr(ctx, a->t, tmp);
2352
2353 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2354 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2355 return nullify_end(ctx);
2356 #endif
2357 }
2358
2359 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2360 {
2361 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2362 #ifndef CONFIG_USER_ONLY
2363 TCGv_reg tmp;
2364
2365 nullify_over(ctx);
2366
2367 tmp = tcg_temp_new();
2368 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2369 tcg_gen_ori_reg(tmp, tmp, a->i);
2370 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2371 save_gpr(ctx, a->t, tmp);
2372
2373 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2374 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2375 return nullify_end(ctx);
2376 #endif
2377 }
2378
2379 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2380 {
2381 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2382 #ifndef CONFIG_USER_ONLY
2383 TCGv_reg tmp, reg;
2384 nullify_over(ctx);
2385
2386 reg = load_gpr(ctx, a->r);
2387 tmp = tcg_temp_new();
2388 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2389
2390 /* Exit the TB to recognize new interrupts. */
2391 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2392 return nullify_end(ctx);
2393 #endif
2394 }
2395
2396 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2397 {
2398 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2399 #ifndef CONFIG_USER_ONLY
2400 nullify_over(ctx);
2401
2402 if (rfi_r) {
2403 gen_helper_rfi_r(tcg_env);
2404 } else {
2405 gen_helper_rfi(tcg_env);
2406 }
2407 /* Exit the TB to recognize new interrupts. */
2408 tcg_gen_exit_tb(NULL, 0);
2409 ctx->base.is_jmp = DISAS_NORETURN;
2410
2411 return nullify_end(ctx);
2412 #endif
2413 }
2414
2415 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2416 {
2417 return do_rfi(ctx, false);
2418 }
2419
2420 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2421 {
2422 return do_rfi(ctx, true);
2423 }
2424
2425 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2426 {
2427 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2428 #ifndef CONFIG_USER_ONLY
2429 nullify_over(ctx);
2430 gen_helper_halt(tcg_env);
2431 ctx->base.is_jmp = DISAS_NORETURN;
2432 return nullify_end(ctx);
2433 #endif
2434 }
2435
2436 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2437 {
2438 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2439 #ifndef CONFIG_USER_ONLY
2440 nullify_over(ctx);
2441 gen_helper_reset(tcg_env);
2442 ctx->base.is_jmp = DISAS_NORETURN;
2443 return nullify_end(ctx);
2444 #endif
2445 }
2446
2447 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2448 {
2449 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2450 #ifndef CONFIG_USER_ONLY
2451 nullify_over(ctx);
2452 gen_helper_getshadowregs(tcg_env);
2453 return nullify_end(ctx);
2454 #endif
2455 }
2456
2457 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2458 {
2459 if (a->m) {
2460 TCGv_reg dest = dest_gpr(ctx, a->b);
2461 TCGv_reg src1 = load_gpr(ctx, a->b);
2462 TCGv_reg src2 = load_gpr(ctx, a->x);
2463
2464 /* The only thing we need to do is the base register modification. */
2465 tcg_gen_add_reg(dest, src1, src2);
2466 save_gpr(ctx, a->b, dest);
2467 }
2468 cond_free(&ctx->null_cond);
2469 return true;
2470 }
2471
2472 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2473 {
2474 TCGv_reg dest, ofs;
2475 TCGv_i32 level, want;
2476 TCGv_tl addr;
2477
2478 nullify_over(ctx);
2479
2480 dest = dest_gpr(ctx, a->t);
2481 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2482
2483 if (a->imm) {
2484 level = tcg_constant_i32(a->ri);
2485 } else {
2486 level = tcg_temp_new_i32();
2487 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2488 tcg_gen_andi_i32(level, level, 3);
2489 }
2490 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2491
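/* The helper yields nonzero if the page grants the requested access
   at the given privilege level, zero otherwise. */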
2492 gen_helper_probe(dest, tcg_env, addr, level, want);
2493
2494 save_gpr(ctx, a->t, dest);
2495 return nullify_end(ctx);
2496 }
2497
2498 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2499 {
2500 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2501 #ifndef CONFIG_USER_ONLY
2502 TCGv_tl addr;
2503 TCGv_reg ofs, reg;
2504
2505 nullify_over(ctx);
2506
2507 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2508 reg = load_gpr(ctx, a->r);
2509 if (a->addr) {
2510 gen_helper_itlba(tcg_env, addr, reg);
2511 } else {
2512 gen_helper_itlbp(tcg_env, addr, reg);
2513 }
2514
2515 /* Exit TB for TLB change if mmu is enabled. */
2516 if (ctx->tb_flags & PSW_C) {
2517 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2518 }
2519 return nullify_end(ctx);
2520 #endif
2521 }
2522
2523 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2524 {
2525 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2526 #ifndef CONFIG_USER_ONLY
2527 TCGv_tl addr;
2528 TCGv_reg ofs;
2529
2530 nullify_over(ctx);
2531
2532 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2533 if (a->m) {
2534 save_gpr(ctx, a->b, ofs);
2535 }
2536 if (a->local) {
2537 gen_helper_ptlbe(tcg_env);
2538 } else {
2539 gen_helper_ptlb(tcg_env, addr);
2540 }
2541
2542 /* Exit TB for TLB change if mmu is enabled. */
2543 if (ctx->tb_flags & PSW_C) {
2544 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2545 }
2546 return nullify_end(ctx);
2547 #endif
2548 }
2549
2550 /*
2551 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2552 * See
2553 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2554 * page 13-9 (195/206)
2555 */
2556 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2557 {
2558 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2559 #ifndef CONFIG_USER_ONLY
2560 TCGv_tl addr, atl, stl;
2561 TCGv_reg reg;
2562
2563 nullify_over(ctx);
2564
2565 /*
2566 * FIXME:
2567 * if (not (pcxl or pcxl2))
2568 * return gen_illegal(ctx);
2569 *
2570 * Note for future: these are 32-bit systems; no hppa64.
2571 */
2572
2573 atl = tcg_temp_new_tl();
2574 stl = tcg_temp_new_tl();
2575 addr = tcg_temp_new_tl();
2576
2577 tcg_gen_ld32u_i64(stl, tcg_env,
2578 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2579 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2580 tcg_gen_ld32u_i64(atl, tcg_env,
2581 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2582 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2583 tcg_gen_shli_i64(stl, stl, 32);
2584 tcg_gen_or_tl(addr, atl, stl);
2585
2586 reg = load_gpr(ctx, a->r);
2587 if (a->addr) {
2588 gen_helper_itlba(tcg_env, addr, reg);
2589 } else {
2590 gen_helper_itlbp(tcg_env, addr, reg);
2591 }
2592
2593 /* Exit TB for TLB change if mmu is enabled. */
2594 if (ctx->tb_flags & PSW_C) {
2595 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2596 }
2597 return nullify_end(ctx);
2598 #endif
2599 }
2600
2601 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2602 {
2603 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2604 #ifndef CONFIG_USER_ONLY
2605 TCGv_tl vaddr;
2606 TCGv_reg ofs, paddr;
2607
2608 nullify_over(ctx);
2609
2610 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2611
2612 paddr = tcg_temp_new();
2613 gen_helper_lpa(paddr, tcg_env, vaddr);
2614
2615 /* Note that physical address result overrides base modification. */
2616 if (a->m) {
2617 save_gpr(ctx, a->b, ofs);
2618 }
2619 save_gpr(ctx, a->t, paddr);
2620
2621 return nullify_end(ctx);
2622 #endif
2623 }
2624
2625 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2626 {
2627 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2628
2629 /* The Coherence Index is an implementation-defined function of the
2630 physical address. Two addresses with the same CI have a coherent
2631 view of the cache. Our implementation is to return 0 for all,
2632 since the entire address space is coherent. */
2633 save_gpr(ctx, a->t, tcg_constant_reg(0));
2634
2635 cond_free(&ctx->null_cond);
2636 return true;
2637 }
2638
2639 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2640 {
2641 return do_add_reg(ctx, a, false, false, false, false);
2642 }
2643
2644 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2645 {
2646 return do_add_reg(ctx, a, true, false, false, false);
2647 }
2648
2649 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2650 {
2651 return do_add_reg(ctx, a, false, true, false, false);
2652 }
2653
2654 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2655 {
2656 return do_add_reg(ctx, a, false, false, false, true);
2657 }
2658
2659 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2660 {
2661 return do_add_reg(ctx, a, false, true, false, true);
2662 }
2663
2664 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2665 {
2666 return do_sub_reg(ctx, a, false, false, false);
2667 }
2668
2669 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2670 {
2671 return do_sub_reg(ctx, a, true, false, false);
2672 }
2673
2674 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2675 {
2676 return do_sub_reg(ctx, a, false, false, true);
2677 }
2678
2679 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2680 {
2681 return do_sub_reg(ctx, a, true, false, true);
2682 }
2683
2684 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2685 {
2686 return do_sub_reg(ctx, a, false, true, false);
2687 }
2688
2689 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2690 {
2691 return do_sub_reg(ctx, a, true, true, false);
2692 }
2693
2694 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2695 {
2696 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2697 }
2698
2699 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2700 {
2701 return do_log_reg(ctx, a, tcg_gen_and_reg);
2702 }
2703
2704 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2705 {
2706 if (a->cf == 0) {
2707 unsigned r2 = a->r2;
2708 unsigned r1 = a->r1;
2709 unsigned rt = a->t;
2710
2711 if (rt == 0) { /* NOP */
2712 cond_free(&ctx->null_cond);
2713 return true;
2714 }
2715 if (r2 == 0) { /* COPY */
2716 if (r1 == 0) {
2717 TCGv_reg dest = dest_gpr(ctx, rt);
2718 tcg_gen_movi_reg(dest, 0);
2719 save_gpr(ctx, rt, dest);
2720 } else {
2721 save_gpr(ctx, rt, cpu_gr[r1]);
2722 }
2723 cond_free(&ctx->null_cond);
2724 return true;
2725 }
2726 #ifndef CONFIG_USER_ONLY
2727 /* These are QEMU extensions and are nops in the real architecture:
2728 *
2729 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2730 * or %r31,%r31,%r31 -- death loop; offline cpu,
2731 * currently implemented as idle.
2732 */
2733 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2734 /* No need to check for supervisor, as userland can only pause
2735 until the next timer interrupt. */
2736 nullify_over(ctx);
2737
2738 /* Advance the instruction queue. */
2739 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2740 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2741 nullify_set(ctx, 0);
2742
2743 /* Tell the qemu main loop to halt until this cpu has work. */
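/* tcg_env points at HPPACPU.env; stepping back by env's offset
   reaches the CPUState at the start of the structure. */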
2744 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2745 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2746 gen_excp_1(EXCP_HALTED);
2747 ctx->base.is_jmp = DISAS_NORETURN;
2748
2749 return nullify_end(ctx);
2750 }
2751 #endif
2752 }
2753 return do_log_reg(ctx, a, tcg_gen_or_reg);
2754 }
2755
2756 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2757 {
2758 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2759 }
2760
2761 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2762 {
2763 TCGv_reg tcg_r1, tcg_r2;
2764
2765 if (a->cf) {
2766 nullify_over(ctx);
2767 }
2768 tcg_r1 = load_gpr(ctx, a->r1);
2769 tcg_r2 = load_gpr(ctx, a->r2);
2770 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2771 return nullify_end(ctx);
2772 }
2773
2774 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2775 {
2776 TCGv_reg tcg_r1, tcg_r2;
2777
2778 if (a->cf) {
2779 nullify_over(ctx);
2780 }
2781 tcg_r1 = load_gpr(ctx, a->r1);
2782 tcg_r2 = load_gpr(ctx, a->r2);
2783 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_reg);
2784 return nullify_end(ctx);
2785 }
2786
2787 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2788 {
2789 TCGv_reg tcg_r1, tcg_r2, tmp;
2790
2791 if (a->cf) {
2792 nullify_over(ctx);
2793 }
2794 tcg_r1 = load_gpr(ctx, a->r1);
2795 tcg_r2 = load_gpr(ctx, a->r2);
2796 tmp = tcg_temp_new();
2797 tcg_gen_not_reg(tmp, tcg_r2);
2798 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_reg);
2799 return nullify_end(ctx);
2800 }
2801
2802 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2803 {
2804 return do_uaddcm(ctx, a, false);
2805 }
2806
2807 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2808 {
2809 return do_uaddcm(ctx, a, true);
2810 }
2811
2812 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2813 {
2814 TCGv_reg tmp;
2815
2816 nullify_over(ctx);
2817
2818 tmp = tcg_temp_new();
2819 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2820 if (!is_i) {
2821 tcg_gen_not_reg(tmp, tmp);
2822 }
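/* Each nibble flagged above is corrected by 6, the difference
   between a binary and a decimal carry out of a BCD digit. */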
2823 tcg_gen_andi_reg(tmp, tmp, (target_ureg)0x1111111111111111ull);
2824 tcg_gen_muli_reg(tmp, tmp, 6);
2825 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2826 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2827 return nullify_end(ctx);
2828 }
2829
2830 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2831 {
2832 return do_dcor(ctx, a, false);
2833 }
2834
2835 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2836 {
2837 return do_dcor(ctx, a, true);
2838 }
2839
2840 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2841 {
2842 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2843 TCGv_reg cout;
2844
2845 nullify_over(ctx);
2846
2847 in1 = load_gpr(ctx, a->r1);
2848 in2 = load_gpr(ctx, a->r2);
2849
2850 add1 = tcg_temp_new();
2851 add2 = tcg_temp_new();
2852 addc = tcg_temp_new();
2853 dest = tcg_temp_new();
2854 zero = tcg_constant_reg(0);
2855
2856 /* Form R1 << 1 | PSW[CB]{8}. */
2857 tcg_gen_add_reg(add1, in1, in1);
2858 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2859
2860 /*
2861 * Add or subtract R2, depending on PSW[V]. Proper computation of
2862 * carry requires that we subtract via + ~R2 + 1, as described in
2863 * the manual. By extracting and masking V, we can produce the
2864 * proper inputs to the addition without movcond.
2865 */
2866 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2867 tcg_gen_xor_reg(add2, in2, addc);
2868 tcg_gen_andi_reg(addc, addc, 1);
2869
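/* The two add2 ops below compute add1 + add2 + addc, accumulating
   the carry-out of the full sum in cpu_psw_cb_msb. */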
2870 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2871 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2872
2873 /* Write back the result register. */
2874 save_gpr(ctx, a->t, dest);
2875
2876 /* Write back PSW[CB]. */
2877 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2878 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2879
2880 /* Write back PSW[V] for the division step. */
2881 cout = get_psw_carry(ctx, false);
2882 tcg_gen_neg_reg(cpu_psw_v, cout);
2883 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2884
2885 /* Install the new nullification. */
2886 if (a->cf) {
2887 TCGv_reg sv = NULL;
2888 if (cond_need_sv(a->cf >> 1)) {
2889 /* ??? The lshift is supposed to contribute to overflow. */
2890 sv = do_add_sv(ctx, dest, add1, add2);
2891 }
2892 ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2893 }
2894
2895 return nullify_end(ctx);
2896 }
2897
2898 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2899 {
2900 return do_add_imm(ctx, a, false, false);
2901 }
2902
2903 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2904 {
2905 return do_add_imm(ctx, a, true, false);
2906 }
2907
2908 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2909 {
2910 return do_add_imm(ctx, a, false, true);
2911 }
2912
2913 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2914 {
2915 return do_add_imm(ctx, a, true, true);
2916 }
2917
2918 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2919 {
2920 return do_sub_imm(ctx, a, false);
2921 }
2922
2923 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2924 {
2925 return do_sub_imm(ctx, a, true);
2926 }
2927
2928 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2929 {
2930 TCGv_reg tcg_im, tcg_r2;
2931
2932 if (a->cf) {
2933 nullify_over(ctx);
2934 }
2935
2936 tcg_im = tcg_constant_reg(a->i);
2937 tcg_r2 = load_gpr(ctx, a->r);
2938 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2939
2940 return nullify_end(ctx);
2941 }
2942
2943 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2944 {
2945 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2946 return gen_illegal(ctx);
2947 } else {
2948 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2949 a->disp, a->sp, a->m, a->size | MO_TE);
2950 }
2951 }
2952
2953 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2954 {
2955 assert(a->x == 0 && a->scale == 0);
2956 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2957 return gen_illegal(ctx);
2958 } else {
2959 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2960 }
2961 }
2962
2963 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2964 {
2965 MemOp mop = MO_TE | MO_ALIGN | a->size;
2966 TCGv_reg zero, dest, ofs;
2967 TCGv_tl addr;
2968
2969 nullify_over(ctx);
2970
2971 if (a->m) {
2972 /* Base register modification. Make sure that if RT == RB,
2973 we still see the result of the load. */
2974 dest = tcg_temp_new();
2975 } else {
2976 dest = dest_gpr(ctx, a->t);
2977 }
2978
2979 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2980 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2981
2982 /*
2983 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2984 * However, actual hardware succeeds when aligned mod 4.
2985 * Detect this case and log a GUEST_ERROR.
2986 *
2987 * TODO: HPPA64 relaxes the over-alignment requirement
2988 * with the ,co completer.
2989 */
2990 gen_helper_ldc_check(addr);
2991
2992 zero = tcg_constant_reg(0);
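/* Load-and-clear is implemented as an atomic exchange with zero. */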
2993 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2994
2995 if (a->m) {
2996 save_gpr(ctx, a->b, ofs);
2997 }
2998 save_gpr(ctx, a->t, dest);
2999
3000 return nullify_end(ctx);
3001 }
3002
3003 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3004 {
3005 TCGv_reg ofs, val;
3006 TCGv_tl addr;
3007
3008 nullify_over(ctx);
3009
3010 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3011 ctx->mmu_idx == MMU_PHYS_IDX);
3012 val = load_gpr(ctx, a->r);
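/* STBY implements the two halves of an unaligned word store: the ,b
   (begin) form stores from the addressed byte to the end of the
   word, the ,e (end) form the bytes of the word before it. The
   _parallel helpers use atomic operations when running under MTTCG. */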
3013 if (a->a) {
3014 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3015 gen_helper_stby_e_parallel(tcg_env, addr, val);
3016 } else {
3017 gen_helper_stby_e(tcg_env, addr, val);
3018 }
3019 } else {
3020 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3021 gen_helper_stby_b_parallel(tcg_env, addr, val);
3022 } else {
3023 gen_helper_stby_b(tcg_env, addr, val);
3024 }
3025 }
3026 if (a->m) {
3027 tcg_gen_andi_reg(ofs, ofs, ~3);
3028 save_gpr(ctx, a->b, ofs);
3029 }
3030
3031 return nullify_end(ctx);
3032 }
3033
3034 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3035 {
3036 int hold_mmu_idx = ctx->mmu_idx;
3037
3038 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3039 ctx->mmu_idx = MMU_PHYS_IDX;
3040 trans_ld(ctx, a);
3041 ctx->mmu_idx = hold_mmu_idx;
3042 return true;
3043 }
3044
3045 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3046 {
3047 int hold_mmu_idx = ctx->mmu_idx;
3048
3049 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3050 ctx->mmu_idx = MMU_PHYS_IDX;
3051 trans_st(ctx, a);
3052 ctx->mmu_idx = hold_mmu_idx;
3053 return true;
3054 }
3055
3056 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3057 {
3058 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3059
3060 tcg_gen_movi_reg(tcg_rt, a->i);
3061 save_gpr(ctx, a->t, tcg_rt);
3062 cond_free(&ctx->null_cond);
3063 return true;
3064 }
3065
3066 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3067 {
3068 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3069 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3070
3071 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3072 save_gpr(ctx, 1, tcg_r1);
3073 cond_free(&ctx->null_cond);
3074 return true;
3075 }
3076
3077 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3078 {
3079 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3080
3081 /* Special case rb == 0, for the LDI pseudo-op.
3082 The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
3083 if (a->b == 0) {
3084 tcg_gen_movi_reg(tcg_rt, a->i);
3085 } else {
3086 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3087 }
3088 save_gpr(ctx, a->t, tcg_rt);
3089 cond_free(&ctx->null_cond);
3090 return true;
3091 }
3092
3093 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3094 unsigned c, unsigned f, unsigned n, int disp)
3095 {
3096 TCGv_reg dest, in2, sv;
3097 DisasCond cond;
3098 bool d = false;
3099
3100 in2 = load_gpr(ctx, r);
3101 dest = tcg_temp_new();
3102
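/* The subtraction exists only to derive the branch condition;
   unlike do_addb, the result is not written back. */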
3103 tcg_gen_sub_reg(dest, in1, in2);
3104
3105 sv = NULL;
3106 if (cond_need_sv(c)) {
3107 sv = do_sub_sv(ctx, dest, in1, in2);
3108 }
3109
3110 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3111 return do_cbranch(ctx, disp, n, &cond);
3112 }
3113
3114 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3115 {
3116 nullify_over(ctx);
3117 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3118 }
3119
3120 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3121 {
3122 nullify_over(ctx);
3123 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3124 }
3125
3126 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3127 unsigned c, unsigned f, unsigned n, int disp)
3128 {
3129 TCGv_reg dest, in2, sv, cb_cond;
3130 DisasCond cond;
3131 bool d = false;
3132
3133 in2 = load_gpr(ctx, r);
3134 dest = tcg_temp_new();
3135 sv = NULL;
3136 cb_cond = NULL;
3137
3138 if (cond_need_cb(c)) {
3139 TCGv_reg cb = tcg_temp_new();
3140 TCGv_reg cb_msb = tcg_temp_new();
3141
3142 tcg_gen_movi_reg(cb_msb, 0);
3143 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
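/* in1 ^ in2 ^ dest equals the carry-in at each bit position;
   together with cb_msb this reconstructs the PSW[CB] vector. */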
3144 tcg_gen_xor_reg(cb, in1, in2);
3145 tcg_gen_xor_reg(cb, cb, dest);
3146 cb_cond = get_carry(ctx, d, cb, cb_msb);
3147 } else {
3148 tcg_gen_add_reg(dest, in1, in2);
3149 }
3150 if (cond_need_sv(c)) {
3151 sv = do_add_sv(ctx, dest, in1, in2);
3152 }
3153
3154 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3155 save_gpr(ctx, r, dest);
3156 return do_cbranch(ctx, disp, n, &cond);
3157 }
3158
3159 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3160 {
3161 nullify_over(ctx);
3162 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3163 }
3164
3165 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3166 {
3167 nullify_over(ctx);
3168 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3169 }
3170
3171 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3172 {
3173 TCGv_reg tmp, tcg_r;
3174 DisasCond cond;
3175 bool d = false;
3176
3177 nullify_over(ctx);
3178
3179 tmp = tcg_temp_new();
3180 tcg_r = load_gpr(ctx, a->r);
3181 if (cond_need_ext(ctx, d)) {
3182 /* Force shift into [32,63] */
3183 tcg_gen_ori_reg(tmp, cpu_sar, 32);
3184 tcg_gen_shl_reg(tmp, tcg_r, tmp);
3185 } else {
3186 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3187 }
3188
3189 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3190 return do_cbranch(ctx, a->disp, a->n, &cond);
3191 }
3192
3193 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3194 {
3195 TCGv_reg tmp, tcg_r;
3196 DisasCond cond;
3197 bool d = false;
3198 int p;
3199
3200 nullify_over(ctx);
3201
3202 tmp = tcg_temp_new();
3203 tcg_r = load_gpr(ctx, a->r);
3204 p = a->p | (cond_need_ext(ctx, d) ? 32 : 0);
3205 tcg_gen_shli_reg(tmp, tcg_r, p);
3206
3207 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3208 return do_cbranch(ctx, a->disp, a->n, &cond);
3209 }
3210
3211 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3212 {
3213 TCGv_reg dest;
3214 DisasCond cond;
3215
3216 nullify_over(ctx);
3217
3218 dest = dest_gpr(ctx, a->r2);
3219 if (a->r1 == 0) {
3220 tcg_gen_movi_reg(dest, 0);
3221 } else {
3222 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3223 }
3224
3225 /* All MOVB conditions are 32-bit. */
3226 cond = do_sed_cond(ctx, a->c, false, dest);
3227 return do_cbranch(ctx, a->disp, a->n, &cond);
3228 }
3229
3230 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3231 {
3232 TCGv_reg dest;
3233 DisasCond cond;
3234
3235 nullify_over(ctx);
3236
3237 dest = dest_gpr(ctx, a->r);
3238 tcg_gen_movi_reg(dest, a->i);
3239
3240 /* All MOVBI conditions are 32-bit. */
3241 cond = do_sed_cond(ctx, a->c, false, dest);
3242 return do_cbranch(ctx, a->disp, a->n, &cond);
3243 }
3244
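/* SHRPW: shift the 64-bit pair r1:r2 right by SAR. r1 == 0 reduces
   to a plain shift of the zero-extended r2, and r1 == r2 to a
   32-bit rotate; the general case shifts the concatenated pair. */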
3245 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3246 {
3247 TCGv_reg dest;
3248
3249 if (a->c) {
3250 nullify_over(ctx);
3251 }
3252
3253 dest = dest_gpr(ctx, a->t);
3254 if (a->r1 == 0) {
3255 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3256 tcg_gen_shr_reg(dest, dest, cpu_sar);
3257 } else if (a->r1 == a->r2) {
3258 TCGv_i32 t32 = tcg_temp_new_i32();
3259 TCGv_i32 s32 = tcg_temp_new_i32();
3260
3261 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3262 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3263 tcg_gen_rotr_i32(t32, t32, s32);
3264 tcg_gen_extu_i32_reg(dest, t32);
3265 } else {
3266 TCGv_i64 t = tcg_temp_new_i64();
3267 TCGv_i64 s = tcg_temp_new_i64();
3268
3269 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3270 tcg_gen_extu_reg_i64(s, cpu_sar);
3271 tcg_gen_shr_i64(t, t, s);
3272 tcg_gen_trunc_i64_reg(dest, t);
3273 }
3274 save_gpr(ctx, a->t, dest);
3275
3276 /* Install the new nullification. */
3277 cond_free(&ctx->null_cond);
3278 if (a->c) {
3279 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3280 }
3281 return nullify_end(ctx);
3282 }
3283
3284 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3285 {
3286 unsigned sa = 31 - a->cpos;
3287 TCGv_reg dest, t2;
3288
3289 if (a->c) {
3290 nullify_over(ctx);
3291 }
3292
3293 dest = dest_gpr(ctx, a->t);
3294 t2 = load_gpr(ctx, a->r2);
3295 if (a->r1 == 0) {
3296 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3297 } else if (TARGET_REGISTER_BITS == 32) {
3298 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3299 } else if (a->r1 == a->r2) {
3300 TCGv_i32 t32 = tcg_temp_new_i32();
3301 tcg_gen_trunc_reg_i32(t32, t2);
3302 tcg_gen_rotri_i32(t32, t32, sa);
3303 tcg_gen_extu_i32_reg(dest, t32);
3304 } else {
3305 TCGv_i64 t64 = tcg_temp_new_i64();
3306 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3307 tcg_gen_shri_i64(t64, t64, sa);
3308 tcg_gen_trunc_i64_reg(dest, t64);
3309 }
3310 save_gpr(ctx, a->t, dest);
3311
3312 /* Install the new nullification. */
3313 cond_free(&ctx->null_cond);
3314 if (a->c) {
3315 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3316 }
3317 return nullify_end(ctx);
3318 }
3319
3320 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3321 {
3322 unsigned len = 32 - a->clen;
3323 TCGv_reg dest, src, tmp;
3324
3325 if (a->c) {
3326 nullify_over(ctx);
3327 }
3328
3329 dest = dest_gpr(ctx, a->t);
3330 src = load_gpr(ctx, a->r);
3331 tmp = tcg_temp_new();
3332
3333 /* Recall that SAR uses big-endian bit numbering. */
3334 tcg_gen_andi_reg(tmp, cpu_sar, 31);
3335 tcg_gen_xori_reg(tmp, tmp, 31);
3336
3337 if (a->se) {
3338 tcg_gen_sar_reg(dest, src, tmp);
3339 tcg_gen_sextract_reg(dest, dest, 0, len);
3340 } else {
3341 tcg_gen_shr_reg(dest, src, tmp);
3342 tcg_gen_extract_reg(dest, dest, 0, len);
3343 }
3344 save_gpr(ctx, a->t, dest);
3345
3346 /* Install the new nullification. */
3347 cond_free(&ctx->null_cond);
3348 if (a->c) {
3349 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3350 }
3351 return nullify_end(ctx);
3352 }
3353
3354 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3355 {
3356 unsigned len = 32 - a->clen;
3357 unsigned cpos = 31 - a->pos;
3358 TCGv_reg dest, src;
3359
3360 if (a->c) {
3361 nullify_over(ctx);
3362 }
3363
3364 dest = dest_gpr(ctx, a->t);
3365 src = load_gpr(ctx, a->r);
3366 if (a->se) {
3367 tcg_gen_sextract_reg(dest, src, cpos, len);
3368 } else {
3369 tcg_gen_extract_reg(dest, src, cpos, len);
3370 }
3371 save_gpr(ctx, a->t, dest);
3372
3373 /* Install the new nullification. */
3374 cond_free(&ctx->null_cond);
3375 if (a->c) {
3376 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3377 }
3378 return nullify_end(ctx);
3379 }
3380
3381 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3382 {
3383 unsigned len = 32 - a->clen;
3384 target_sreg mask0, mask1;
3385 TCGv_reg dest;
3386
3387 if (a->c) {
3388 nullify_over(ctx);
3389 }
3390 if (a->cpos + len > 32) {
3391 len = 32 - a->cpos;
3392 }
3393
3394 dest = dest_gpr(ctx, a->t);
3395 mask0 = deposit64(0, a->cpos, len, a->i);
3396 mask1 = deposit64(-1, a->cpos, len, a->i);
3397
3398 if (a->nz) {
3399 TCGv_reg src = load_gpr(ctx, a->t);
3400 if (mask1 != -1) {
3401 tcg_gen_andi_reg(dest, src, mask1);
3402 src = dest;
3403 }
3404 tcg_gen_ori_reg(dest, src, mask0);
3405 } else {
3406 tcg_gen_movi_reg(dest, mask0);
3407 }
3408 save_gpr(ctx, a->t, dest);
3409
3410 /* Install the new nullification. */
3411 cond_free(&ctx->null_cond);
3412 if (a->c) {
3413 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3414 }
3415 return nullify_end(ctx);
3416 }
3417
3418 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3419 {
3420 unsigned rs = a->nz ? a->t : 0;
3421 unsigned len = 32 - a->clen;
3422 TCGv_reg dest, val;
3423
3424 if (a->c) {
3425 nullify_over(ctx);
3426 }
3427 if (a->cpos + len > 32) {
3428 len = 32 - a->cpos;
3429 }
3430
3431 dest = dest_gpr(ctx, a->t);
3432 val = load_gpr(ctx, a->r);
3433 if (rs == 0) {
3434 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3435 } else {
3436 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3437 }
3438 save_gpr(ctx, a->t, dest);
3439
3440 /* Install the new nullification. */
3441 cond_free(&ctx->null_cond);
3442 if (a->c) {
3443 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3444 }
3445 return nullify_end(ctx);
3446 }
3447
3448 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3449 unsigned nz, unsigned clen, TCGv_reg val)
3450 {
3451 unsigned rs = nz ? rt : 0;
3452 unsigned len = 32 - clen;
3453 TCGv_reg mask, tmp, shift, dest;
3454 unsigned msb = 1U << (len - 1);
3455
3456 dest = dest_gpr(ctx, rt);
3457 shift = tcg_temp_new();
3458 tmp = tcg_temp_new();
3459
3460 /* Convert big-endian bit numbering in SAR to left-shift. */
3461 tcg_gen_andi_reg(shift, cpu_sar, 31);
3462 tcg_gen_xori_reg(shift, shift, 31);
3463
3464 mask = tcg_temp_new();
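/* msb + (msb - 1) == 2**len - 1, the mask for a len-bit field
   before it is shifted into position. */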
3465 tcg_gen_movi_reg(mask, msb + (msb - 1));
3466 tcg_gen_and_reg(tmp, val, mask);
3467 if (rs) {
3468 tcg_gen_shl_reg(mask, mask, shift);
3469 tcg_gen_shl_reg(tmp, tmp, shift);
3470 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3471 tcg_gen_or_reg(dest, dest, tmp);
3472 } else {
3473 tcg_gen_shl_reg(dest, tmp, shift);
3474 }
3475 save_gpr(ctx, rt, dest);
3476
3477 /* Install the new nullification. */
3478 cond_free(&ctx->null_cond);
3479 if (c) {
3480 ctx->null_cond = do_sed_cond(ctx, c, false, dest);
3481 }
3482 return nullify_end(ctx);
3483 }
3484
3485 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3486 {
3487 if (a->c) {
3488 nullify_over(ctx);
3489 }
3490 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3491 }
3492
3493 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3494 {
3495 if (a->c) {
3496 nullify_over(ctx);
3497 }
3498 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
3499 }
3500
3501 static bool trans_be(DisasContext *ctx, arg_be *a)
3502 {
3503 TCGv_reg tmp;
3504
3505 #ifdef CONFIG_USER_ONLY
3506 /* ??? It seems like there should be a good way of using
3507 "be disp(sr2, r0)", the canonical gateway entry mechanism
3508 to our advantage. But that appears to be inconvenient to
3509 manage alongside branch delay slots. Therefore we handle
3510 entry into the gateway page via absolute address. */
3511 /* Since we don't implement spaces, just branch. Do notice the special
3512 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3513 goto_tb to the TB containing the syscall. */
3514 if (a->b == 0) {
3515 return do_dbranch(ctx, a->disp, a->l, a->n);
3516 }
3517 #else
3518 nullify_over(ctx);
3519 #endif
3520
3521 tmp = tcg_temp_new();
3522 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3523 tmp = do_ibranch_priv(ctx, tmp);
3524
3525 #ifdef CONFIG_USER_ONLY
3526 return do_ibranch(ctx, tmp, a->l, a->n);
3527 #else
3528 TCGv_i64 new_spc = tcg_temp_new_i64();
3529
3530 load_spr(ctx, new_spc, a->sp);
3531 if (a->l) {
3532 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3533 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3534 }
3535 if (a->n && use_nullify_skip(ctx)) {
3536 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3537 tcg_gen_addi_reg(tmp, tmp, 4);
3538 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3539 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3540 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3541 } else {
3542 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3543 if (ctx->iaoq_b == -1) {
3544 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3545 }
3546 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3547 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3548 nullify_set(ctx, a->n);
3549 }
3550 tcg_gen_lookup_and_goto_ptr();
3551 ctx->base.is_jmp = DISAS_NORETURN;
3552 return nullify_end(ctx);
3553 #endif
3554 }
3555
3556 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3557 {
3558 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3559 }
3560
3561 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3562 {
3563 target_ureg dest = iaoq_dest(ctx, a->disp);
3564
3565 nullify_over(ctx);
3566
3567 /* Make sure the caller hasn't done something weird with the queue.
3568 * ??? This is not quite the same as the PSW[B] bit, which would be
3569 * expensive to track. Real hardware will trap for
3570 * b gateway
3571 * b gateway+4 (in delay slot of first branch)
3572 * However, checking for a non-sequential instruction queue *will*
3573 * diagnose the security hole
3574 * b gateway
3575 * b evil
3576 * in which instructions at evil would run with increased privs.
3577 */
3578 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3579 return gen_illegal(ctx);
3580 }
3581
3582 #ifndef CONFIG_USER_ONLY
3583 if (ctx->tb_flags & PSW_C) {
3584 CPUHPPAState *env = cpu_env(ctx->cs);
3585 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3586 /* If we could not find a TLB entry, then we need to generate an
3587 ITLB miss exception so the kernel will provide it.
3588 The resulting TLB fill operation will invalidate this TB and
3589 we will re-translate, at which point we *will* be able to find
3590 the TLB entry and determine if this is in fact a gateway page. */
3591 if (type < 0) {
3592 gen_excp(ctx, EXCP_ITLB_MISS);
3593 return true;
3594 }
3595 /* No change for non-gateway pages or for priv decrease. */
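/* Gateway access-rights types 4-7 map to privilege 0-3; apply the
   promotion only when it raises (numerically lowers) the privilege,
   encoding the new level in the low two bits of the target. */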
3596 if (type >= 4 && type - 4 < ctx->privilege) {
3597 dest = deposit32(dest, 0, 2, type - 4);
3598 }
3599 } else {
3600 dest &= -4; /* priv = 0 */
3601 }
3602 #endif
3603
3604 if (a->l) {
3605 TCGv_reg tmp = dest_gpr(ctx, a->l);
3606 if (ctx->privilege < 3) {
3607 tcg_gen_andi_reg(tmp, tmp, -4);
3608 }
3609 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3610 save_gpr(ctx, a->l, tmp);
3611 }
3612
3613 return do_dbranch(ctx, dest, 0, a->n);
3614 }
3615
3616 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3617 {
3618 if (a->x) {
3619 TCGv_reg tmp = tcg_temp_new();
3620 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3621 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3622 /* The computation here never changes privilege level. */
3623 return do_ibranch(ctx, tmp, a->l, a->n);
3624 } else {
3625 /* BLR R0,RX is a good way to load PC+8 into RX. */
3626 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3627 }
3628 }
3629
3630 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3631 {
3632 TCGv_reg dest;
3633
3634 if (a->x == 0) {
3635 dest = load_gpr(ctx, a->b);
3636 } else {
3637 dest = tcg_temp_new();
3638 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3639 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3640 }
3641 dest = do_ibranch_priv(ctx, dest);
3642 return do_ibranch(ctx, dest, 0, a->n);
3643 }
3644
3645 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3646 {
3647 TCGv_reg dest;
3648
3649 #ifdef CONFIG_USER_ONLY
3650 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3651 return do_ibranch(ctx, dest, a->l, a->n);
3652 #else
3653 nullify_over(ctx);
3654 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3655
3656 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3657 if (ctx->iaoq_b == -1) {
3658 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3659 }
3660 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3661 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3662 if (a->l) {
3663 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3664 }
3665 nullify_set(ctx, a->n);
3666 tcg_gen_lookup_and_goto_ptr();
3667 ctx->base.is_jmp = DISAS_NORETURN;
3668 return nullify_end(ctx);
3669 #endif
3670 }
3671
3672 /*
3673 * Float class 0
3674 */
3675
3676 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3677 {
3678 tcg_gen_mov_i32(dst, src);
3679 }
3680
3681 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3682 {
3683 uint64_t ret;
3684
3685 if (TARGET_REGISTER_BITS == 64) {
3686 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3687 } else {
3688 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3689 }
3690
3691 nullify_over(ctx);
3692 save_frd(0, tcg_constant_i64(ret));
3693 return nullify_end(ctx);
3694 }
3695
3696 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3697 {
3698 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3699 }
3700
3701 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3702 {
3703 tcg_gen_mov_i64(dst, src);
3704 }
3705
3706 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3707 {
3708 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3709 }
3710
3711 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3712 {
3713 tcg_gen_andi_i32(dst, src, INT32_MAX);
3714 }
3715
3716 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3717 {
3718 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3719 }
3720
3721 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3722 {
3723 tcg_gen_andi_i64(dst, src, INT64_MAX);
3724 }
3725
3726 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3727 {
3728 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3729 }
3730
3731 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3734 }
3735
3736 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3737 {
3738 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3739 }
3740
3741 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3744 }
3745
3746 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3747 {
3748 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3749 }
3750
3751 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3752 {
3753 tcg_gen_xori_i32(dst, src, INT32_MIN);
3754 }
3755
3756 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3757 {
3758 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3759 }
3760
3761 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3762 {
3763 tcg_gen_xori_i64(dst, src, INT64_MIN);
3764 }
3765
3766 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3767 {
3768 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3769 }
3770
3771 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3772 {
3773 tcg_gen_ori_i32(dst, src, INT32_MIN);
3774 }
3775
3776 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3777 {
3778 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3779 }
3780
3781 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3782 {
3783 tcg_gen_ori_i64(dst, src, INT64_MIN);
3784 }
3785
3786 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3787 {
3788 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3789 }
3790
3791 /*
3792 * Float class 1
3793 */
3794
3795 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3796 {
3797 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3798 }
3799
3800 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3801 {
3802 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3803 }
3804
3805 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3806 {
3807 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3808 }
3809
3810 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3811 {
3812 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3813 }
3814
3815 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3816 {
3817 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3818 }
3819
3820 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3821 {
3822 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3823 }
3824
3825 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3826 {
3827 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3828 }
3829
3830 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3831 {
3832 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3833 }
3834
3835 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3836 {
3837 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3838 }
3839
3840 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3841 {
3842 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3843 }
3844
3845 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3846 {
3847 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3848 }
3849
3850 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3851 {
3852 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3853 }
3854
3855 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3856 {
3857 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3858 }
3859
3860 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3861 {
3862 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3863 }
3864
3865 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3866 {
3867 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3868 }
3869
3870 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3871 {
3872 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3873 }
3874
3875 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3876 {
3877 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3878 }
3879
3880 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3881 {
3882 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3883 }
3884
3885 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3886 {
3887 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3888 }
3889
3890 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3891 {
3892 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3893 }
3894
3895 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3896 {
3897 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3898 }
3899
3900 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3901 {
3902 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3903 }
3904
3905 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3906 {
3907 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3908 }
3909
3910 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3911 {
3912 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3913 }
3914
3915 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3916 {
3917 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3918 }
3919
3920 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3921 {
3922 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3923 }
3924
3925 /*
3926 * Float class 2
3927 */
3928
3929 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3930 {
3931 TCGv_i32 ta, tb, tc, ty;
3932
3933 nullify_over(ctx);
3934
3935 ta = load_frw0_i32(a->r1);
3936 tb = load_frw0_i32(a->r2);
3937 ty = tcg_constant_i32(a->y);
3938 tc = tcg_constant_i32(a->c);
3939
3940 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3941
3942 return nullify_end(ctx);
3943 }
3944
3945 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3946 {
3947 TCGv_i64 ta, tb;
3948 TCGv_i32 tc, ty;
3949
3950 nullify_over(ctx);
3951
3952 ta = load_frd0(a->r1);
3953 tb = load_frd0(a->r2);
3954 ty = tcg_constant_i32(a->y);
3955 tc = tcg_constant_i32(a->c);
3956
3957 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3958
3959 return nullify_end(ctx);
3960 }
3961
3962 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3963 {
3964 TCGv_reg t;
3965
3966 nullify_over(ctx);
3967
3968 t = tcg_temp_new();
3969 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3970
3971 if (a->y == 1) {
3972 int mask;
3973 bool inv = false;
3974
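/* These masks combine the FPSR C bit (0x4000000) with progressively
   smaller groups of compare-queue bits from the fr0 shadow; the rej
   variants test the same bits with the opposite sense. */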
3975 switch (a->c) {
3976 case 0: /* simple */
3977 tcg_gen_andi_reg(t, t, 0x4000000);
3978 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3979 goto done;
3980 case 2: /* rej */
3981 inv = true;
3982 /* fallthru */
3983 case 1: /* acc */
3984 mask = 0x43ff800;
3985 break;
3986 case 6: /* rej8 */
3987 inv = true;
3988 /* fallthru */
3989 case 5: /* acc8 */
3990 mask = 0x43f8000;
3991 break;
3992 case 9: /* acc6 */
3993 mask = 0x43e0000;
3994 break;
3995 case 13: /* acc4 */
3996 mask = 0x4380000;
3997 break;
3998 case 17: /* acc2 */
3999 mask = 0x4200000;
4000 break;
4001 default:
4002 gen_illegal(ctx);
4003 return true;
4004 }
4005 if (inv) {
4006 TCGv_reg c = tcg_constant_reg(mask);
4007 tcg_gen_or_reg(t, t, c);
4008 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4009 } else {
4010 tcg_gen_andi_reg(t, t, mask);
4011 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4012 }
4013 } else {
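        /* Other y encodings test a single status bit extracted from the
           shadow FPSR, selected by the transformed y value below.  */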
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
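    /* Both operands arrive zero-extended to 64 bits, so one i64
       multiply yields the full 64-bit unsigned product of XMPYU.  */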
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard. */
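/* The 5-bit specifier can only name fr16-fr31: values 0-15 map to
   indices 16-31 and values 16-31 map to indices 48-63, i.e. the two
   32-bit halves of fr16-fr31 in this file's 6-bit single-precision
   numbering.  Worked example: r = 19 -> (16 * 2) + 16 + 3 = 51, the
   other half of fr19, while r = 3 -> 16 + 3 = 19, fr19 itself.  */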
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

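    /* FMPYADD/FMPYSUB bundle two independent operations, a multiply
       tm = rm1 * rm2 and an add/sub ta = ta +/- ra; they are emitted
       back to back here under a single nullification check.  */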
    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

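    /* a->neg selects FMPYNFADD, which per the PA-RISC 2.0 definition
       negates the product before the fused add: -(x * y) + z.  */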
    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV. */
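    /* cs_base packs the front space id in its high 32 bits and the
       signed offset iaoq_b - iaoq_f in its low 32 bits; a zero offset
       means iaoq_b was not known when the TB was created.  */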
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page. */
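    /* -(pc | TARGET_PAGE_MASK) is the byte count to the end of the page;
       e.g. with 4 KiB pages and pc at offset 0xff8, bound = 8 / 4 = 2.  */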
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch. */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new();
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

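        /* A statically known nullification skips decode entirely; the
           nullified insn still advances the IA queues below.  */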
        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue. */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}