/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

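/*
 * A runtime condition: the TCG comparison C applied to (a0, a1).
 * TCG_COND_NEVER and TCG_COND_ALWAYS encode the statically false/true
 * cases, for which a0/a1 are unused.  Most importantly this describes
 * null_cond, the condition under which the next insn is nullified.
 */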
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
#define UNALIGN(C) MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* An inverted space register encoding: 0 explicitly selects sr0,
   rather than inferring the space from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
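
/*
 * That is, ma_to_m returns:
 *   M:A = 0:x  ->  0  (no base register update)
 *   M:A = 1:0  -> +1  (post-modify)
 *   M:A = 1:1  -> -1  (pre-modify)
 * matching the modify convention documented at do_load_32 below.
 */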

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}

/* Translate CMPI doubleword conditions to standard.  */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
#define DISAS_EXIT DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    return cond_make_tmp(c, a0, tcg_constant_reg(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    TCGv_reg t0 = tcg_temp_new();
    TCGv_reg t1 = tcg_temp_new();

    tcg_gen_mov_reg(t0, a0);
    tcg_gen_mov_reg(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

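/*
 * GR0 always reads as zero and ignores writes.  load_gpr therefore
 * materializes a fresh zero temporary for r0, and dest_gpr returns a
 * scratch temporary whenever the insn may yet be nullified, so that
 * the writeback in save_gpr can be committed conditionally.
 */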
static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

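/* Copy T into DEST; if the current insn may be nullified, commit via
   movcond so that DEST keeps its old value on the nullified path.  */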
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS 0
# define LO_OFS 4
#else
# define HI_OFS 4
# define LO_OFS 0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because...  */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

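/* The GVA offset field is 62 bits wide when PSW.W (wide, 64-bit
   addressing) is set, and 32 bits otherwise.  */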
static target_ureg gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest,
                            target_ureg ival, TCGv_reg vval)
{
    target_ureg mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_reg(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_andi_reg(dest, vval, mask);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_reg.  */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return TARGET_REGISTER_BITS == 64 && !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_reg res, TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <> (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32u_reg(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >= (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / > ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_reg(tmp, tmp, 31, 1);
            tcg_gen_and_reg(tmp, tmp, res);
            tcg_gen_ext32u_reg(tmp, tmp);
        } else {
            tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
            tcg_gen_and_reg(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV (!C / C) */
        /* Only bit 0 of cb_msb is ever set.  */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32s_reg(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res, TCGv_reg in1,
                             TCGv_reg in2, TCGv_reg sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t1 = tcg_temp_new();
        TCGv_reg t2 = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(t1, in1);
            tcg_gen_ext32u_reg(t2, in2);
        } else {
            tcg_gen_ext32s_reg(t1, in1);
            tcg_gen_ext32s_reg(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2: /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3: /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4: /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5: /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6: /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7: /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_reg tmp = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(tmp, res);
        } else {
            tcg_gen_ext32s_reg(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;
    target_ureg d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }
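    /* If computed above, CB holds the per-bit carry-out of IN1 + IN2:
       (in1 & in2) | ((in1 | in2) & ~res).  */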

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

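/*
 * Return the carry bit appropriate for the condition width.  For 32-bit
 * conditions on a 64-bit cpu, the carry out of bit 31 lives in bit 32 of
 * the full carry vector CB; otherwise CB_MSB already holds the carry out
 * of the most significant bit.
 */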
static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

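    /* Overflow iff the operands have the same sign and the sign of the
       result differs: SV = (res ^ in1) & ~(in1 ^ in2), with V in the
       sign bit of SV.  */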
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

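    /* Overflow iff the operands differ in sign and the sign of the
       result differs from IN1: SV = (res ^ in1) & (in1 ^ in2).  */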
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
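        /* CB = IN1 ^ IN2 ^ DEST is the per-bit carry(-in) vector;
           the carry out of the MSB is kept separately in CB_MSB.  */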
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit.  */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
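    /* Either way, CB = IN1 ^ ~IN2 ^ DEST: the carry vector of the
       underlying addition, with the carry out of the MSB in CB_MSB.  */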

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit.  */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf, bool d)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf, bool d,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_tl();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_tl();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index.  */
    tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_tl();
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg do_load_64
#define do_store_reg do_store_64
#else
#define do_load_reg do_load_32
#define do_store_reg do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new();
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches.  */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new();
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_reg(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
1985 /* The (conditional) branch, B, nullifies the next insn, N,
1986 and we're allowed to skip execution of N (no single-step or
1987 tracepoint in effect). Since the goto_ptr that we must use
1988 for the indirect branch consumes no special resources, we
1989 can (conditionally) skip B and continue execution. */
1990 /* The use_nullify_skip test implies we have a known control path. */
1991 tcg_debug_assert(ctx->iaoq_b != -1);
1992 tcg_debug_assert(ctx->iaoq_n != -1);
1993
1994 /* We do have to handle the non-local temporary, DEST, before
1995 branching. Since IAOQ_F is not really live at this point, we
1996 can simply store DEST optimistically. Similarly with IAOQ_B. */
1997 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1998 next = tcg_temp_new();
1999 tcg_gen_addi_reg(next, dest, 4);
2000 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
2001
2002 nullify_over(ctx);
2003 if (link != 0) {
2004 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
2005 }
2006 tcg_gen_lookup_and_goto_ptr();
2007 return nullify_end(ctx);
2008 } else {
2009 c = ctx->null_cond.c;
2010 a0 = ctx->null_cond.a0;
2011 a1 = ctx->null_cond.a1;
2012
2013 tmp = tcg_temp_new();
2014 next = tcg_temp_new();
2015
2016 copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
2017 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
2018 ctx->iaoq_n = -1;
2019 ctx->iaoq_n_var = next;
2020
2021 if (link != 0) {
2022 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
2023 }
2024
2025 if (is_n) {
2026 /* The branch nullifies the next insn, which means the state of N
2027 after the branch is the inverse of the state of N that applied
2028 to the branch. */
2029 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
2030 cond_free(&ctx->null_cond);
2031 ctx->null_cond = cond_make_n();
2032 ctx->psw_n_nonzero = true;
2033 } else {
2034 cond_free(&ctx->null_cond);
2035 }
2036 }
2037 return true;
2038 }
2039
2040 /* Implement
2041 * if (IAOQ_Front{30..31} < GR[b]{30..31})
2042 * IAOQ_Next{30..31} ← GR[b]{30..31};
2043 * else
2044 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2045 * which keeps the privilege level from being increased.
2046 */
2047 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
2048 {
2049 TCGv_reg dest;
2050 switch (ctx->privilege) {
2051 case 0:
2052 /* Privilege 0 is maximum and is allowed to decrease. */
2053 return offset;
2054 case 3:
2055 /* Privilege 3 is minimum and is never allowed to increase. */
2056 dest = tcg_temp_new();
2057 tcg_gen_ori_reg(dest, offset, 3);
2058 break;
2059 default:
2060 dest = tcg_temp_new();
2061 tcg_gen_andi_reg(dest, offset, -4);
2062 tcg_gen_ori_reg(dest, dest, ctx->privilege);
2063 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
2064 break;
2065 }
2066 return dest;
2067 }
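
/* Worked example of the clamp above, assuming ctx->privilege == 1:
   a target whose privilege bits are 0 yields dest = (offset & -4) | 1,
   which is numerically greater than offset, so the movcond selects
   dest and execution continues at privilege 1; a target with privilege
   bits 3 is already greater, so offset is kept as-is.  Larger numbers
   are *less* privileged, hence privilege is never increased.  */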
2068
2069 #ifdef CONFIG_USER_ONLY
2070 /* On Linux, page zero is normally marked execute only + gateway.
2071 Therefore normal read or write is supposed to fail, but specific
2072 offsets have kernel code mapped to raise permissions to implement
2073 system calls. Handling this via an explicit check here, rather
2074 in than the "be disp(sr2,r0)" instruction that probably sent us
2075 here, is the easiest way to handle the branch delay slot on the
2076 aforementioned BE. */
2077 static void do_page_zero(DisasContext *ctx)
2078 {
2079 TCGv_reg tmp;
2080
2081 /* If by some means we get here with PSW[N]=1, that implies that
2082 the B,GATE instruction would be skipped, and we'd fault on the
2083 next insn within the privileged page. */
2084 switch (ctx->null_cond.c) {
2085 case TCG_COND_NEVER:
2086 break;
2087 case TCG_COND_ALWAYS:
2088 tcg_gen_movi_reg(cpu_psw_n, 0);
2089 goto do_sigill;
2090 default:
2091 /* Since this is always the first (and only) insn within the
2092 TB, we should know the state of PSW[N] from TB->FLAGS. */
2093 g_assert_not_reached();
2094 }
2095
2096 /* Check that we didn't arrive here via some means that allowed
2097 non-sequential instruction execution. Normally the PSW[B] bit
2098 detects this by preventing the B,GATE instruction from executing
2099 under such conditions. */
2100 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2101 goto do_sigill;
2102 }
2103
2104 switch (ctx->iaoq_f & -4) {
2105 case 0x00: /* Null pointer call */
2106 gen_excp_1(EXCP_IMP);
2107 ctx->base.is_jmp = DISAS_NORETURN;
2108 break;
2109
2110 case 0xb0: /* LWS */
2111 gen_excp_1(EXCP_SYSCALL_LWS);
2112 ctx->base.is_jmp = DISAS_NORETURN;
2113 break;
2114
2115 case 0xe0: /* SET_THREAD_POINTER */
2116 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
2117 tmp = tcg_temp_new();
2118 tcg_gen_ori_reg(tmp, cpu_gr[31], 3);
2119 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
2120 tcg_gen_addi_reg(tmp, tmp, 4);
2121 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
2122 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2123 break;
2124
2125 case 0x100: /* SYSCALL */
2126 gen_excp_1(EXCP_SYSCALL);
2127 ctx->base.is_jmp = DISAS_NORETURN;
2128 break;
2129
2130 default:
2131 do_sigill:
2132 gen_excp_1(EXCP_ILL);
2133 ctx->base.is_jmp = DISAS_NORETURN;
2134 break;
2135 }
2136 }
2137 #endif
2138
2139 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2140 {
2141 cond_free(&ctx->null_cond);
2142 return true;
2143 }
2144
2145 static bool trans_break(DisasContext *ctx, arg_break *a)
2146 {
2147 return gen_excp_iir(ctx, EXCP_BREAK);
2148 }
2149
2150 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2151 {
2152 /* No point in nullifying the memory barrier. */
2153 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2154
2155 cond_free(&ctx->null_cond);
2156 return true;
2157 }
2158
2159 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2160 {
2161 unsigned rt = a->t;
2162 TCGv_reg tmp = dest_gpr(ctx, rt);
2163 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2164 save_gpr(ctx, rt, tmp);
2165
2166 cond_free(&ctx->null_cond);
2167 return true;
2168 }
2169
2170 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2171 {
2172 unsigned rt = a->t;
2173 unsigned rs = a->sp;
2174 TCGv_i64 t0 = tcg_temp_new_i64();
2175 TCGv_reg t1 = tcg_temp_new();
2176
2177 load_spr(ctx, t0, rs);
2178 tcg_gen_shri_i64(t0, t0, 32);
2179 tcg_gen_trunc_i64_reg(t1, t0);
2180
2181 save_gpr(ctx, rt, t1);
2182
2183 cond_free(&ctx->null_cond);
2184 return true;
2185 }
2186
2187 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2188 {
2189 unsigned rt = a->t;
2190 unsigned ctl = a->r;
2191 TCGv_reg tmp;
2192
2193 switch (ctl) {
2194 case CR_SAR:
2195 if (a->e == 0) {
2196 /* MFSAR without ,W masks low 5 bits. */
2197 tmp = dest_gpr(ctx, rt);
2198 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2199 save_gpr(ctx, rt, tmp);
2200 goto done;
2201 }
2202 save_gpr(ctx, rt, cpu_sar);
2203 goto done;
2204 case CR_IT: /* Interval Timer */
2205 /* FIXME: Respect PSW_S bit. */
2206 nullify_over(ctx);
2207 tmp = dest_gpr(ctx, rt);
2208 if (translator_io_start(&ctx->base)) {
2209 gen_helper_read_interval_timer(tmp);
2210 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2211 } else {
2212 gen_helper_read_interval_timer(tmp);
2213 }
2214 save_gpr(ctx, rt, tmp);
2215 return nullify_end(ctx);
2216 case 26:
2217 case 27:
2218 break;
2219 default:
2220 /* All other control registers are privileged. */
2221 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2222 break;
2223 }
2224
2225 tmp = tcg_temp_new();
2226 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2227 save_gpr(ctx, rt, tmp);
2228
2229 done:
2230 cond_free(&ctx->null_cond);
2231 return true;
2232 }
2233
2234 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2235 {
2236 unsigned rr = a->r;
2237 unsigned rs = a->sp;
2238 TCGv_i64 t64;
2239
2240 if (rs >= 5) {
2241 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2242 }
2243 nullify_over(ctx);
2244
2245 t64 = tcg_temp_new_i64();
2246 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2247 tcg_gen_shli_i64(t64, t64, 32);
2248
2249 if (rs >= 4) {
2250 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2251 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2252 } else {
2253 tcg_gen_mov_i64(cpu_sr[rs], t64);
2254 }
2255
2256 return nullify_end(ctx);
2257 }
2258
2259 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2260 {
2261 unsigned ctl = a->t;
2262 TCGv_reg reg;
2263 TCGv_reg tmp;
2264
2265 if (ctl == CR_SAR) {
2266 reg = load_gpr(ctx, a->r);
2267 tmp = tcg_temp_new();
2268 tcg_gen_andi_reg(tmp, reg, ctx->is_pa20 ? 63 : 31);
2269 save_or_nullify(ctx, cpu_sar, tmp);
2270
2271 cond_free(&ctx->null_cond);
2272 return true;
2273 }
2274
2275 /* All other control registers are privileged or read-only. */
2276 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2277
2278 #ifndef CONFIG_USER_ONLY
2279 nullify_over(ctx);
2280 reg = load_gpr(ctx, a->r);
2281
2282 switch (ctl) {
2283 case CR_IT:
2284 gen_helper_write_interval_timer(tcg_env, reg);
2285 break;
2286 case CR_EIRR:
2287 gen_helper_write_eirr(tcg_env, reg);
2288 break;
2289 case CR_EIEM:
2290 gen_helper_write_eiem(tcg_env, reg);
2291 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2292 break;
2293
2294 case CR_IIASQ:
2295 case CR_IIAOQ:
2296 /* FIXME: Respect PSW_Q bit */
2297 /* The write advances the queue and stores to the back element. */
2298 tmp = tcg_temp_new();
2299 tcg_gen_ld_reg(tmp, tcg_env,
2300 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2301 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2302 tcg_gen_st_reg(reg, tcg_env,
2303 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2304 break;
2305
2306 case CR_PID1:
2307 case CR_PID2:
2308 case CR_PID3:
2309 case CR_PID4:
2310 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2311 #ifndef CONFIG_USER_ONLY
2312 gen_helper_change_prot_id(tcg_env);
2313 #endif
2314 break;
2315
2316 default:
2317 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2318 break;
2319 }
2320 return nullify_end(ctx);
2321 #endif
2322 }
2323
2324 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2325 {
2326 TCGv_reg tmp = tcg_temp_new();
2327
2328 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2329 tcg_gen_andi_reg(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2330 save_or_nullify(ctx, cpu_sar, tmp);
2331
2332 cond_free(&ctx->null_cond);
2333 return true;
2334 }
2335
2336 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2337 {
2338 TCGv_reg dest = dest_gpr(ctx, a->t);
2339
2340 #ifdef CONFIG_USER_ONLY
2341 /* We don't implement space registers in user mode. */
2342 tcg_gen_movi_reg(dest, 0);
2343 #else
2344 TCGv_i64 t0 = tcg_temp_new_i64();
2345
2346 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2347 tcg_gen_shri_i64(t0, t0, 32);
2348 tcg_gen_trunc_i64_reg(dest, t0);
2349 #endif
2350 save_gpr(ctx, a->t, dest);
2351
2352 cond_free(&ctx->null_cond);
2353 return true;
2354 }
2355
2356 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2357 {
2358 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2359 #ifndef CONFIG_USER_ONLY
2360 TCGv_reg tmp;
2361
2362 nullify_over(ctx);
2363
2364 tmp = tcg_temp_new();
2365 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2366 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2367 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2368 save_gpr(ctx, a->t, tmp);
2369
2370 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2371 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2372 return nullify_end(ctx);
2373 #endif
2374 }
2375
2376 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2377 {
2378 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2379 #ifndef CONFIG_USER_ONLY
2380 TCGv_reg tmp;
2381
2382 nullify_over(ctx);
2383
2384 tmp = tcg_temp_new();
2385 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2386 tcg_gen_ori_reg(tmp, tmp, a->i);
2387 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2388 save_gpr(ctx, a->t, tmp);
2389
2390 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2391 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2392 return nullify_end(ctx);
2393 #endif
2394 }
2395
2396 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2397 {
2398 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2399 #ifndef CONFIG_USER_ONLY
2400 TCGv_reg tmp, reg;
2401 nullify_over(ctx);
2402
2403 reg = load_gpr(ctx, a->r);
2404 tmp = tcg_temp_new();
2405 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2406
2407 /* Exit the TB to recognize new interrupts. */
2408 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2409 return nullify_end(ctx);
2410 #endif
2411 }
2412
2413 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2414 {
2415 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2416 #ifndef CONFIG_USER_ONLY
2417 nullify_over(ctx);
2418
2419 if (rfi_r) {
2420 gen_helper_rfi_r(tcg_env);
2421 } else {
2422 gen_helper_rfi(tcg_env);
2423 }
2424 /* Exit the TB to recognize new interrupts. */
2425 tcg_gen_exit_tb(NULL, 0);
2426 ctx->base.is_jmp = DISAS_NORETURN;
2427
2428 return nullify_end(ctx);
2429 #endif
2430 }
2431
2432 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2433 {
2434 return do_rfi(ctx, false);
2435 }
2436
2437 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2438 {
2439 return do_rfi(ctx, true);
2440 }
2441
2442 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2443 {
2444 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2445 #ifndef CONFIG_USER_ONLY
2446 nullify_over(ctx);
2447 gen_helper_halt(tcg_env);
2448 ctx->base.is_jmp = DISAS_NORETURN;
2449 return nullify_end(ctx);
2450 #endif
2451 }
2452
2453 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2454 {
2455 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2456 #ifndef CONFIG_USER_ONLY
2457 nullify_over(ctx);
2458 gen_helper_reset(tcg_env);
2459 ctx->base.is_jmp = DISAS_NORETURN;
2460 return nullify_end(ctx);
2461 #endif
2462 }
2463
2464 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2465 {
2466 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2467 #ifndef CONFIG_USER_ONLY
2468 nullify_over(ctx);
2469 gen_helper_getshadowregs(tcg_env);
2470 return nullify_end(ctx);
2471 #endif
2472 }
2473
2474 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2475 {
2476 if (a->m) {
2477 TCGv_reg dest = dest_gpr(ctx, a->b);
2478 TCGv_reg src1 = load_gpr(ctx, a->b);
2479 TCGv_reg src2 = load_gpr(ctx, a->x);
2480
2481 /* The only thing we need to do is the base register modification. */
2482 tcg_gen_add_reg(dest, src1, src2);
2483 save_gpr(ctx, a->b, dest);
2484 }
2485 cond_free(&ctx->null_cond);
2486 return true;
2487 }
2488
2489 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2490 {
2491 TCGv_reg dest, ofs;
2492 TCGv_i32 level, want;
2493 TCGv_tl addr;
2494
2495 nullify_over(ctx);
2496
2497 dest = dest_gpr(ctx, a->t);
2498 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2499
2500 if (a->imm) {
2501 level = tcg_constant_i32(a->ri);
2502 } else {
2503 level = tcg_temp_new_i32();
2504 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2505 tcg_gen_andi_i32(level, level, 3);
2506 }
2507 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2508
2509 gen_helper_probe(dest, tcg_env, addr, level, want);
2510
2511 save_gpr(ctx, a->t, dest);
2512 return nullify_end(ctx);
2513 }
2514
2515 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2516 {
2517 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2518 #ifndef CONFIG_USER_ONLY
2519 TCGv_tl addr;
2520 TCGv_reg ofs, reg;
2521
2522 nullify_over(ctx);
2523
2524 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2525 reg = load_gpr(ctx, a->r);
2526 if (a->addr) {
2527 gen_helper_itlba(tcg_env, addr, reg);
2528 } else {
2529 gen_helper_itlbp(tcg_env, addr, reg);
2530 }
2531
2532 /* Exit TB for TLB change if mmu is enabled. */
2533 if (ctx->tb_flags & PSW_C) {
2534 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2535 }
2536 return nullify_end(ctx);
2537 #endif
2538 }
2539
2540 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2541 {
2542 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2543 #ifndef CONFIG_USER_ONLY
2544 TCGv_tl addr;
2545 TCGv_reg ofs;
2546
2547 nullify_over(ctx);
2548
2549 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2550 if (a->m) {
2551 save_gpr(ctx, a->b, ofs);
2552 }
2553 if (a->local) {
2554 gen_helper_ptlbe(tcg_env);
2555 } else {
2556 gen_helper_ptlb(tcg_env, addr);
2557 }
2558
2559 /* Exit TB for TLB change if mmu is enabled. */
2560 if (ctx->tb_flags & PSW_C) {
2561 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2562 }
2563 return nullify_end(ctx);
2564 #endif
2565 }
2566
2567 /*
2568 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2569 * See
2570 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2571 * page 13-9 (195/206)
2572 */
2573 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2574 {
2575 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2576 #ifndef CONFIG_USER_ONLY
2577 TCGv_tl addr, atl, stl;
2578 TCGv_reg reg;
2579
2580 nullify_over(ctx);
2581
2582 /*
2583 * FIXME:
2584 * if (not (pcxl or pcxl2))
2585 * return gen_illegal(ctx);
2586 *
2587 * Note for future: these are 32-bit systems; no hppa64.
2588 */
2589
2590 atl = tcg_temp_new_tl();
2591 stl = tcg_temp_new_tl();
2592 addr = tcg_temp_new_tl();
2593
2594 tcg_gen_ld32u_i64(stl, tcg_env,
2595 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2596 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2597 tcg_gen_ld32u_i64(atl, tcg_env,
2598 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2599 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2600 tcg_gen_shli_i64(stl, stl, 32);
2601 tcg_gen_or_tl(addr, atl, stl);
2602
2603 reg = load_gpr(ctx, a->r);
2604 if (a->addr) {
2605 gen_helper_itlba(tcg_env, addr, reg);
2606 } else {
2607 gen_helper_itlbp(tcg_env, addr, reg);
2608 }
2609
2610 /* Exit TB for TLB change if mmu is enabled. */
2611 if (ctx->tb_flags & PSW_C) {
2612 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2613 }
2614 return nullify_end(ctx);
2615 #endif
2616 }
2617
2618 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2619 {
2620 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2621 #ifndef CONFIG_USER_ONLY
2622 TCGv_tl vaddr;
2623 TCGv_reg ofs, paddr;
2624
2625 nullify_over(ctx);
2626
2627 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2628
2629 paddr = tcg_temp_new();
2630 gen_helper_lpa(paddr, tcg_env, vaddr);
2631
2632 /* Note that physical address result overrides base modification. */
2633 if (a->m) {
2634 save_gpr(ctx, a->b, ofs);
2635 }
2636 save_gpr(ctx, a->t, paddr);
2637
2638 return nullify_end(ctx);
2639 #endif
2640 }
2641
2642 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2643 {
2644 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2645
2646 /* The Coherence Index is an implementation-defined function of the
2647 physical address. Two addresses with the same CI have a coherent
2648 view of the cache. Our implementation is to return 0 for all addresses,
2649 since the entire address space is coherent. */
2650 save_gpr(ctx, a->t, tcg_constant_reg(0));
2651
2652 cond_free(&ctx->null_cond);
2653 return true;
2654 }
2655
2656 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2657 {
2658 return do_add_reg(ctx, a, false, false, false, false);
2659 }
2660
2661 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2662 {
2663 return do_add_reg(ctx, a, true, false, false, false);
2664 }
2665
2666 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2667 {
2668 return do_add_reg(ctx, a, false, true, false, false);
2669 }
2670
2671 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2672 {
2673 return do_add_reg(ctx, a, false, false, false, true);
2674 }
2675
2676 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2677 {
2678 return do_add_reg(ctx, a, false, true, false, true);
2679 }
2680
2681 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2682 {
2683 return do_sub_reg(ctx, a, false, false, false);
2684 }
2685
2686 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2687 {
2688 return do_sub_reg(ctx, a, true, false, false);
2689 }
2690
2691 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2692 {
2693 return do_sub_reg(ctx, a, false, false, true);
2694 }
2695
2696 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2697 {
2698 return do_sub_reg(ctx, a, true, false, true);
2699 }
2700
2701 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2702 {
2703 return do_sub_reg(ctx, a, false, true, false);
2704 }
2705
2706 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2707 {
2708 return do_sub_reg(ctx, a, true, true, false);
2709 }
2710
2711 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2712 {
2713 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2714 }
2715
2716 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2717 {
2718 return do_log_reg(ctx, a, tcg_gen_and_reg);
2719 }
2720
2721 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2722 {
2723 if (a->cf == 0) {
2724 unsigned r2 = a->r2;
2725 unsigned r1 = a->r1;
2726 unsigned rt = a->t;
2727
2728 if (rt == 0) { /* NOP */
2729 cond_free(&ctx->null_cond);
2730 return true;
2731 }
2732 if (r2 == 0) { /* COPY */
2733 if (r1 == 0) {
2734 TCGv_reg dest = dest_gpr(ctx, rt);
2735 tcg_gen_movi_reg(dest, 0);
2736 save_gpr(ctx, rt, dest);
2737 } else {
2738 save_gpr(ctx, rt, cpu_gr[r1]);
2739 }
2740 cond_free(&ctx->null_cond);
2741 return true;
2742 }
2743 #ifndef CONFIG_USER_ONLY
2744 /* These are QEMU extensions and are nops in the real architecture:
2745 *
2746 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2747 * or %r31,%r31,%r31 -- death loop; offline cpu,
2748 * currently implemented as idle.
2749 */
2750 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2751 /* No need to check for supervisor, as userland can only pause
2752 until the next timer interrupt. */
2753 nullify_over(ctx);
2754
2755 /* Advance the instruction queue. */
2756 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2757 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2758 nullify_set(ctx, 0);
2759
2760 /* Tell the qemu main loop to halt until this cpu has work. */
2761 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2762 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2763 gen_excp_1(EXCP_HALTED);
2764 ctx->base.is_jmp = DISAS_NORETURN;
2765
2766 return nullify_end(ctx);
2767 }
2768 #endif
2769 }
2770 return do_log_reg(ctx, a, tcg_gen_or_reg);
2771 }
2772
2773 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2774 {
2775 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2776 }
2777
2778 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2779 {
2780 TCGv_reg tcg_r1, tcg_r2;
2781
2782 if (a->cf) {
2783 nullify_over(ctx);
2784 }
2785 tcg_r1 = load_gpr(ctx, a->r1);
2786 tcg_r2 = load_gpr(ctx, a->r2);
2787 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2788 return nullify_end(ctx);
2789 }
2790
2791 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2792 {
2793 TCGv_reg tcg_r1, tcg_r2;
2794
2795 if (a->cf) {
2796 nullify_over(ctx);
2797 }
2798 tcg_r1 = load_gpr(ctx, a->r1);
2799 tcg_r2 = load_gpr(ctx, a->r2);
2800 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_reg);
2801 return nullify_end(ctx);
2802 }
2803
2804 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2805 {
2806 TCGv_reg tcg_r1, tcg_r2, tmp;
2807
2808 if (a->cf) {
2809 nullify_over(ctx);
2810 }
2811 tcg_r1 = load_gpr(ctx, a->r1);
2812 tcg_r2 = load_gpr(ctx, a->r2);
2813 tmp = tcg_temp_new();
2814 tcg_gen_not_reg(tmp, tcg_r2);
2815 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_reg);
2816 return nullify_end(ctx);
2817 }
2818
2819 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2820 {
2821 return do_uaddcm(ctx, a, false);
2822 }
2823
2824 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2825 {
2826 return do_uaddcm(ctx, a, true);
2827 }
2828
2829 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2830 {
2831 TCGv_reg tmp;
2832
2833 nullify_over(ctx);
2834
2835 tmp = tcg_temp_new();
2836 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2837 if (!is_i) {
2838 tcg_gen_not_reg(tmp, tmp);
2839 }
2840 tcg_gen_andi_reg(tmp, tmp, (target_ureg)0x1111111111111111ull);
2841 tcg_gen_muli_reg(tmp, tmp, 6);
2842 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2843 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2844 return nullify_end(ctx);
2845 }
2846
2847 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2848 {
2849 return do_dcor(ctx, a, false);
2850 }
2851
2852 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2853 {
2854 return do_dcor(ctx, a, true);
2855 }
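
/* Worked BCD example: 19 + 19 computed with a binary ADD gives 0x32,
   with the nibble-0 carry recorded in PSW[CB].  DCOR,I then adds 6 to
   each digit whose carry bit is set, giving the correct BCD sum 0x38;
   plain DCOR instead subtracts 6 from each digit whose carry is clear,
   matching the pre-biased addition scheme.  */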
2856
2857 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2858 {
2859 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2860 TCGv_reg cout;
2861
2862 nullify_over(ctx);
2863
2864 in1 = load_gpr(ctx, a->r1);
2865 in2 = load_gpr(ctx, a->r2);
2866
2867 add1 = tcg_temp_new();
2868 add2 = tcg_temp_new();
2869 addc = tcg_temp_new();
2870 dest = tcg_temp_new();
2871 zero = tcg_constant_reg(0);
2872
2873 /* Form R1 << 1 | PSW[CB]{8}. */
2874 tcg_gen_add_reg(add1, in1, in1);
2875 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2876
2877 /*
2878 * Add or subtract R2, depending on PSW[V]. Proper computation of
2879 * carry requires that we subtract via + ~R2 + 1, as described in
2880 * the manual. By extracting and masking V, we can produce the
2881 * proper inputs to the addition without movcond.
2882 */
2883 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2884 tcg_gen_xor_reg(add2, in2, addc);
2885 tcg_gen_andi_reg(addc, addc, 1);
2886
2887 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2888 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2889
2890 /* Write back the result register. */
2891 save_gpr(ctx, a->t, dest);
2892
2893 /* Write back PSW[CB]. */
2894 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2895 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2896
2897 /* Write back PSW[V] for the division step. */
2898 cout = get_psw_carry(ctx, false);
2899 tcg_gen_neg_reg(cpu_psw_v, cout);
2900 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2901
2902 /* Install the new nullification. */
2903 if (a->cf) {
2904 TCGv_reg sv = NULL;
2905 if (cond_need_sv(a->cf >> 1)) {
2906 /* ??? The lshift is supposed to contribute to overflow. */
2907 sv = do_add_sv(ctx, dest, add1, add2);
2908 }
2909 ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2910 }
2911
2912 return nullify_end(ctx);
2913 }
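
/* Usage sketch, per the architecture manual's division sequence: after
   the initial setup primes PSW[V], software issues one DS per quotient
   bit (32 for a word divide).  Each step shifts the partial remainder
   left by one and, according to the sign remembered in PSW[V], either
   adds or subtracts the divisor -- the add1/add2/addc computation
   above.  This is non-restoring division.  */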
2914
2915 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2916 {
2917 return do_add_imm(ctx, a, false, false);
2918 }
2919
2920 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2921 {
2922 return do_add_imm(ctx, a, true, false);
2923 }
2924
2925 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2926 {
2927 return do_add_imm(ctx, a, false, true);
2928 }
2929
2930 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2931 {
2932 return do_add_imm(ctx, a, true, true);
2933 }
2934
2935 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2936 {
2937 return do_sub_imm(ctx, a, false);
2938 }
2939
2940 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2941 {
2942 return do_sub_imm(ctx, a, true);
2943 }
2944
2945 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2946 {
2947 TCGv_reg tcg_im, tcg_r2;
2948
2949 if (a->cf) {
2950 nullify_over(ctx);
2951 }
2952
2953 tcg_im = tcg_constant_reg(a->i);
2954 tcg_r2 = load_gpr(ctx, a->r);
2955 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2956
2957 return nullify_end(ctx);
2958 }
2959
2960 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2961 {
2962 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2963 return gen_illegal(ctx);
2964 } else {
2965 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2966 a->disp, a->sp, a->m, a->size | MO_TE);
2967 }
2968 }
2969
2970 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2971 {
2972 assert(a->x == 0 && a->scale == 0);
2973 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2974 return gen_illegal(ctx);
2975 } else {
2976 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2977 }
2978 }
2979
2980 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2981 {
2982 MemOp mop = MO_TE | MO_ALIGN | a->size;
2983 TCGv_reg zero, dest, ofs;
2984 TCGv_tl addr;
2985
2986 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2987 return gen_illegal(ctx);
2988 }
2989
2990 nullify_over(ctx);
2991
2992 if (a->m) {
2993 /* Base register modification. Make sure that if RT == RB,
2994 we see the result of the load. */
2995 dest = tcg_temp_new();
2996 } else {
2997 dest = dest_gpr(ctx, a->t);
2998 }
2999
3000 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3001 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3002
3003 /*
3004 * For hppa1.1, LDCW is undefined unless aligned mod 16.
3005 * However actual hardware succeeds with aligned mod 4.
3006 * Detect this case and log a GUEST_ERROR.
3007 *
3008 * TODO: HPPA64 relaxes the over-alignment requirement
3009 * with the ,co completer.
3010 */
3011 gen_helper_ldc_check(addr);
3012
3013 zero = tcg_constant_reg(0);
3014 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
3015
3016 if (a->m) {
3017 save_gpr(ctx, a->b, ofs);
3018 }
3019 save_gpr(ctx, a->t, dest);
3020
3021 return nullify_end(ctx);
3022 }
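
/* Usage sketch: the load-and-clear instructions are the architecture's
   only atomic read-modify-write primitives, so a spinlock acquire is
   conventionally

        spin:   ldcw    0(lock),r1      ; swap in zero, fetch old value
                comib,= 0,r1,spin       ; zero: lock was already held
                nop                     ; branch delay slot

   with the free lock word holding a nonzero value and, per the
   alignment note above, placed on a 16-byte boundary.  */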
3023
3024 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3025 {
3026 TCGv_reg ofs, val;
3027 TCGv_tl addr;
3028
3029 nullify_over(ctx);
3030
3031 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3032 ctx->mmu_idx == MMU_PHYS_IDX);
3033 val = load_gpr(ctx, a->r);
3034 if (a->a) {
3035 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3036 gen_helper_stby_e_parallel(tcg_env, addr, val);
3037 } else {
3038 gen_helper_stby_e(tcg_env, addr, val);
3039 }
3040 } else {
3041 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3042 gen_helper_stby_b_parallel(tcg_env, addr, val);
3043 } else {
3044 gen_helper_stby_b(tcg_env, addr, val);
3045 }
3046 }
3047 if (a->m) {
3048 tcg_gen_andi_reg(ofs, ofs, ~3);
3049 save_gpr(ctx, a->b, ofs);
3050 }
3051
3052 return nullify_end(ctx);
3053 }
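
/* Usage note: STBY,B paired with STBY,E covers the partial words at
   either end of an unaligned store -- each form writes only a byte
   subset of its word, which is why the semantics live in helpers
   (with _parallel variants for atomicity under MTTCG) rather than in
   a plain tcg store.  */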
3054
3055 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3056 {
3057 TCGv_reg ofs, val;
3058 TCGv_tl addr;
3059
3060 if (!ctx->is_pa20) {
3061 return false;
3062 }
3063 nullify_over(ctx);
3064
3065 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3066 ctx->mmu_idx == MMU_PHYS_IDX);
3067 val = load_gpr(ctx, a->r);
3068 if (a->a) {
3069 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3070 gen_helper_stdby_e_parallel(tcg_env, addr, val);
3071 } else {
3072 gen_helper_stdby_e(tcg_env, addr, val);
3073 }
3074 } else {
3075 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3076 gen_helper_stdby_b_parallel(tcg_env, addr, val);
3077 } else {
3078 gen_helper_stdby_b(tcg_env, addr, val);
3079 }
3080 }
3081 if (a->m) {
3082 tcg_gen_andi_reg(ofs, ofs, ~7);
3083 save_gpr(ctx, a->b, ofs);
3084 }
3085
3086 return nullify_end(ctx);
3087 }
3088
3089 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3090 {
3091 int hold_mmu_idx = ctx->mmu_idx;
3092
3093 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3094 ctx->mmu_idx = MMU_PHYS_IDX;
3095 trans_ld(ctx, a);
3096 ctx->mmu_idx = hold_mmu_idx;
3097 return true;
3098 }
3099
3100 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3101 {
3102 int hold_mmu_idx = ctx->mmu_idx;
3103
3104 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3105 ctx->mmu_idx = MMU_PHYS_IDX;
3106 trans_st(ctx, a);
3107 ctx->mmu_idx = hold_mmu_idx;
3108 return true;
3109 }
3110
3111 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3112 {
3113 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3114
3115 tcg_gen_movi_reg(tcg_rt, a->i);
3116 save_gpr(ctx, a->t, tcg_rt);
3117 cond_free(&ctx->null_cond);
3118 return true;
3119 }
3120
3121 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3122 {
3123 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3124 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3125
3126 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3127 save_gpr(ctx, 1, tcg_r1);
3128 cond_free(&ctx->null_cond);
3129 return true;
3130 }
3131
3132 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3133 {
3134 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3135
3136 /* Special case rb == 0, for the LDI pseudo-op.
3137 The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
3138 if (a->b == 0) {
3139 tcg_gen_movi_reg(tcg_rt, a->i);
3140 } else {
3141 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3142 }
3143 save_gpr(ctx, a->t, tcg_rt);
3144 cond_free(&ctx->null_cond);
3145 return true;
3146 }
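
/* E.g. "ldi 5,r3" is the assembler's spelling of "ldo 5(r0),r3", and
   "ldo 0(r4),r5" degenerates to a register copy -- the former via the
   rb == 0 special case, the latter folded to a move inside
   tcg_gen_addi_reg.  */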
3147
3148 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3149 unsigned c, unsigned f, bool d, unsigned n, int disp)
3150 {
3151 TCGv_reg dest, in2, sv;
3152 DisasCond cond;
3153
3154 in2 = load_gpr(ctx, r);
3155 dest = tcg_temp_new();
3156
3157 tcg_gen_sub_reg(dest, in1, in2);
3158
3159 sv = NULL;
3160 if (cond_need_sv(c)) {
3161 sv = do_sub_sv(ctx, dest, in1, in2);
3162 }
3163
3164 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3165 return do_cbranch(ctx, disp, n, &cond);
3166 }
3167
3168 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3169 {
3170 if (!ctx->is_pa20 && a->d) {
3171 return false;
3172 }
3173 nullify_over(ctx);
3174 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3175 a->c, a->f, a->d, a->n, a->disp);
3176 }
3177
3178 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3179 {
3180 if (!ctx->is_pa20 && a->d) {
3181 return false;
3182 }
3183 nullify_over(ctx);
3184 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i),
3185 a->c, a->f, a->d, a->n, a->disp);
3186 }
3187
3188 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3189 unsigned c, unsigned f, unsigned n, int disp)
3190 {
3191 TCGv_reg dest, in2, sv, cb_cond;
3192 DisasCond cond;
3193 bool d = false;
3194
3195 /*
3196 * For hppa64, the ADDB conditions change with PSW.W,
3197 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3198 */
3199 if (ctx->tb_flags & PSW_W) {
3200 d = c >= 5;
3201 if (d) {
3202 c &= 3;
3203 }
3204 }
3205
3206 in2 = load_gpr(ctx, r);
3207 dest = tcg_temp_new();
3208 sv = NULL;
3209 cb_cond = NULL;
3210
3211 if (cond_need_cb(c)) {
3212 TCGv_reg cb = tcg_temp_new();
3213 TCGv_reg cb_msb = tcg_temp_new();
3214
3215 tcg_gen_movi_reg(cb_msb, 0);
3216 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3217 tcg_gen_xor_reg(cb, in1, in2);
3218 tcg_gen_xor_reg(cb, cb, dest);
3219 cb_cond = get_carry(ctx, d, cb, cb_msb);
3220 } else {
3221 tcg_gen_add_reg(dest, in1, in2);
3222 }
3223 if (cond_need_sv(c)) {
3224 sv = do_add_sv(ctx, dest, in1, in2);
3225 }
3226
3227 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3228 save_gpr(ctx, r, dest);
3229 return do_cbranch(ctx, disp, n, &cond);
3230 }
3231
3232 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3233 {
3234 nullify_over(ctx);
3235 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3236 }
3237
3238 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3239 {
3240 nullify_over(ctx);
3241 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3242 }
3243
3244 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3245 {
3246 TCGv_reg tmp, tcg_r;
3247 DisasCond cond;
3248
3249 nullify_over(ctx);
3250
3251 tmp = tcg_temp_new();
3252 tcg_r = load_gpr(ctx, a->r);
3253 if (cond_need_ext(ctx, a->d)) {
3254 /* Force shift into [32,63] */
3255 tcg_gen_ori_reg(tmp, cpu_sar, 32);
3256 tcg_gen_shl_reg(tmp, tcg_r, tmp);
3257 } else {
3258 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3259 }
3260
3261 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3262 return do_cbranch(ctx, a->disp, a->n, &cond);
3263 }
3264
3265 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3266 {
3267 TCGv_reg tmp, tcg_r;
3268 DisasCond cond;
3269 int p;
3270
3271 nullify_over(ctx);
3272
3273 tmp = tcg_temp_new();
3274 tcg_r = load_gpr(ctx, a->r);
3275 p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3276 tcg_gen_shli_reg(tmp, tcg_r, p);
3277
3278 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3279 return do_cbranch(ctx, a->disp, a->n, &cond);
3280 }
3281
3282 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3283 {
3284 TCGv_reg dest;
3285 DisasCond cond;
3286
3287 nullify_over(ctx);
3288
3289 dest = dest_gpr(ctx, a->r2);
3290 if (a->r1 == 0) {
3291 tcg_gen_movi_reg(dest, 0);
3292 } else {
3293 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3294 }
3295
3296 /* All MOVB conditions are 32-bit. */
3297 cond = do_sed_cond(ctx, a->c, false, dest);
3298 return do_cbranch(ctx, a->disp, a->n, &cond);
3299 }
3300
3301 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3302 {
3303 TCGv_reg dest;
3304 DisasCond cond;
3305
3306 nullify_over(ctx);
3307
3308 dest = dest_gpr(ctx, a->r);
3309 tcg_gen_movi_reg(dest, a->i);
3310
3311 /* All MOVBI conditions are 32-bit. */
3312 cond = do_sed_cond(ctx, a->c, false, dest);
3313 return do_cbranch(ctx, a->disp, a->n, &cond);
3314 }
3315
3316 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3317 {
3318 TCGv_reg dest, src2;
3319
3320 if (!ctx->is_pa20 && a->d) {
3321 return false;
3322 }
3323 if (a->c) {
3324 nullify_over(ctx);
3325 }
3326
3327 dest = dest_gpr(ctx, a->t);
3328 src2 = load_gpr(ctx, a->r2);
3329 if (a->r1 == 0) {
3330 if (a->d) {
3331 tcg_gen_shr_reg(dest, src2, cpu_sar);
3332 } else {
3333 TCGv_reg tmp = tcg_temp_new();
3334
3335 tcg_gen_ext32u_reg(dest, src2);
3336 tcg_gen_andi_reg(tmp, cpu_sar, 31);
3337 tcg_gen_shr_reg(dest, dest, tmp);
3338 }
3339 } else if (a->r1 == a->r2) {
3340 if (a->d) {
3341 tcg_gen_rotr_reg(dest, src2, cpu_sar);
3342 } else {
3343 TCGv_i32 t32 = tcg_temp_new_i32();
3344 TCGv_i32 s32 = tcg_temp_new_i32();
3345
3346 tcg_gen_trunc_reg_i32(t32, src2);
3347 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3348 tcg_gen_andi_i32(s32, s32, 31);
3349 tcg_gen_rotr_i32(t32, t32, s32);
3350 tcg_gen_extu_i32_reg(dest, t32);
3351 }
3352 } else {
3353 TCGv_reg src1 = load_gpr(ctx, a->r1);
3354
3355 if (a->d) {
3356 TCGv_reg t = tcg_temp_new();
3357 TCGv_reg n = tcg_temp_new();
3358
3359 tcg_gen_xori_reg(n, cpu_sar, 63);
3360 tcg_gen_shl_reg(t, src2, n);
3361 tcg_gen_shli_reg(t, t, 1);
3362 tcg_gen_shr_reg(dest, src1, cpu_sar);
3363 tcg_gen_or_reg(dest, dest, t);
3364 } else {
3365 TCGv_i64 t = tcg_temp_new_i64();
3366 TCGv_i64 s = tcg_temp_new_i64();
3367
3368 tcg_gen_concat_reg_i64(t, src2, src1);
3369 tcg_gen_extu_reg_i64(s, cpu_sar);
3370 tcg_gen_andi_i64(s, s, 31);
3371 tcg_gen_shr_i64(t, t, s);
3372 tcg_gen_trunc_i64_reg(dest, t);
3373 }
3374 }
3375 save_gpr(ctx, a->t, dest);
3376
3377 /* Install the new nullification. */
3378 cond_free(&ctx->null_cond);
3379 if (a->c) {
3380 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3381 }
3382 return nullify_end(ctx);
3383 }
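
/* Worked example (32-bit, a->d clear, r1 != r2): with r1 = 0xAAAAAAAA,
   r2 = 0xBBBBBBBB and SAR = 8, the concatenation 0xAAAAAAAA_BBBBBBBB
   shifted right by 8 leaves 0xAABBBBBB in the target -- a funnel
   shift, with the r1 == r2 case degenerating to the rotate handled
   above.  */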
3384
3385 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3386 {
3387 unsigned width, sa;
3388 TCGv_reg dest, t2;
3389
3390 if (!ctx->is_pa20 && a->d) {
3391 return false;
3392 }
3393 if (a->c) {
3394 nullify_over(ctx);
3395 }
3396
3397 width = a->d ? 64 : 32;
3398 sa = width - 1 - a->cpos;
3399
3400 dest = dest_gpr(ctx, a->t);
3401 t2 = load_gpr(ctx, a->r2);
3402 if (a->r1 == 0) {
3403 tcg_gen_extract_reg(dest, t2, sa, width - sa);
3404 } else if (width == TARGET_REGISTER_BITS) {
3405 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3406 } else {
3407 assert(!a->d);
3408 if (a->r1 == a->r2) {
3409 TCGv_i32 t32 = tcg_temp_new_i32();
3410 tcg_gen_trunc_reg_i32(t32, t2);
3411 tcg_gen_rotri_i32(t32, t32, sa);
3412 tcg_gen_extu_i32_reg(dest, t32);
3413 } else {
3414 TCGv_i64 t64 = tcg_temp_new_i64();
3415 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3416 tcg_gen_shri_i64(t64, t64, sa);
3417 tcg_gen_trunc_i64_reg(dest, t64);
3418 }
3419 }
3420 save_gpr(ctx, a->t, dest);
3421
3422 /* Install the new nullification. */
3423 cond_free(&ctx->null_cond);
3424 if (a->c) {
3425 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3426 }
3427 return nullify_end(ctx);
3428 }
3429
3430 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3431 {
3432 unsigned widthm1 = a->d ? 63 : 31;
3433 TCGv_reg dest, src, tmp;
3434
3435 if (!ctx->is_pa20 && a->d) {
3436 return false;
3437 }
3438 if (a->c) {
3439 nullify_over(ctx);
3440 }
3441
3442 dest = dest_gpr(ctx, a->t);
3443 src = load_gpr(ctx, a->r);
3444 tmp = tcg_temp_new();
3445
3446 /* Recall that SAR uses big-endian bit numbering. */
3447 tcg_gen_andi_reg(tmp, cpu_sar, widthm1);
3448 tcg_gen_xori_reg(tmp, tmp, widthm1);
3449
3450 if (a->se) {
3451 if (!a->d) {
3452 tcg_gen_ext32s_reg(dest, src);
3453 src = dest;
3454 }
3455 tcg_gen_sar_reg(dest, src, tmp);
3456 tcg_gen_sextract_reg(dest, dest, 0, a->len);
3457 } else {
3458 if (!a->d) {
3459 tcg_gen_ext32u_reg(dest, src);
3460 src = dest;
3461 }
3462 tcg_gen_shr_reg(dest, src, tmp);
3463 tcg_gen_extract_reg(dest, dest, 0, a->len);
3464 }
3465 save_gpr(ctx, a->t, dest);
3466
3467 /* Install the new nullification. */
3468 cond_free(&ctx->null_cond);
3469 if (a->c) {
3470 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3471 }
3472 return nullify_end(ctx);
3473 }
3474
3475 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3476 {
3477 unsigned len, cpos, width;
3478 TCGv_reg dest, src;
3479
3480 if (!ctx->is_pa20 && a->d) {
3481 return false;
3482 }
3483 if (a->c) {
3484 nullify_over(ctx);
3485 }
3486
3487 len = a->len;
3488 width = a->d ? 64 : 32;
3489 cpos = width - 1 - a->pos;
3490 if (cpos + len > width) {
3491 len = width - cpos;
3492 }
3493
3494 dest = dest_gpr(ctx, a->t);
3495 src = load_gpr(ctx, a->r);
3496 if (a->se) {
3497 tcg_gen_sextract_reg(dest, src, cpos, len);
3498 } else {
3499 tcg_gen_extract_reg(dest, src, cpos, len);
3500 }
3501 save_gpr(ctx, a->t, dest);
3502
3503 /* Install the new nullification. */
3504 cond_free(&ctx->null_cond);
3505 if (a->c) {
3506 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3507 }
3508 return nullify_end(ctx);
3509 }
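
/* Worked example: bit positions are numbered from the most-significant
   end, so a 32-bit extract with a->pos = 23, a->len = 8 computes
   cpos = 31 - 23 = 8 and pulls bits [15:8] -- the second-lowest byte --
   into the low bits of the target, sign- or zero-extended per a->se.  */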
3510
3511 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3512 {
3513 unsigned len, width;
3514 target_sreg mask0, mask1;
3515 TCGv_reg dest;
3516
3517 if (!ctx->is_pa20 && a->d) {
3518 return false;
3519 }
3520 if (a->c) {
3521 nullify_over(ctx);
3522 }
3523
3524 len = a->len;
3525 width = a->d ? 64 : 32;
3526 if (a->cpos + len > width) {
3527 len = width - a->cpos;
3528 }
3529
3530 dest = dest_gpr(ctx, a->t);
3531 mask0 = deposit64(0, a->cpos, len, a->i);
3532 mask1 = deposit64(-1, a->cpos, len, a->i);
3533
3534 if (a->nz) {
3535 TCGv_reg src = load_gpr(ctx, a->t);
3536 tcg_gen_andi_reg(dest, src, mask1);
3537 tcg_gen_ori_reg(dest, dest, mask0);
3538 } else {
3539 tcg_gen_movi_reg(dest, mask0);
3540 }
3541 save_gpr(ctx, a->t, dest);
3542
3543 /* Install the new nullification. */
3544 cond_free(&ctx->null_cond);
3545 if (a->c) {
3546 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3547 }
3548 return nullify_end(ctx);
3549 }
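
/* Worked example: with a->i = 5, a->cpos = 8, a->len = 4 the masks are
   mask0 = deposit64(0, 8, 4, 5) = 0x500 and
   mask1 = deposit64(-1, 8, 4, 5) = 0x...fffff5ff, so the nz form
   computes (src & mask1) | mask0, merging the immediate into bits
   [11:8], while the z form simply materializes mask0.  */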
3550
3551 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3552 {
3553 unsigned rs = a->nz ? a->t : 0;
3554 unsigned len, width;
3555 TCGv_reg dest, val;
3556
3557 if (!ctx->is_pa20 && a->d) {
3558 return false;
3559 }
3560 if (a->c) {
3561 nullify_over(ctx);
3562 }
3563
3564 len = a->len;
3565 width = a->d ? 64 : 32;
3566 if (a->cpos + len > width) {
3567 len = width - a->cpos;
3568 }
3569
3570 dest = dest_gpr(ctx, a->t);
3571 val = load_gpr(ctx, a->r);
3572 if (rs == 0) {
3573 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3574 } else {
3575 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3576 }
3577 save_gpr(ctx, a->t, dest);
3578
3579 /* Install the new nullification. */
3580 cond_free(&ctx->null_cond);
3581 if (a->c) {
3582 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3583 }
3584 return nullify_end(ctx);
3585 }
3586
3587 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3588 bool d, bool nz, unsigned len, TCGv_reg val)
3589 {
3590 unsigned rs = nz ? rt : 0;
3591 unsigned widthm1 = d ? 63 : 31;
3592 TCGv_reg mask, tmp, shift, dest;
3593 target_ureg msb = 1ULL << (len - 1);
3594
3595 dest = dest_gpr(ctx, rt);
3596 shift = tcg_temp_new();
3597 tmp = tcg_temp_new();
3598
3599 /* Convert big-endian bit numbering in SAR to left-shift. */
3600 tcg_gen_andi_reg(shift, cpu_sar, widthm1);
3601 tcg_gen_xori_reg(shift, shift, widthm1);
3602
3603 mask = tcg_temp_new();
3604 tcg_gen_movi_reg(mask, msb + (msb - 1));
3605 tcg_gen_and_reg(tmp, val, mask);
3606 if (rs) {
3607 tcg_gen_shl_reg(mask, mask, shift);
3608 tcg_gen_shl_reg(tmp, tmp, shift);
3609 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3610 tcg_gen_or_reg(dest, dest, tmp);
3611 } else {
3612 tcg_gen_shl_reg(dest, tmp, shift);
3613 }
3614 save_gpr(ctx, rt, dest);
3615
3616 /* Install the new nullification. */
3617 cond_free(&ctx->null_cond);
3618 if (c) {
3619 ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3620 }
3621 return nullify_end(ctx);
3622 }
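
/* Worked example of the SAR conversion above (32-bit): SAR counts from
   the most-significant end, so SAR = 27 gives
   shift = (27 & 31) ^ 31 = 4, depositing the len low bits of val
   starting at bit 4; msb + (msb - 1) is simply the all-ones mask of
   width len, shifted by the same amount in the merging case.  */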
3623
3624 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3625 {
3626 if (!ctx->is_pa20 && a->d) {
3627 return false;
3628 }
3629 if (a->c) {
3630 nullify_over(ctx);
3631 }
3632 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3633 load_gpr(ctx, a->r));
3634 }
3635
3636 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3637 {
3638 if (!ctx->is_pa20 && a->d) {
3639 return false;
3640 }
3641 if (a->c) {
3642 nullify_over(ctx);
3643 }
3644 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3645 tcg_constant_reg(a->i));
3646 }
3647
3648 static bool trans_be(DisasContext *ctx, arg_be *a)
3649 {
3650 TCGv_reg tmp;
3651
3652 #ifdef CONFIG_USER_ONLY
3653 /* ??? It seems like there should be a good way of using
3654 "be disp(sr2, r0)", the canonical gateway entry mechanism
3655 to our advantage. But that appears to be inconvenient to
3656 manage alongside branch delay slots. Therefore we handle
3657 entry into the gateway page via absolute address. */
3658 /* Since we don't implement spaces, just branch. Do notice the special
3659 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3660 goto_tb to the TB containing the syscall. */
3661 if (a->b == 0) {
3662 return do_dbranch(ctx, a->disp, a->l, a->n);
3663 }
3664 #else
3665 nullify_over(ctx);
3666 #endif
3667
3668 tmp = tcg_temp_new();
3669 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3670 tmp = do_ibranch_priv(ctx, tmp);
3671
3672 #ifdef CONFIG_USER_ONLY
3673 return do_ibranch(ctx, tmp, a->l, a->n);
3674 #else
3675 TCGv_i64 new_spc = tcg_temp_new_i64();
3676
3677 load_spr(ctx, new_spc, a->sp);
3678 if (a->l) {
3679 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3680 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3681 }
3682 if (a->n && use_nullify_skip(ctx)) {
3683 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3684 tcg_gen_addi_reg(tmp, tmp, 4);
3685 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3686 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3687 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3688 } else {
3689 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3690 if (ctx->iaoq_b == -1) {
3691 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3692 }
3693 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3694 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3695 nullify_set(ctx, a->n);
3696 }
3697 tcg_gen_lookup_and_goto_ptr();
3698 ctx->base.is_jmp = DISAS_NORETURN;
3699 return nullify_end(ctx);
3700 #endif
3701 }
3702
3703 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3704 {
3705 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3706 }
3707
3708 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3709 {
3710 target_ureg dest = iaoq_dest(ctx, a->disp);
3711
3712 nullify_over(ctx);
3713
3714 /* Make sure the caller hasn't done something weird with the queue.
3715 * ??? This is not quite the same as the PSW[B] bit, which would be
3716 * expensive to track. Real hardware will trap for
3717 * b gateway
3718 * b gateway+4 (in delay slot of first branch)
3719 * However, checking for a non-sequential instruction queue *will*
3720 * diagnose the security hole
3721 * b gateway
3722 * b evil
3723 * in which instructions at evil would run with increased privs.
3724 */
3725 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3726 return gen_illegal(ctx);
3727 }
3728
3729 #ifndef CONFIG_USER_ONLY
3730 if (ctx->tb_flags & PSW_C) {
3731 CPUHPPAState *env = cpu_env(ctx->cs);
3732 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3733 /* If we could not find a TLB entry, then we need to generate an
3734 ITLB miss exception so the kernel will provide it.
3735 The resulting TLB fill operation will invalidate this TB and
3736 we will re-translate, at which point we *will* be able to find
3737 the TLB entry and determine if this is in fact a gateway page. */
3738 if (type < 0) {
3739 gen_excp(ctx, EXCP_ITLB_MISS);
3740 return true;
3741 }
3742 /* No change for non-gateway pages or for priv decrease. */
3743 if (type >= 4 && type - 4 < ctx->privilege) {
3744 dest = deposit32(dest, 0, 2, type - 4);
3745 }
3746 } else {
3747 dest &= -4; /* priv = 0 */
3748 }
3749 #endif
3750
3751 if (a->l) {
3752 TCGv_reg tmp = dest_gpr(ctx, a->l);
3753 if (ctx->privilege < 3) {
3754 tcg_gen_andi_reg(tmp, tmp, -4);
3755 }
3756 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3757 save_gpr(ctx, a->l, tmp);
3758 }
3759
3760 return do_dbranch(ctx, dest, 0, a->n);
3761 }
3762
3763 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3764 {
3765 if (a->x) {
3766 TCGv_reg tmp = tcg_temp_new();
3767 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3768 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3769 /* The computation here never changes privilege level. */
3770 return do_ibranch(ctx, tmp, a->l, a->n);
3771 } else {
3772 /* BLR R0,RX is a good way to load PC+8 into RX. */
3773 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3774 }
3775 }
3776
3777 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3778 {
3779 TCGv_reg dest;
3780
3781 if (a->x == 0) {
3782 dest = load_gpr(ctx, a->b);
3783 } else {
3784 dest = tcg_temp_new();
3785 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3786 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3787 }
3788 dest = do_ibranch_priv(ctx, dest);
3789 return do_ibranch(ctx, dest, 0, a->n);
3790 }
3791
3792 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3793 {
3794 TCGv_reg dest;
3795
3796 #ifdef CONFIG_USER_ONLY
3797 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3798 return do_ibranch(ctx, dest, a->l, a->n);
3799 #else
3800 nullify_over(ctx);
3801 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3802
3803 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3804 if (ctx->iaoq_b == -1) {
3805 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3806 }
3807 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3808 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3809 if (a->l) {
3810 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3811 }
3812 nullify_set(ctx, a->n);
3813 tcg_gen_lookup_and_goto_ptr();
3814 ctx->base.is_jmp = DISAS_NORETURN;
3815 return nullify_end(ctx);
3816 #endif
3817 }
3818
3819 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3820 {
3821 /* All branch target stack instructions are implemented as nops. */
3822 return ctx->is_pa20;
3823 }
3824
3825 /*
3826 * Float class 0
3827 */
3828
3829 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3830 {
3831 tcg_gen_mov_i32(dst, src);
3832 }
3833
3834 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3835 {
3836 uint64_t ret;
3837
3838 if (TARGET_REGISTER_BITS == 64) {
3839 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3840 } else {
3841 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3842 }
3843
3844 nullify_over(ctx);
3845 save_frd(0, tcg_constant_i64(ret));
3846 return nullify_end(ctx);
3847 }
3848
3849 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3850 {
3851 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3852 }
3853
3854 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3855 {
3856 tcg_gen_mov_i64(dst, src);
3857 }
3858
3859 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3860 {
3861 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3862 }
3863
3864 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3865 {
3866 tcg_gen_andi_i32(dst, src, INT32_MAX);
3867 }
3868
3869 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3870 {
3871 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3872 }
3873
3874 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3875 {
3876 tcg_gen_andi_i64(dst, src, INT64_MAX);
3877 }
3878
3879 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3880 {
3881 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3882 }
3883
3884 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3885 {
3886 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3887 }
3888
3889 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3890 {
3891 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3892 }
3893
3894 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3895 {
3896 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3897 }
3898
3899 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3900 {
3901 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3902 }
3903
3904 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3905 {
3906 tcg_gen_xori_i32(dst, src, INT32_MIN);
3907 }
3908
3909 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3910 {
3911 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3912 }
3913
3914 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3915 {
3916 tcg_gen_xori_i64(dst, src, INT64_MIN);
3917 }
3918
3919 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3920 {
3921 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3922 }
3923
3924 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3925 {
3926 tcg_gen_ori_i32(dst, src, INT32_MIN);
3927 }
3928
3929 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3930 {
3931 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3932 }
3933
3934 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3935 {
3936 tcg_gen_ori_i64(dst, src, INT64_MIN);
3937 }
3938
3939 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3940 {
3941 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3942 }
3943
3944 /*
3945 * Float class 1
3946 */
3947
3948 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3949 {
3950 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3951 }
3952
3953 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3954 {
3955 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3956 }
3957
3958 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3959 {
3960 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3961 }
3962
3963 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3964 {
3965 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3966 }
3967
3968 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3969 {
3970 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3971 }
3972
3973 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3974 {
3975 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3976 }
3977
3978 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3979 {
3980 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3981 }
3982
3983 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3984 {
3985 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3986 }
3987
3988 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3989 {
3990 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3991 }
3992
3993 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3994 {
3995 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3996 }
3997
3998 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3999 {
4000 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4001 }
4002
4003 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4004 {
4005 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4006 }
4007
4008 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4009 {
4010 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4011 }
4012
4013 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4014 {
4015 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4016 }
4017
4018 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4019 {
4020 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4021 }
4022
4023 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4024 {
4025 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4026 }
4027
4028 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4029 {
4030 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4031 }
4032
4033 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4034 {
4035 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4036 }
4037
4038 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4039 {
4040 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4041 }
4042
4043 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4044 {
4045 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4046 }
4047
4048 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4049 {
4050 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4051 }
4052
4053 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4054 {
4055 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4056 }
4057
4058 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4059 {
4060 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4061 }
4062
4063 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4064 {
4065 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4066 }
4067
4068 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4069 {
4070 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4071 }
4072
4073 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4074 {
4075 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4076 }
4077
4078 /*
4079 * Float class 2
4080 */
4081
4082 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4083 {
4084 TCGv_i32 ta, tb, tc, ty;
4085
4086 nullify_over(ctx);
4087
4088 ta = load_frw0_i32(a->r1);
4089 tb = load_frw0_i32(a->r2);
4090 ty = tcg_constant_i32(a->y);
4091 tc = tcg_constant_i32(a->c);
4092
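/* The helper records the result in the shadowed FPSR (fr0_shadow),
   not in a target register; FTEST examines it later. */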
4093 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4094
4095 return nullify_end(ctx);
4096 }
4097
4098 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4099 {
4100 TCGv_i64 ta, tb;
4101 TCGv_i32 tc, ty;
4102
4103 nullify_over(ctx);
4104
4105 ta = load_frd0(a->r1);
4106 tb = load_frd0(a->r2);
4107 ty = tcg_constant_i32(a->y);
4108 tc = tcg_constant_i32(a->c);
4109
4110 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4111
4112 return nullify_end(ctx);
4113 }
4114
4115 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4116 {
4117 TCGv_reg t;
4118
4119 nullify_over(ctx);
4120
4121 t = tcg_temp_new();
4122 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4123
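/*
 * fr0_shadow holds the FPSR: bit 26 is the C bit, and the compare
 * queue extends downward from bit 21.  The ACC/REJ masks below test
 * the C bit together with the first N queue entries; other values
 * of y test a single queue entry.
 */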
4124 if (a->y == 1) {
4125 int mask;
4126 bool inv = false;
4127
4128 switch (a->c) {
4129 case 0: /* simple */
4130 tcg_gen_andi_reg(t, t, 0x4000000);
4131 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4132 goto done;
4133 case 2: /* rej */
4134 inv = true;
4135 /* fallthru */
4136 case 1: /* acc */
4137 mask = 0x43ff800;
4138 break;
4139 case 6: /* rej8 */
4140 inv = true;
4141 /* fallthru */
4142 case 5: /* acc8 */
4143 mask = 0x43f8000;
4144 break;
4145 case 9: /* acc6 */
4146 mask = 0x43e0000;
4147 break;
4148 case 13: /* acc4 */
4149 mask = 0x4380000;
4150 break;
4151 case 17: /* acc2 */
4152 mask = 0x4200000;
4153 break;
4154 default:
4155 gen_illegal(ctx);
4156 return true;
4157 }
4158 if (inv) {
4159 TCGv_reg c = tcg_constant_reg(mask);
4160 tcg_gen_or_reg(t, t, c);
4161 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4162 } else {
4163 tcg_gen_andi_reg(t, t, mask);
4164 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4165 }
4166 } else {
4167 unsigned cbit = (a->y ^ 1) - 1;
4168
4169 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
4170 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4171 }
4172
4173 done:
4174 return nullify_end(ctx);
4175 }
4176
4177 /*
4178  * Float class 3
4179 */
4180
4181 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4182 {
4183 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4184 }
4185
4186 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4187 {
4188 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4189 }
4190
4191 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4192 {
4193 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4194 }
4195
4196 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4197 {
4198 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4199 }
4200
4201 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4202 {
4203 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4204 }
4205
4206 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4207 {
4208 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4209 }
4210
4211 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4212 {
4213 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4214 }
4215
4216 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4217 {
4218 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4219 }
4220
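/*
 * XMPYU: unsigned 32x32->64 fixed-point multiply.  load_frw0_i64
 * zero-extends each 32-bit source into an i64, so a plain 64-bit
 * multiply produces the full unsigned product.
 */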
4221 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4222 {
4223 TCGv_i64 x, y;
4224
4225 nullify_over(ctx);
4226
4227 x = load_frw0_i64(a->r1);
4228 y = load_frw0_i64(a->r2);
4229 tcg_gen_mul_i64(x, x, y);
4230 save_frd(a->t, x);
4231
4232 return nullify_end(ctx);
4233 }
4234
4235 /* Convert the fmpyadd single-precision register encodings to standard. */
4236 static inline int fmpyadd_s_reg(unsigned r)
4237 {
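/* Bit 4 selects the bank: r in 0-15 maps to 16-31, r in 16-31
   maps to 48-63 (e.g. 5 -> 21, 21 -> 53). */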
4238 return (r & 16) * 2 + 16 + (r & 15);
4239 }
4240
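/*
 * FMPYADD/FMPYSUB perform two independent operations in a single
 * insn: a multiply tm = rm1 * rm2 and an add/sub ta = ta +/- ra.
 */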
4241 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4242 {
4243 int tm = fmpyadd_s_reg(a->tm);
4244 int ra = fmpyadd_s_reg(a->ra);
4245 int ta = fmpyadd_s_reg(a->ta);
4246 int rm2 = fmpyadd_s_reg(a->rm2);
4247 int rm1 = fmpyadd_s_reg(a->rm1);
4248
4249 nullify_over(ctx);
4250
4251 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4252 do_fop_weww(ctx, ta, ta, ra,
4253 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4254
4255 return nullify_end(ctx);
4256 }
4257
4258 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4259 {
4260 return do_fmpyadd_s(ctx, a, false);
4261 }
4262
4263 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4264 {
4265 return do_fmpyadd_s(ctx, a, true);
4266 }
4267
4268 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4269 {
4270 nullify_over(ctx);
4271
4272 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4273 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4274 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4275
4276 return nullify_end(ctx);
4277 }
4278
4279 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4280 {
4281 return do_fmpyadd_d(ctx, a, false);
4282 }
4283
4284 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4285 {
4286 return do_fmpyadd_d(ctx, a, true);
4287 }
4288
4289 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4290 {
4291 TCGv_i32 x, y, z;
4292
4293 nullify_over(ctx);
4294 x = load_frw0_i32(a->rm1);
4295 y = load_frw0_i32(a->rm2);
4296 z = load_frw0_i32(a->ra3);
4297
4298 if (a->neg) {
4299 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4300 } else {
4301 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4302 }
4303
4304 save_frw_i32(a->t, x);
4305 return nullify_end(ctx);
4306 }
4307
4308 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4309 {
4310 TCGv_i64 x, y, z;
4311
4312 nullify_over(ctx);
4313 x = load_frd0(a->rm1);
4314 y = load_frd0(a->rm2);
4315 z = load_frd0(a->ra3);
4316
4317 if (a->neg) {
4318 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4319 } else {
4320 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4321 }
4322
4323 save_frd(a->t, x);
4324 return nullify_end(ctx);
4325 }
4326
4327 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4328 {
4329 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4330 #ifndef CONFIG_USER_ONLY
4331 if (a->i == 0x100) {
4332 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4333 nullify_over(ctx);
4334 gen_helper_diag_btlb(tcg_env);
4335 return nullify_end(ctx);
4336 }
4337 #endif
4338 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4339 return true;
4340 }
4341
4342 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4343 {
4344 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4345 int bound;
4346
4347 ctx->cs = cs;
4348 ctx->tb_flags = ctx->base.tb->flags;
4349 ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4350
4351 #ifdef CONFIG_USER_ONLY
4352 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4353 ctx->mmu_idx = MMU_USER_IDX;
4354 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4355 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4356 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4357 #else
4358 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4359 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4360 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4361 : MMU_PHYS_IDX);
4362
4363 /* Recover the IAOQ values from the GVA + PRIV. */
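/* cs_base packs IASQ_F in the high bits and the signed IAOQ_B - IAOQ_F
   offset in the low 32 bits; a zero offset means IAOQ_B is unknown. */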
4364 uint64_t cs_base = ctx->base.tb->cs_base;
4365 uint64_t iasq_f = cs_base & ~0xffffffffull;
4366 int32_t diff = cs_base;
4367
4368 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4369 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4370 #endif
4371 ctx->iaoq_n = -1;
4372 ctx->iaoq_n_var = NULL;
4373
4374 /* Bound the number of instructions by those left on the page. */
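/* -(pc_first | TARGET_PAGE_MASK) is the number of bytes remaining
   on the page. */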
4375 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4376 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4377 }
4378
4379 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4380 {
4381 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4382
4383 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4384 ctx->null_cond = cond_make_f();
4385 ctx->psw_n_nonzero = false;
4386 if (ctx->tb_flags & PSW_N) {
4387 ctx->null_cond.c = TCG_COND_ALWAYS;
4388 ctx->psw_n_nonzero = true;
4389 }
4390 ctx->null_lab = NULL;
4391 }
4392
4393 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4394 {
4395 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4396
4397 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4398 }
4399
4400 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4401 {
4402 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4403 CPUHPPAState *env = cpu_env(cs);
4404 DisasJumpType ret;
4405
4406 /* Execute one insn. */
4407 #ifdef CONFIG_USER_ONLY
4408 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4409 do_page_zero(ctx);
4410 ret = ctx->base.is_jmp;
4411 assert(ret != DISAS_NEXT);
4412 } else
4413 #endif
4414 {
4415 /* Always fetch the insn, even if nullified, so that we check
4416 the page permissions for execute. */
4417 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4418
4419 /* Set up the IA queue for the next insn.
4420 This will be overwritten by a branch. */
4421 if (ctx->iaoq_b == -1) {
4422 ctx->iaoq_n = -1;
4423 ctx->iaoq_n_var = tcg_temp_new();
4424 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4425 } else {
4426 ctx->iaoq_n = ctx->iaoq_b + 4;
4427 ctx->iaoq_n_var = NULL;
4428 }
4429
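/* This insn was nullified: consume PSW[N] and skip the decode. */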
4430 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4431 ctx->null_cond.c = TCG_COND_NEVER;
4432 ret = DISAS_NEXT;
4433 } else {
4434 ctx->insn = insn;
4435 if (!decode(ctx, insn)) {
4436 gen_illegal(ctx);
4437 }
4438 ret = ctx->base.is_jmp;
4439 assert(ctx->null_lab == NULL);
4440 }
4441 }
4442
4443 /* Advance the insn queue.  Note that this check also detects
4444 a privilege change within the instruction queue. */
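/* iaoq_b != iaoq_f + 4 means the insn installed a non-sequential
   target: a taken branch, or a change of privilege in the queued
   address. */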
4445 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4446 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4447 && use_goto_tb(ctx, ctx->iaoq_b)
4448 && (ctx->null_cond.c == TCG_COND_NEVER
4449 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4450 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4451 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4452 ctx->base.is_jmp = ret = DISAS_NORETURN;
4453 } else {
4454 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4455 }
4456 }
4457 ctx->iaoq_f = ctx->iaoq_b;
4458 ctx->iaoq_b = ctx->iaoq_n;
4459 ctx->base.pc_next += 4;
4460
4461 switch (ret) {
4462 case DISAS_NORETURN:
4463 case DISAS_IAQ_N_UPDATED:
4464 break;
4465
4466 case DISAS_NEXT:
4467 case DISAS_IAQ_N_STALE:
4468 case DISAS_IAQ_N_STALE_EXIT:
4469 if (ctx->iaoq_f == -1) {
4470 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4471 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4472 #ifndef CONFIG_USER_ONLY
4473 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4474 #endif
4475 nullify_save(ctx);
4476 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4477 ? DISAS_EXIT
4478 : DISAS_IAQ_N_UPDATED);
4479 } else if (ctx->iaoq_b == -1) {
4480 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4481 }
4482 break;
4483
4484 default:
4485 g_assert_not_reached();
4486 }
4487 }
4488
4489 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4490 {
4491 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4492 DisasJumpType is_jmp = ctx->base.is_jmp;
4493
4494 switch (is_jmp) {
4495 case DISAS_NORETURN:
4496 break;
4497 case DISAS_TOO_MANY:
4498 case DISAS_IAQ_N_STALE:
4499 case DISAS_IAQ_N_STALE_EXIT:
4500 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4501 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4502 nullify_save(ctx);
4503 /* FALLTHRU */
4504 case DISAS_IAQ_N_UPDATED:
4505 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4506 tcg_gen_lookup_and_goto_ptr();
4507 break;
4508 }
4509 /* FALLTHRU */
4510 case DISAS_EXIT:
4511 tcg_gen_exit_tb(NULL, 0);
4512 break;
4513 default:
4514 g_assert_not_reached();
4515 }
4516 }
4517
4518 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4519 CPUState *cs, FILE *logfile)
4520 {
4521 target_ulong pc = dcbase->pc_first;
4522
4523 #ifdef CONFIG_USER_ONLY
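/* Magic page-zero entry points; these are handled by do_page_zero(). */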
4524 switch (pc) {
4525 case 0x00:
4526 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4527 return;
4528 case 0xb0:
4529 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4530 return;
4531 case 0xe0:
4532 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4533 return;
4534 case 0x100:
4535 fprintf(logfile, "IN:\n0x00000100: syscall\n");
4536 return;
4537 }
4538 #endif
4539
4540 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4541 target_disas(logfile, cs, pc, dcbase->tb->size);
4542 }
4543
4544 static const TranslatorOps hppa_tr_ops = {
4545 .init_disas_context = hppa_tr_init_disas_context,
4546 .tb_start = hppa_tr_tb_start,
4547 .insn_start = hppa_tr_insn_start,
4548 .translate_insn = hppa_tr_translate_insn,
4549 .tb_stop = hppa_tr_tb_stop,
4550 .disas_log = hppa_tr_disas_log,
4551 };
4552
4553 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4554 target_ulong pc, void *host_pc)
4555 {
4556 DisasContext ctx;
4557 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4558 }