target/hppa: Implement HSHLADD, HSHRADD
[mirror_qemu.git] / target/hppa/translate.c
1 /*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
34 #undef HELPER_H
35
36 /* Choose to use explicit sizes within this file. */
37 #undef tcg_temp_new
38
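/*
 * A comparison c(a0, a1), recorded for later evaluation.  Used both for
 * the conditions computed by instructions and for the pending
 * nullification of the next instruction (null_cond below).
 */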
39 typedef struct DisasCond {
40 TCGCond c;
41 TCGv_i64 a0, a1;
42 } DisasCond;
43
44 typedef struct DisasContext {
45 DisasContextBase base;
46 CPUState *cs;
47
48 uint64_t iaoq_f;
49 uint64_t iaoq_b;
50 uint64_t iaoq_n;
51 TCGv_i64 iaoq_n_var;
52
53 DisasCond null_cond;
54 TCGLabel *null_lab;
55
56 uint32_t insn;
57 uint32_t tb_flags;
58 int mmu_idx;
59 int privilege;
60 bool psw_n_nonzero;
61 bool is_pa20;
62
63 #ifdef CONFIG_USER_ONLY
64 MemOp unalign;
65 #endif
66 } DisasContext;
67
68 #ifdef CONFIG_USER_ONLY
69 #define UNALIGN(C) (C)->unalign
70 #else
71 #define UNALIGN(C) MO_ALIGN
72 #endif
73
74 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
75 static int expand_sm_imm(DisasContext *ctx, int val)
76 {
77 if (val & PSW_SM_E) {
78 val = (val & ~PSW_SM_E) | PSW_E;
79 }
80 if (val & PSW_SM_W) {
81 val = (val & ~PSW_SM_W) | PSW_W;
82 }
83 return val;
84 }
85
86 /* The space register field is inverted so that 0 means sr0, not a space inferred from the base register. */
87 static int expand_sr3x(DisasContext *ctx, int val)
88 {
89 return ~val;
90 }
91
92 /* Convert the M:A bits within a memory insn to the tri-state value
93 we use for the final M. */
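/* Here 0 means no base register update, 1 means post-modify and -1 means
   pre-modify, matching the convention used by do_load_32 et al. below. */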
94 static int ma_to_m(DisasContext *ctx, int val)
95 {
96 return val & 2 ? (val & 1 ? -1 : 1) : 0;
97 }
98
99 /* Convert the sign of the displacement to a pre or post-modify. */
100 static int pos_to_m(DisasContext *ctx, int val)
101 {
102 return val ? 1 : -1;
103 }
104
105 static int neg_to_m(DisasContext *ctx, int val)
106 {
107 return val ? -1 : 1;
108 }
109
110 /* Used for branch targets and fp memory ops. */
111 static int expand_shl2(DisasContext *ctx, int val)
112 {
113 return val << 2;
114 }
115
116 /* Used for fp memory ops. */
117 static int expand_shl3(DisasContext *ctx, int val)
118 {
119 return val << 3;
120 }
121
122 /* Used for assemble_21. */
123 static int expand_shl11(DisasContext *ctx, int val)
124 {
125 return val << 11;
126 }
127
128 static int assemble_6(DisasContext *ctx, int val)
129 {
130 /*
131 * Officially, 32 * x + 32 - y.
132 * Here, x is already in bit 5, and y is [4:0].
133 * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
134 * with the overflow from bit 4 summing with x.
135 */
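/* E.g. x=1, y=3: val = 0x23; (0x23 ^ 31) + 1 = 61 = 32*1 + 32 - 3. */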
136 return (val ^ 31) + 1;
137 }
138
139 /* Translate CMPI doubleword conditions to standard. */
140 static int cmpbid_c(DisasContext *ctx, int val)
141 {
142 return val ? val : 4; /* 0 == "*<<" */
143 }
144
145
146 /* Include the auto-generated decoder. */
147 #include "decode-insns.c.inc"
148
149 /* We are not using a goto_tb (for whatever reason), but have updated
150 the iaq (for whatever reason), so don't do it again on exit. */
151 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
152
153 /* We are exiting the TB, but have neither emitted a goto_tb, nor
154 updated the iaq for the next instruction to be executed. */
155 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
156
157 /* Similarly, but we want to return to the main loop immediately
158 to recognize unmasked interrupts. */
159 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
160 #define DISAS_EXIT DISAS_TARGET_3
161
162 /* global register indexes */
163 static TCGv_i64 cpu_gr[32];
164 static TCGv_i64 cpu_sr[4];
165 static TCGv_i64 cpu_srH;
166 static TCGv_i64 cpu_iaoq_f;
167 static TCGv_i64 cpu_iaoq_b;
168 static TCGv_i64 cpu_iasq_f;
169 static TCGv_i64 cpu_iasq_b;
170 static TCGv_i64 cpu_sar;
171 static TCGv_i64 cpu_psw_n;
172 static TCGv_i64 cpu_psw_v;
173 static TCGv_i64 cpu_psw_cb;
174 static TCGv_i64 cpu_psw_cb_msb;
175
176 void hppa_translate_init(void)
177 {
178 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
179
180 typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
181 static const GlobalVar vars[] = {
182 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
183 DEF_VAR(psw_n),
184 DEF_VAR(psw_v),
185 DEF_VAR(psw_cb),
186 DEF_VAR(psw_cb_msb),
187 DEF_VAR(iaoq_f),
188 DEF_VAR(iaoq_b),
189 };
190
191 #undef DEF_VAR
192
193 /* Use the symbolic register names that match the disassembler. */
194 static const char gr_names[32][4] = {
195 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
196 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
197 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
198 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
199 };
200 /* SR[4-7] are not global registers so that we can index them. */
201 static const char sr_names[5][4] = {
202 "sr0", "sr1", "sr2", "sr3", "srH"
203 };
204
205 int i;
206
207 cpu_gr[0] = NULL;
208 for (i = 1; i < 32; i++) {
209 cpu_gr[i] = tcg_global_mem_new(tcg_env,
210 offsetof(CPUHPPAState, gr[i]),
211 gr_names[i]);
212 }
213 for (i = 0; i < 4; i++) {
214 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
215 offsetof(CPUHPPAState, sr[i]),
216 sr_names[i]);
217 }
218 cpu_srH = tcg_global_mem_new_i64(tcg_env,
219 offsetof(CPUHPPAState, sr[4]),
220 sr_names[4]);
221
222 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
223 const GlobalVar *v = &vars[i];
224 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
225 }
226
227 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
228 offsetof(CPUHPPAState, iasq_f),
229 "iasq_f");
230 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
231 offsetof(CPUHPPAState, iasq_b),
232 "iasq_b");
233 }
234
235 static DisasCond cond_make_f(void)
236 {
237 return (DisasCond){
238 .c = TCG_COND_NEVER,
239 .a0 = NULL,
240 .a1 = NULL,
241 };
242 }
243
244 static DisasCond cond_make_t(void)
245 {
246 return (DisasCond){
247 .c = TCG_COND_ALWAYS,
248 .a0 = NULL,
249 .a1 = NULL,
250 };
251 }
252
253 static DisasCond cond_make_n(void)
254 {
255 return (DisasCond){
256 .c = TCG_COND_NE,
257 .a0 = cpu_psw_n,
258 .a1 = tcg_constant_i64(0)
259 };
260 }
261
262 static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
263 {
264 assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
265 return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
266 }
267
268 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
269 {
270 return cond_make_tmp(c, a0, tcg_constant_i64(0));
271 }
272
273 static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
274 {
275 TCGv_i64 tmp = tcg_temp_new_i64();
276 tcg_gen_mov_i64(tmp, a0);
277 return cond_make_0_tmp(c, tmp);
278 }
279
280 static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
281 {
282 TCGv_i64 t0 = tcg_temp_new_i64();
283 TCGv_i64 t1 = tcg_temp_new_i64();
284
285 tcg_gen_mov_i64(t0, a0);
286 tcg_gen_mov_i64(t1, a1);
287 return cond_make_tmp(c, t0, t1);
288 }
289
290 static void cond_free(DisasCond *cond)
291 {
292 switch (cond->c) {
293 default:
294 cond->a0 = NULL;
295 cond->a1 = NULL;
296 /* fallthru */
297 case TCG_COND_ALWAYS:
298 cond->c = TCG_COND_NEVER;
299 break;
300 case TCG_COND_NEVER:
301 break;
302 }
303 }
304
305 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
306 {
307 if (reg == 0) {
308 TCGv_i64 t = tcg_temp_new_i64();
309 tcg_gen_movi_i64(t, 0);
310 return t;
311 } else {
312 return cpu_gr[reg];
313 }
314 }
315
316 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
317 {
318 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
319 return tcg_temp_new_i64();
320 } else {
321 return cpu_gr[reg];
322 }
323 }
324
325 static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
326 {
327 if (ctx->null_cond.c != TCG_COND_NEVER) {
328 tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
329 ctx->null_cond.a1, dest, t);
330 } else {
331 tcg_gen_mov_i64(dest, t);
332 }
333 }
334
335 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
336 {
337 if (reg != 0) {
338 save_or_nullify(ctx, cpu_gr[reg], t);
339 }
340 }
341
342 #if HOST_BIG_ENDIAN
343 # define HI_OFS 0
344 # define LO_OFS 4
345 #else
346 # define HI_OFS 4
347 # define LO_OFS 0
348 #endif
349
350 static TCGv_i32 load_frw_i32(unsigned rt)
351 {
352 TCGv_i32 ret = tcg_temp_new_i32();
353 tcg_gen_ld_i32(ret, tcg_env,
354 offsetof(CPUHPPAState, fr[rt & 31])
355 + (rt & 32 ? LO_OFS : HI_OFS));
356 return ret;
357 }
358
359 static TCGv_i32 load_frw0_i32(unsigned rt)
360 {
361 if (rt == 0) {
362 TCGv_i32 ret = tcg_temp_new_i32();
363 tcg_gen_movi_i32(ret, 0);
364 return ret;
365 } else {
366 return load_frw_i32(rt);
367 }
368 }
369
370 static TCGv_i64 load_frw0_i64(unsigned rt)
371 {
372 TCGv_i64 ret = tcg_temp_new_i64();
373 if (rt == 0) {
374 tcg_gen_movi_i64(ret, 0);
375 } else {
376 tcg_gen_ld32u_i64(ret, tcg_env,
377 offsetof(CPUHPPAState, fr[rt & 31])
378 + (rt & 32 ? LO_OFS : HI_OFS));
379 }
380 return ret;
381 }
382
383 static void save_frw_i32(unsigned rt, TCGv_i32 val)
384 {
385 tcg_gen_st_i32(val, tcg_env,
386 offsetof(CPUHPPAState, fr[rt & 31])
387 + (rt & 32 ? LO_OFS : HI_OFS));
388 }
389
390 #undef HI_OFS
391 #undef LO_OFS
392
393 static TCGv_i64 load_frd(unsigned rt)
394 {
395 TCGv_i64 ret = tcg_temp_new_i64();
396 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
397 return ret;
398 }
399
400 static TCGv_i64 load_frd0(unsigned rt)
401 {
402 if (rt == 0) {
403 TCGv_i64 ret = tcg_temp_new_i64();
404 tcg_gen_movi_i64(ret, 0);
405 return ret;
406 } else {
407 return load_frd(rt);
408 }
409 }
410
411 static void save_frd(unsigned rt, TCGv_i64 val)
412 {
413 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
414 }
415
416 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
417 {
418 #ifdef CONFIG_USER_ONLY
419 tcg_gen_movi_i64(dest, 0);
420 #else
421 if (reg < 4) {
422 tcg_gen_mov_i64(dest, cpu_sr[reg]);
423 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
424 tcg_gen_mov_i64(dest, cpu_srH);
425 } else {
426 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
427 }
428 #endif
429 }
430
431 /* Skip over the implementation of an insn that has been nullified.
432 Use this when the insn is too complex for a conditional move. */
433 static void nullify_over(DisasContext *ctx)
434 {
435 if (ctx->null_cond.c != TCG_COND_NEVER) {
436 /* The always condition should have been handled in the main loop. */
437 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
438
439 ctx->null_lab = gen_new_label();
440
441 /* If we're using PSW[N], copy it to a temp because... */
442 if (ctx->null_cond.a0 == cpu_psw_n) {
443 ctx->null_cond.a0 = tcg_temp_new_i64();
444 tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
445 }
446 /* ... we clear it before branching over the implementation,
447 so that (1) it's clear after nullifying this insn and
448 (2) if this insn nullifies the next, PSW[N] is valid. */
449 if (ctx->psw_n_nonzero) {
450 ctx->psw_n_nonzero = false;
451 tcg_gen_movi_i64(cpu_psw_n, 0);
452 }
453
454 tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
455 ctx->null_cond.a1, ctx->null_lab);
456 cond_free(&ctx->null_cond);
457 }
458 }
459
460 /* Save the current nullification state to PSW[N]. */
461 static void nullify_save(DisasContext *ctx)
462 {
463 if (ctx->null_cond.c == TCG_COND_NEVER) {
464 if (ctx->psw_n_nonzero) {
465 tcg_gen_movi_i64(cpu_psw_n, 0);
466 }
467 return;
468 }
469 if (ctx->null_cond.a0 != cpu_psw_n) {
470 tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
471 ctx->null_cond.a0, ctx->null_cond.a1);
472 ctx->psw_n_nonzero = true;
473 }
474 cond_free(&ctx->null_cond);
475 }
476
477 /* Set PSW[N] to X. The intention is that this is used immediately
478 before a goto_tb/exit_tb, so that there is no fallthru path to other
479 code within the TB. Therefore we do not update psw_n_nonzero. */
480 static void nullify_set(DisasContext *ctx, bool x)
481 {
482 if (ctx->psw_n_nonzero || x) {
483 tcg_gen_movi_i64(cpu_psw_n, x);
484 }
485 }
486
487 /* Mark the end of an instruction that may have been nullified.
488 This is the pair to nullify_over. Always returns true so that
489 it may be tail-called from a translate function. */
490 static bool nullify_end(DisasContext *ctx)
491 {
492 TCGLabel *null_lab = ctx->null_lab;
493 DisasJumpType status = ctx->base.is_jmp;
494
495 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
496 For UPDATED, we cannot update on the nullified path. */
497 assert(status != DISAS_IAQ_N_UPDATED);
498
499 if (likely(null_lab == NULL)) {
500 /* The current insn wasn't conditional or handled the condition
501 applied to it without a branch, so the (new) setting of
502 NULL_COND can be applied directly to the next insn. */
503 return true;
504 }
505 ctx->null_lab = NULL;
506
507 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
508 /* The next instruction will be unconditional,
509 and NULL_COND already reflects that. */
510 gen_set_label(null_lab);
511 } else {
512 /* The insn that we just executed is itself nullifying the next
513 instruction. Store the condition in the PSW[N] global.
514 We asserted PSW[N] = 0 in nullify_over, so that after the
515 label we have the proper value in place. */
516 nullify_save(ctx);
517 gen_set_label(null_lab);
518 ctx->null_cond = cond_make_n();
519 }
520 if (status == DISAS_NORETURN) {
521 ctx->base.is_jmp = DISAS_NEXT;
522 }
523 return true;
524 }
525
526 static uint64_t gva_offset_mask(DisasContext *ctx)
527 {
528 return (ctx->tb_flags & PSW_W
529 ? MAKE_64BIT_MASK(0, 62)
530 : MAKE_64BIT_MASK(0, 32));
531 }
532
533 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
534 uint64_t ival, TCGv_i64 vval)
535 {
536 uint64_t mask = gva_offset_mask(ctx);
537
538 if (ival != -1) {
539 tcg_gen_movi_i64(dest, ival & mask);
540 return;
541 }
542 tcg_debug_assert(vval != NULL);
543
544 /*
545 * We know that the IAOQ is already properly masked.
546 * This optimization is primarily for "iaoq_f = iaoq_b".
547 */
548 if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
549 tcg_gen_mov_i64(dest, vval);
550 } else {
551 tcg_gen_andi_i64(dest, vval, mask);
552 }
553 }
554
555 static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
556 {
557 return ctx->iaoq_f + disp + 8;
558 }
559
560 static void gen_excp_1(int exception)
561 {
562 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
563 }
564
565 static void gen_excp(DisasContext *ctx, int exception)
566 {
567 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
568 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
569 nullify_save(ctx);
570 gen_excp_1(exception);
571 ctx->base.is_jmp = DISAS_NORETURN;
572 }
573
574 static bool gen_excp_iir(DisasContext *ctx, int exc)
575 {
576 nullify_over(ctx);
577 tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
578 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
579 gen_excp(ctx, exc);
580 return nullify_end(ctx);
581 }
582
583 static bool gen_illegal(DisasContext *ctx)
584 {
585 return gen_excp_iir(ctx, EXCP_ILL);
586 }
587
588 #ifdef CONFIG_USER_ONLY
589 #define CHECK_MOST_PRIVILEGED(EXCP) \
590 return gen_excp_iir(ctx, EXCP)
591 #else
592 #define CHECK_MOST_PRIVILEGED(EXCP) \
593 do { \
594 if (ctx->privilege != 0) { \
595 return gen_excp_iir(ctx, EXCP); \
596 } \
597 } while (0)
598 #endif
599
600 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
601 {
602 return translator_use_goto_tb(&ctx->base, dest);
603 }
604
605 /* If the next insn is to be nullified, and it's on the same page,
606 and we're not attempting to set a breakpoint on it, then we can
607 totally skip the nullified insn. This avoids creating and
608 executing a TB that merely branches to the next TB. */
609 static bool use_nullify_skip(DisasContext *ctx)
610 {
611 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
612 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
613 }
614
615 static void gen_goto_tb(DisasContext *ctx, int which,
616 uint64_t f, uint64_t b)
617 {
618 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
619 tcg_gen_goto_tb(which);
620 copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
621 copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
622 tcg_gen_exit_tb(ctx->base.tb, which);
623 } else {
624 copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
625 copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
626 tcg_gen_lookup_and_goto_ptr();
627 }
628 }
629
630 static bool cond_need_sv(int c)
631 {
632 return c == 2 || c == 3 || c == 6;
633 }
634
635 static bool cond_need_cb(int c)
636 {
637 return c == 4 || c == 5;
638 }
639
640 /* Need to extend the 32-bit operands within TCGv_i64 before evaluating the condition. */
641 static bool cond_need_ext(DisasContext *ctx, bool d)
642 {
643 return !(ctx->is_pa20 && d);
644 }
645
646 /*
647 * Compute the condition for an arithmetic operation. See Page 5-3,
648 * Table 5-1, of the PA-RISC 1.1 Architecture Reference Manual for details.
649 */
650
651 static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
652 TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
653 {
654 DisasCond cond;
655 TCGv_i64 tmp;
656
657 switch (cf >> 1) {
658 case 0: /* Never / TR (0 / 1) */
659 cond = cond_make_f();
660 break;
661 case 1: /* = / <> (Z / !Z) */
662 if (cond_need_ext(ctx, d)) {
663 tmp = tcg_temp_new_i64();
664 tcg_gen_ext32u_i64(tmp, res);
665 res = tmp;
666 }
667 cond = cond_make_0(TCG_COND_EQ, res);
668 break;
669 case 2: /* < / >= (N ^ V / !(N ^ V)) */
670 tmp = tcg_temp_new_i64();
671 tcg_gen_xor_i64(tmp, res, sv);
672 if (cond_need_ext(ctx, d)) {
673 tcg_gen_ext32s_i64(tmp, tmp);
674 }
675 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
676 break;
677 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
678 /*
679 * Simplify:
680 * (N ^ V) | Z
681 * ((res < 0) ^ (sv < 0)) | !res
682 * ((res ^ sv) < 0) | !res
683 * (~(res ^ sv) >= 0) | !res
684 * !(~(res ^ sv) >> 31) | !res
685 * !(~(res ^ sv) >> 31 & res)
686 */
687 tmp = tcg_temp_new_i64();
688 tcg_gen_eqv_i64(tmp, res, sv);
689 if (cond_need_ext(ctx, d)) {
690 tcg_gen_sextract_i64(tmp, tmp, 31, 1);
691 tcg_gen_and_i64(tmp, tmp, res);
692 tcg_gen_ext32u_i64(tmp, tmp);
693 } else {
694 tcg_gen_sari_i64(tmp, tmp, 63);
695 tcg_gen_and_i64(tmp, tmp, res);
696 }
697 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
698 break;
699 case 4: /* NUV / UV (!C / C) */
700 /* Only bit 0 of cb_msb is ever set. */
701 cond = cond_make_0(TCG_COND_EQ, cb_msb);
702 break;
703 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
704 tmp = tcg_temp_new_i64();
705 tcg_gen_neg_i64(tmp, cb_msb);
706 tcg_gen_and_i64(tmp, tmp, res);
707 if (cond_need_ext(ctx, d)) {
708 tcg_gen_ext32u_i64(tmp, tmp);
709 }
710 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
711 break;
712 case 6: /* SV / NSV (V / !V) */
713 if (cond_need_ext(ctx, d)) {
714 tmp = tcg_temp_new_i64();
715 tcg_gen_ext32s_i64(tmp, sv);
716 sv = tmp;
717 }
718 cond = cond_make_0(TCG_COND_LT, sv);
719 break;
720 case 7: /* OD / EV */
721 tmp = tcg_temp_new_i64();
722 tcg_gen_andi_i64(tmp, res, 1);
723 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
724 break;
725 default:
726 g_assert_not_reached();
727 }
728 if (cf & 1) {
729 cond.c = tcg_invert_cond(cond.c);
730 }
731
732 return cond;
733 }
734
735 /* Similar, but for the special case of subtraction without borrow, we
736 can use the inputs directly. This can allow other computation to be
737 deleted as unused. */
738
739 static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
740 TCGv_i64 res, TCGv_i64 in1,
741 TCGv_i64 in2, TCGv_i64 sv)
742 {
743 TCGCond tc;
744 bool ext_uns;
745
746 switch (cf >> 1) {
747 case 1: /* = / <> */
748 tc = TCG_COND_EQ;
749 ext_uns = true;
750 break;
751 case 2: /* < / >= */
752 tc = TCG_COND_LT;
753 ext_uns = false;
754 break;
755 case 3: /* <= / > */
756 tc = TCG_COND_LE;
757 ext_uns = false;
758 break;
759 case 4: /* << / >>= */
760 tc = TCG_COND_LTU;
761 ext_uns = true;
762 break;
763 case 5: /* <<= / >> */
764 tc = TCG_COND_LEU;
765 ext_uns = true;
766 break;
767 default:
768 return do_cond(ctx, cf, d, res, NULL, sv);
769 }
770
771 if (cf & 1) {
772 tc = tcg_invert_cond(tc);
773 }
774 if (cond_need_ext(ctx, d)) {
775 TCGv_i64 t1 = tcg_temp_new_i64();
776 TCGv_i64 t2 = tcg_temp_new_i64();
777
778 if (ext_uns) {
779 tcg_gen_ext32u_i64(t1, in1);
780 tcg_gen_ext32u_i64(t2, in2);
781 } else {
782 tcg_gen_ext32s_i64(t1, in1);
783 tcg_gen_ext32s_i64(t2, in2);
784 }
785 return cond_make_tmp(tc, t1, t2);
786 }
787 return cond_make(tc, in1, in2);
788 }
789
790 /*
791 * Similar, but for logicals, where the carry and overflow bits are not
792 * computed, and use of them is undefined.
793 *
794 * Undefined or not, hardware does not trap. It seems reasonable to
795 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
796 * how cases c={2,3} are treated.
797 */
798
799 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
800 TCGv_i64 res)
801 {
802 TCGCond tc;
803 bool ext_uns;
804
805 switch (cf) {
806 case 0: /* never */
807 case 9: /* undef, C */
808 case 11: /* undef, C & !Z */
809 case 12: /* undef, V */
810 return cond_make_f();
811
812 case 1: /* true */
813 case 8: /* undef, !C */
814 case 10: /* undef, !C | Z */
815 case 13: /* undef, !V */
816 return cond_make_t();
817
818 case 2: /* == */
819 tc = TCG_COND_EQ;
820 ext_uns = true;
821 break;
822 case 3: /* <> */
823 tc = TCG_COND_NE;
824 ext_uns = true;
825 break;
826 case 4: /* < */
827 tc = TCG_COND_LT;
828 ext_uns = false;
829 break;
830 case 5: /* >= */
831 tc = TCG_COND_GE;
832 ext_uns = false;
833 break;
834 case 6: /* <= */
835 tc = TCG_COND_LE;
836 ext_uns = false;
837 break;
838 case 7: /* > */
839 tc = TCG_COND_GT;
840 ext_uns = false;
841 break;
842
843 case 14: /* OD */
844 case 15: /* EV */
845 return do_cond(ctx, cf, d, res, NULL, NULL);
846
847 default:
848 g_assert_not_reached();
849 }
850
851 if (cond_need_ext(ctx, d)) {
852 TCGv_i64 tmp = tcg_temp_new_i64();
853
854 if (ext_uns) {
855 tcg_gen_ext32u_i64(tmp, res);
856 } else {
857 tcg_gen_ext32s_i64(tmp, res);
858 }
859 return cond_make_0_tmp(tc, tmp);
860 }
861 return cond_make_0(tc, res);
862 }
863
864 /* Similar, but for shift/extract/deposit conditions. */
865
866 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
867 TCGv_i64 res)
868 {
869 unsigned c, f;
870
871 /* Convert the compressed condition codes to standard.
872 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
873 4-7 are the reverse of 0-3. */
874 c = orig & 3;
875 if (c == 3) {
876 c = 7;
877 }
878 f = (orig & 4) / 4;
879
880 return do_log_cond(ctx, c * 2 + f, d, res);
881 }
882
883 /* Similar, but for unit conditions. */
884
885 static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
886 TCGv_i64 in1, TCGv_i64 in2)
887 {
888 DisasCond cond;
889 TCGv_i64 tmp, cb = NULL;
890 uint64_t d_repl = d ? 0x0000000100000001ull : 1;
891
892 if (cf & 8) {
893 /* Since we want to test lots of carry-out bits all at once, do not
894 * do our normal thing and compute carry-in of bit B+1 since that
895 * leaves us with carry bits spread across two words.
896 */
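/* Per-bit carry-out: cb = (in1 & in2) | ((in1 | in2) & ~res). */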
897 cb = tcg_temp_new_i64();
898 tmp = tcg_temp_new_i64();
899 tcg_gen_or_i64(cb, in1, in2);
900 tcg_gen_and_i64(tmp, in1, in2);
901 tcg_gen_andc_i64(cb, cb, res);
902 tcg_gen_or_i64(cb, cb, tmp);
903 }
904
905 switch (cf >> 1) {
906 case 0: /* never / TR */
907 case 1: /* undefined */
908 case 5: /* undefined */
909 cond = cond_make_f();
910 break;
911
912 case 2: /* SBZ / NBZ */
913 /* See hasless(v,1) from
914 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
915 */
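/* (res - 0x01010101) & ~res & 0x80808080 is nonzero iff some byte of res
   is zero; d_repl extends the pattern to all 8 bytes for doublewords. */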
916 tmp = tcg_temp_new_i64();
917 tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
918 tcg_gen_andc_i64(tmp, tmp, res);
919 tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
920 cond = cond_make_0(TCG_COND_NE, tmp);
921 break;
922
923 case 3: /* SHZ / NHZ */
924 tmp = tcg_temp_new_i64();
925 tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
926 tcg_gen_andc_i64(tmp, tmp, res);
927 tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
928 cond = cond_make_0(TCG_COND_NE, tmp);
929 break;
930
931 case 4: /* SDC / NDC */
932 tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
933 cond = cond_make_0(TCG_COND_NE, cb);
934 break;
935
936 case 6: /* SBC / NBC */
937 tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
938 cond = cond_make_0(TCG_COND_NE, cb);
939 break;
940
941 case 7: /* SHC / NHC */
942 tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
943 cond = cond_make_0(TCG_COND_NE, cb);
944 break;
945
946 default:
947 g_assert_not_reached();
948 }
949 if (cf & 1) {
950 cond.c = tcg_invert_cond(cond.c);
951 }
952
953 return cond;
954 }
955
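/* Bit 32 of the per-bit carry vector CB is the carry out of bit 31, i.e.
   the carry bit for a 32-bit (word) operation; for doublewords, the carry
   out of bit 63 is kept separately in CB_MSB. */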
956 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
957 TCGv_i64 cb, TCGv_i64 cb_msb)
958 {
959 if (cond_need_ext(ctx, d)) {
960 TCGv_i64 t = tcg_temp_new_i64();
961 tcg_gen_extract_i64(t, cb, 32, 1);
962 return t;
963 }
964 return cb_msb;
965 }
966
967 static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
968 {
969 return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
970 }
971
972 /* Compute signed overflow for addition. */
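/* The sign bit of SV is set iff IN1 and IN2 have the same sign and the
   sign of RES differs, i.e. signed overflow occurred. */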
973 static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
974 TCGv_i64 in1, TCGv_i64 in2)
975 {
976 TCGv_i64 sv = tcg_temp_new_i64();
977 TCGv_i64 tmp = tcg_temp_new_i64();
978
979 tcg_gen_xor_i64(sv, res, in1);
980 tcg_gen_xor_i64(tmp, in1, in2);
981 tcg_gen_andc_i64(sv, sv, tmp);
982
983 return sv;
984 }
985
986 /* Compute signed overflow for subtraction. */
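/* The sign bit of SV is set iff IN1 and IN2 have opposite signs and the
   sign of RES differs from that of IN1, i.e. signed overflow occurred. */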
987 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
988 TCGv_i64 in1, TCGv_i64 in2)
989 {
990 TCGv_i64 sv = tcg_temp_new_i64();
991 TCGv_i64 tmp = tcg_temp_new_i64();
992
993 tcg_gen_xor_i64(sv, res, in1);
994 tcg_gen_xor_i64(tmp, in1, in2);
995 tcg_gen_and_i64(sv, sv, tmp);
996
997 return sv;
998 }
999
1000 static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1001 TCGv_i64 in2, unsigned shift, bool is_l,
1002 bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
1003 {
1004 TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
1005 unsigned c = cf >> 1;
1006 DisasCond cond;
1007
1008 dest = tcg_temp_new_i64();
1009 cb = NULL;
1010 cb_msb = NULL;
1011 cb_cond = NULL;
1012
1013 if (shift) {
1014 tmp = tcg_temp_new_i64();
1015 tcg_gen_shli_i64(tmp, in1, shift);
1016 in1 = tmp;
1017 }
1018
1019 if (!is_l || cond_need_cb(c)) {
1020 TCGv_i64 zero = tcg_constant_i64(0);
1021 cb_msb = tcg_temp_new_i64();
1022 cb = tcg_temp_new_i64();
1023
1024 tcg_gen_add2_i64(dest, cb_msb, in1, zero, in2, zero);
1025 if (is_c) {
1026 tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
1027 get_psw_carry(ctx, d), zero);
1028 }
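/* The xors below recover the per-bit carry-in: cb[i] = in1[i] ^ in2[i] ^ dest[i]. */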
1029 tcg_gen_xor_i64(cb, in1, in2);
1030 tcg_gen_xor_i64(cb, cb, dest);
1031 if (cond_need_cb(c)) {
1032 cb_cond = get_carry(ctx, d, cb, cb_msb);
1033 }
1034 } else {
1035 tcg_gen_add_i64(dest, in1, in2);
1036 if (is_c) {
1037 tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
1038 }
1039 }
1040
1041 /* Compute signed overflow if required. */
1042 sv = NULL;
1043 if (is_tsv || cond_need_sv(c)) {
1044 sv = do_add_sv(ctx, dest, in1, in2);
1045 if (is_tsv) {
1046 /* ??? Need to include overflow from shift. */
1047 gen_helper_tsv(tcg_env, sv);
1048 }
1049 }
1050
1051 /* Emit any conditional trap before any writeback. */
1052 cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
1053 if (is_tc) {
1054 tmp = tcg_temp_new_i64();
1055 tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1056 gen_helper_tcond(tcg_env, tmp);
1057 }
1058
1059 /* Write back the result. */
1060 if (!is_l) {
1061 save_or_nullify(ctx, cpu_psw_cb, cb);
1062 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1063 }
1064 save_gpr(ctx, rt, dest);
1065
1066 /* Install the new nullification. */
1067 cond_free(&ctx->null_cond);
1068 ctx->null_cond = cond;
1069 }
1070
1071 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1072 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1073 {
1074 TCGv_i64 tcg_r1, tcg_r2;
1075
1076 if (a->cf) {
1077 nullify_over(ctx);
1078 }
1079 tcg_r1 = load_gpr(ctx, a->r1);
1080 tcg_r2 = load_gpr(ctx, a->r2);
1081 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1082 is_tsv, is_tc, is_c, a->cf, a->d);
1083 return nullify_end(ctx);
1084 }
1085
1086 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1087 bool is_tsv, bool is_tc)
1088 {
1089 TCGv_i64 tcg_im, tcg_r2;
1090
1091 if (a->cf) {
1092 nullify_over(ctx);
1093 }
1094 tcg_im = tcg_constant_i64(a->i);
1095 tcg_r2 = load_gpr(ctx, a->r);
1096 /* All ADDI conditions are 32-bit. */
1097 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1098 return nullify_end(ctx);
1099 }
1100
1101 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1102 TCGv_i64 in2, bool is_tsv, bool is_b,
1103 bool is_tc, unsigned cf, bool d)
1104 {
1105 TCGv_i64 dest, sv, cb, cb_msb, zero, tmp;
1106 unsigned c = cf >> 1;
1107 DisasCond cond;
1108
1109 dest = tcg_temp_new_i64();
1110 cb = tcg_temp_new_i64();
1111 cb_msb = tcg_temp_new_i64();
1112
1113 zero = tcg_constant_i64(0);
1114 if (is_b) {
1115 /* DEST,C = IN1 + ~IN2 + C. */
1116 tcg_gen_not_i64(cb, in2);
1117 tcg_gen_add2_i64(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
1118 tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, zero);
1119 tcg_gen_xor_i64(cb, cb, in1);
1120 tcg_gen_xor_i64(cb, cb, dest);
1121 } else {
1122 /*
1123 * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1124 * operations by seeding the high word with 1 and subtracting.
1125 */
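/*
 * I.e. compute the 128-bit value (1:in1) - (0:in2): the low half is
 * in1 - in2 and the high half is the carry-out of in1 + ~in2 + 1.
 */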
1126 TCGv_i64 one = tcg_constant_i64(1);
1127 tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, zero);
1128 tcg_gen_eqv_i64(cb, in1, in2);
1129 tcg_gen_xor_i64(cb, cb, dest);
1130 }
1131
1132 /* Compute signed overflow if required. */
1133 sv = NULL;
1134 if (is_tsv || cond_need_sv(c)) {
1135 sv = do_sub_sv(ctx, dest, in1, in2);
1136 if (is_tsv) {
1137 gen_helper_tsv(tcg_env, sv);
1138 }
1139 }
1140
1141 /* Compute the condition. We cannot use the special case for borrow. */
1142 if (!is_b) {
1143 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1144 } else {
1145 cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
1146 }
1147
1148 /* Emit any conditional trap before any writeback. */
1149 if (is_tc) {
1150 tmp = tcg_temp_new_i64();
1151 tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1152 gen_helper_tcond(tcg_env, tmp);
1153 }
1154
1155 /* Write back the result. */
1156 save_or_nullify(ctx, cpu_psw_cb, cb);
1157 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1158 save_gpr(ctx, rt, dest);
1159
1160 /* Install the new nullification. */
1161 cond_free(&ctx->null_cond);
1162 ctx->null_cond = cond;
1163 }
1164
1165 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1166 bool is_tsv, bool is_b, bool is_tc)
1167 {
1168 TCGv_i64 tcg_r1, tcg_r2;
1169
1170 if (a->cf) {
1171 nullify_over(ctx);
1172 }
1173 tcg_r1 = load_gpr(ctx, a->r1);
1174 tcg_r2 = load_gpr(ctx, a->r2);
1175 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1176 return nullify_end(ctx);
1177 }
1178
1179 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1180 {
1181 TCGv_i64 tcg_im, tcg_r2;
1182
1183 if (a->cf) {
1184 nullify_over(ctx);
1185 }
1186 tcg_im = tcg_constant_i64(a->i);
1187 tcg_r2 = load_gpr(ctx, a->r);
1188 /* All SUBI conditions are 32-bit. */
1189 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1190 return nullify_end(ctx);
1191 }
1192
1193 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1194 TCGv_i64 in2, unsigned cf, bool d)
1195 {
1196 TCGv_i64 dest, sv;
1197 DisasCond cond;
1198
1199 dest = tcg_temp_new_i64();
1200 tcg_gen_sub_i64(dest, in1, in2);
1201
1202 /* Compute signed overflow if required. */
1203 sv = NULL;
1204 if (cond_need_sv(cf >> 1)) {
1205 sv = do_sub_sv(ctx, dest, in1, in2);
1206 }
1207
1208 /* Form the condition for the compare. */
1209 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1210
1211 /* Clear. */
1212 tcg_gen_movi_i64(dest, 0);
1213 save_gpr(ctx, rt, dest);
1214
1215 /* Install the new nullification. */
1216 cond_free(&ctx->null_cond);
1217 ctx->null_cond = cond;
1218 }
1219
1220 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1221 TCGv_i64 in2, unsigned cf, bool d,
1222 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1223 {
1224 TCGv_i64 dest = dest_gpr(ctx, rt);
1225
1226 /* Perform the operation, and writeback. */
1227 fn(dest, in1, in2);
1228 save_gpr(ctx, rt, dest);
1229
1230 /* Install the new nullification. */
1231 cond_free(&ctx->null_cond);
1232 if (cf) {
1233 ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1234 }
1235 }
1236
1237 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1238 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1239 {
1240 TCGv_i64 tcg_r1, tcg_r2;
1241
1242 if (a->cf) {
1243 nullify_over(ctx);
1244 }
1245 tcg_r1 = load_gpr(ctx, a->r1);
1246 tcg_r2 = load_gpr(ctx, a->r2);
1247 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1248 return nullify_end(ctx);
1249 }
1250
1251 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1252 TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
1253 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1254 {
1255 TCGv_i64 dest;
1256 DisasCond cond;
1257
1258 if (cf == 0) {
1259 dest = dest_gpr(ctx, rt);
1260 fn(dest, in1, in2);
1261 save_gpr(ctx, rt, dest);
1262 cond_free(&ctx->null_cond);
1263 } else {
1264 dest = tcg_temp_new_i64();
1265 fn(dest, in1, in2);
1266
1267 cond = do_unit_cond(cf, d, dest, in1, in2);
1268
1269 if (is_tc) {
1270 TCGv_i64 tmp = tcg_temp_new_i64();
1271 tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1272 gen_helper_tcond(tcg_env, tmp);
1273 }
1274 save_gpr(ctx, rt, dest);
1275
1276 cond_free(&ctx->null_cond);
1277 ctx->null_cond = cond;
1278 }
1279 }
1280
1281 #ifndef CONFIG_USER_ONLY
1282 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1283 from the top 2 bits of the base register. There are a few system
1284 instructions that have a 3-bit space specifier, for which SR0 is
1285 not special. To handle this, pass ~SP. */
1286 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
1287 {
1288 TCGv_ptr ptr;
1289 TCGv_i64 tmp;
1290 TCGv_i64 spc;
1291
1292 if (sp != 0) {
1293 if (sp < 0) {
1294 sp = ~sp;
1295 }
1296 spc = tcg_temp_new_i64();
1297 load_spr(ctx, spc, sp);
1298 return spc;
1299 }
1300 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1301 return cpu_srH;
1302 }
1303
1304 ptr = tcg_temp_new_ptr();
1305 tmp = tcg_temp_new_i64();
1306 spc = tcg_temp_new_i64();
1307
1308 /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1309 tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1310 tcg_gen_andi_i64(tmp, tmp, 030);
1311 tcg_gen_trunc_i64_ptr(ptr, tmp);
1312
1313 tcg_gen_add_ptr(ptr, ptr, tcg_env);
1314 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1315
1316 return spc;
1317 }
1318 #endif
1319
1320 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1321 unsigned rb, unsigned rx, int scale, int64_t disp,
1322 unsigned sp, int modify, bool is_phys)
1323 {
1324 TCGv_i64 base = load_gpr(ctx, rb);
1325 TCGv_i64 ofs;
1326 TCGv_i64 addr;
1327
1328 /* Note that RX is mutually exclusive with DISP. */
1329 if (rx) {
1330 ofs = tcg_temp_new_i64();
1331 tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1332 tcg_gen_add_i64(ofs, ofs, base);
1333 } else if (disp || modify) {
1334 ofs = tcg_temp_new_i64();
1335 tcg_gen_addi_i64(ofs, base, disp);
1336 } else {
1337 ofs = base;
1338 }
1339
1340 *pofs = ofs;
1341 *pgva = addr = tcg_temp_new_i64();
1342 tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
1343 #ifndef CONFIG_USER_ONLY
1344 if (!is_phys) {
1345 tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1346 }
1347 #endif
1348 }
1349
1350 /* Emit a memory load. The modify parameter should be
1351 * < 0 for pre-modify,
1352 * > 0 for post-modify,
1353 * = 0 for no base register update.
1354 */
1355 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1356 unsigned rx, int scale, int64_t disp,
1357 unsigned sp, int modify, MemOp mop)
1358 {
1359 TCGv_i64 ofs;
1360 TCGv_i64 addr;
1361
1362 /* Caller uses nullify_over/nullify_end. */
1363 assert(ctx->null_cond.c == TCG_COND_NEVER);
1364
1365 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1366 ctx->mmu_idx == MMU_PHYS_IDX);
1367 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1368 if (modify) {
1369 save_gpr(ctx, rb, ofs);
1370 }
1371 }
1372
1373 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1374 unsigned rx, int scale, int64_t disp,
1375 unsigned sp, int modify, MemOp mop)
1376 {
1377 TCGv_i64 ofs;
1378 TCGv_i64 addr;
1379
1380 /* Caller uses nullify_over/nullify_end. */
1381 assert(ctx->null_cond.c == TCG_COND_NEVER);
1382
1383 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1384 ctx->mmu_idx == MMU_PHYS_IDX);
1385 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1386 if (modify) {
1387 save_gpr(ctx, rb, ofs);
1388 }
1389 }
1390
1391 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1392 unsigned rx, int scale, int64_t disp,
1393 unsigned sp, int modify, MemOp mop)
1394 {
1395 TCGv_i64 ofs;
1396 TCGv_i64 addr;
1397
1398 /* Caller uses nullify_over/nullify_end. */
1399 assert(ctx->null_cond.c == TCG_COND_NEVER);
1400
1401 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1402 ctx->mmu_idx == MMU_PHYS_IDX);
1403 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1404 if (modify) {
1405 save_gpr(ctx, rb, ofs);
1406 }
1407 }
1408
1409 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1410 unsigned rx, int scale, int64_t disp,
1411 unsigned sp, int modify, MemOp mop)
1412 {
1413 TCGv_i64 ofs;
1414 TCGv_i64 addr;
1415
1416 /* Caller uses nullify_over/nullify_end. */
1417 assert(ctx->null_cond.c == TCG_COND_NEVER);
1418
1419 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1420 ctx->mmu_idx == MMU_PHYS_IDX);
1421 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1422 if (modify) {
1423 save_gpr(ctx, rb, ofs);
1424 }
1425 }
1426
1427 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1428 unsigned rx, int scale, int64_t disp,
1429 unsigned sp, int modify, MemOp mop)
1430 {
1431 TCGv_i64 dest;
1432
1433 nullify_over(ctx);
1434
1435 if (modify == 0) {
1436 /* No base register update. */
1437 dest = dest_gpr(ctx, rt);
1438 } else {
1439 /* Make sure that if RT == RB, we see the result of the load. */
1440 dest = tcg_temp_new_i64();
1441 }
1442 do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1443 save_gpr(ctx, rt, dest);
1444
1445 return nullify_end(ctx);
1446 }
1447
1448 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1449 unsigned rx, int scale, int64_t disp,
1450 unsigned sp, int modify)
1451 {
1452 TCGv_i32 tmp;
1453
1454 nullify_over(ctx);
1455
1456 tmp = tcg_temp_new_i32();
1457 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1458 save_frw_i32(rt, tmp);
1459
1460 if (rt == 0) {
1461 gen_helper_loaded_fr0(tcg_env);
1462 }
1463
1464 return nullify_end(ctx);
1465 }
1466
1467 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1468 {
1469 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1470 a->disp, a->sp, a->m);
1471 }
1472
1473 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1474 unsigned rx, int scale, int64_t disp,
1475 unsigned sp, int modify)
1476 {
1477 TCGv_i64 tmp;
1478
1479 nullify_over(ctx);
1480
1481 tmp = tcg_temp_new_i64();
1482 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1483 save_frd(rt, tmp);
1484
1485 if (rt == 0) {
1486 gen_helper_loaded_fr0(tcg_env);
1487 }
1488
1489 return nullify_end(ctx);
1490 }
1491
1492 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1493 {
1494 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1495 a->disp, a->sp, a->m);
1496 }
1497
1498 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1499 int64_t disp, unsigned sp,
1500 int modify, MemOp mop)
1501 {
1502 nullify_over(ctx);
1503 do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1504 return nullify_end(ctx);
1505 }
1506
1507 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1508 unsigned rx, int scale, int64_t disp,
1509 unsigned sp, int modify)
1510 {
1511 TCGv_i32 tmp;
1512
1513 nullify_over(ctx);
1514
1515 tmp = load_frw_i32(rt);
1516 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1517
1518 return nullify_end(ctx);
1519 }
1520
1521 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1522 {
1523 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1524 a->disp, a->sp, a->m);
1525 }
1526
1527 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1528 unsigned rx, int scale, int64_t disp,
1529 unsigned sp, int modify)
1530 {
1531 TCGv_i64 tmp;
1532
1533 nullify_over(ctx);
1534
1535 tmp = load_frd(rt);
1536 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1537
1538 return nullify_end(ctx);
1539 }
1540
1541 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1542 {
1543 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1544 a->disp, a->sp, a->m);
1545 }
1546
1547 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1548 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1549 {
1550 TCGv_i32 tmp;
1551
1552 nullify_over(ctx);
1553 tmp = load_frw0_i32(ra);
1554
1555 func(tmp, tcg_env, tmp);
1556
1557 save_frw_i32(rt, tmp);
1558 return nullify_end(ctx);
1559 }
1560
1561 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1562 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1563 {
1564 TCGv_i32 dst;
1565 TCGv_i64 src;
1566
1567 nullify_over(ctx);
1568 src = load_frd(ra);
1569 dst = tcg_temp_new_i32();
1570
1571 func(dst, tcg_env, src);
1572
1573 save_frw_i32(rt, dst);
1574 return nullify_end(ctx);
1575 }
1576
1577 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1578 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1579 {
1580 TCGv_i64 tmp;
1581
1582 nullify_over(ctx);
1583 tmp = load_frd0(ra);
1584
1585 func(tmp, tcg_env, tmp);
1586
1587 save_frd(rt, tmp);
1588 return nullify_end(ctx);
1589 }
1590
1591 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1592 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1593 {
1594 TCGv_i32 src;
1595 TCGv_i64 dst;
1596
1597 nullify_over(ctx);
1598 src = load_frw0_i32(ra);
1599 dst = tcg_temp_new_i64();
1600
1601 func(dst, tcg_env, src);
1602
1603 save_frd(rt, dst);
1604 return nullify_end(ctx);
1605 }
1606
1607 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1608 unsigned ra, unsigned rb,
1609 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1610 {
1611 TCGv_i32 a, b;
1612
1613 nullify_over(ctx);
1614 a = load_frw0_i32(ra);
1615 b = load_frw0_i32(rb);
1616
1617 func(a, tcg_env, a, b);
1618
1619 save_frw_i32(rt, a);
1620 return nullify_end(ctx);
1621 }
1622
1623 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1624 unsigned ra, unsigned rb,
1625 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1626 {
1627 TCGv_i64 a, b;
1628
1629 nullify_over(ctx);
1630 a = load_frd0(ra);
1631 b = load_frd0(rb);
1632
1633 func(a, tcg_env, a, b);
1634
1635 save_frd(rt, a);
1636 return nullify_end(ctx);
1637 }
1638
1639 /* Emit an unconditional branch to a direct target, which may or may not
1640 have already had nullification handled. */
1641 static bool do_dbranch(DisasContext *ctx, uint64_t dest,
1642 unsigned link, bool is_n)
1643 {
1644 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1645 if (link != 0) {
1646 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1647 }
1648 ctx->iaoq_n = dest;
1649 if (is_n) {
1650 ctx->null_cond.c = TCG_COND_ALWAYS;
1651 }
1652 } else {
1653 nullify_over(ctx);
1654
1655 if (link != 0) {
1656 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1657 }
1658
1659 if (is_n && use_nullify_skip(ctx)) {
1660 nullify_set(ctx, 0);
1661 gen_goto_tb(ctx, 0, dest, dest + 4);
1662 } else {
1663 nullify_set(ctx, is_n);
1664 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1665 }
1666
1667 nullify_end(ctx);
1668
1669 nullify_set(ctx, 0);
1670 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1671 ctx->base.is_jmp = DISAS_NORETURN;
1672 }
1673 return true;
1674 }
1675
1676 /* Emit a conditional branch to a direct target. If the branch itself
1677 is nullified, we should have already used nullify_over. */
1678 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1679 DisasCond *cond)
1680 {
1681 uint64_t dest = iaoq_dest(ctx, disp);
1682 TCGLabel *taken = NULL;
1683 TCGCond c = cond->c;
1684 bool n;
1685
1686 assert(ctx->null_cond.c == TCG_COND_NEVER);
1687
1688 /* Handle TRUE and NEVER as direct branches. */
1689 if (c == TCG_COND_ALWAYS) {
1690 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1691 }
1692 if (c == TCG_COND_NEVER) {
1693 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1694 }
1695
1696 taken = gen_new_label();
1697 tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1698 cond_free(cond);
1699
1700 /* Not taken: Condition not satisfied; nullify on backward branches. */
1701 n = is_n && disp < 0;
1702 if (n && use_nullify_skip(ctx)) {
1703 nullify_set(ctx, 0);
1704 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1705 } else {
1706 if (!n && ctx->null_lab) {
1707 gen_set_label(ctx->null_lab);
1708 ctx->null_lab = NULL;
1709 }
1710 nullify_set(ctx, n);
1711 if (ctx->iaoq_n == -1) {
1712 /* The temporary iaoq_n_var died at the branch above.
1713 Regenerate it here instead of saving it. */
1714 tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1715 }
1716 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1717 }
1718
1719 gen_set_label(taken);
1720
1721 /* Taken: Condition satisfied; nullify on forward branches. */
1722 n = is_n && disp >= 0;
1723 if (n && use_nullify_skip(ctx)) {
1724 nullify_set(ctx, 0);
1725 gen_goto_tb(ctx, 1, dest, dest + 4);
1726 } else {
1727 nullify_set(ctx, n);
1728 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1729 }
1730
1731 /* Not taken: the branch itself was nullified. */
1732 if (ctx->null_lab) {
1733 gen_set_label(ctx->null_lab);
1734 ctx->null_lab = NULL;
1735 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1736 } else {
1737 ctx->base.is_jmp = DISAS_NORETURN;
1738 }
1739 return true;
1740 }
1741
1742 /* Emit an unconditional branch to an indirect target. This handles
1743 nullification of the branch itself. */
1744 static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
1745 unsigned link, bool is_n)
1746 {
1747 TCGv_i64 a0, a1, next, tmp;
1748 TCGCond c;
1749
1750 assert(ctx->null_lab == NULL);
1751
1752 if (ctx->null_cond.c == TCG_COND_NEVER) {
1753 if (link != 0) {
1754 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1755 }
1756 next = tcg_temp_new_i64();
1757 tcg_gen_mov_i64(next, dest);
1758 if (is_n) {
1759 if (use_nullify_skip(ctx)) {
1760 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1761 tcg_gen_addi_i64(next, next, 4);
1762 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1763 nullify_set(ctx, 0);
1764 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1765 return true;
1766 }
1767 ctx->null_cond.c = TCG_COND_ALWAYS;
1768 }
1769 ctx->iaoq_n = -1;
1770 ctx->iaoq_n_var = next;
1771 } else if (is_n && use_nullify_skip(ctx)) {
1772 /* The (conditional) branch, B, nullifies the next insn, N,
1773 and we're allowed to skip execution of N (no single-step or
1774 tracepoint in effect). Since the goto_ptr that we must use
1775 for the indirect branch consumes no special resources, we
1776 can (conditionally) skip B and continue execution. */
1777 /* The use_nullify_skip test implies we have a known control path. */
1778 tcg_debug_assert(ctx->iaoq_b != -1);
1779 tcg_debug_assert(ctx->iaoq_n != -1);
1780
1781 /* We do have to handle the non-local temporary, DEST, before
1782 branching. Since IAOQ_F is not really live at this point, we
1783 can simply store DEST optimistically. Similarly with IAOQ_B. */
1784 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1785 next = tcg_temp_new_i64();
1786 tcg_gen_addi_i64(next, dest, 4);
1787 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1788
1789 nullify_over(ctx);
1790 if (link != 0) {
1791 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1792 }
1793 tcg_gen_lookup_and_goto_ptr();
1794 return nullify_end(ctx);
1795 } else {
1796 c = ctx->null_cond.c;
1797 a0 = ctx->null_cond.a0;
1798 a1 = ctx->null_cond.a1;
1799
1800 tmp = tcg_temp_new_i64();
1801 next = tcg_temp_new_i64();
1802
1803 copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1804 tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1805 ctx->iaoq_n = -1;
1806 ctx->iaoq_n_var = next;
1807
1808 if (link != 0) {
1809 tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1810 }
1811
1812 if (is_n) {
1813 /* The branch nullifies the next insn, which means the state of N
1814 after the branch is the inverse of the state of N that applied
1815 to the branch. */
1816 tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1817 cond_free(&ctx->null_cond);
1818 ctx->null_cond = cond_make_n();
1819 ctx->psw_n_nonzero = true;
1820 } else {
1821 cond_free(&ctx->null_cond);
1822 }
1823 }
1824 return true;
1825 }
1826
1827 /* Implement
1828 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1829 * IAOQ_Next{30..31} ← GR[b]{30..31};
1830 * else
1831 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1832 * which keeps the privilege level from being increased.
1833 */
1834 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1835 {
1836 TCGv_i64 dest;
1837 switch (ctx->privilege) {
1838 case 0:
1839 /* Privilege 0 is maximum and is allowed to decrease. */
1840 return offset;
1841 case 3:
1842 /* Privilege 3 is minimum and is never allowed to increase. */
1843 dest = tcg_temp_new_i64();
1844 tcg_gen_ori_i64(dest, offset, 3);
1845 break;
1846 default:
1847 dest = tcg_temp_new_i64();
1848 tcg_gen_andi_i64(dest, offset, -4);
1849 tcg_gen_ori_i64(dest, dest, ctx->privilege);
1850 tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1851 break;
1852 }
1853 return dest;
1854 }
1855
1856 #ifdef CONFIG_USER_ONLY
1857 /* On Linux, page zero is normally marked execute only + gateway.
1858 Therefore normal read or write is supposed to fail, but specific
1859 offsets have kernel code mapped to raise permissions to implement
1860 system calls. Handling this via an explicit check here, rather
1861 than in the "be disp(sr2,r0)" instruction that probably sent us
1862 here, is the easiest way to handle the branch delay slot on the
1863 aforementioned BE. */
1864 static void do_page_zero(DisasContext *ctx)
1865 {
1866 TCGv_i64 tmp;
1867
1868 /* If by some means we get here with PSW[N]=1, that implies that
1869 the B,GATE instruction would be skipped, and we'd fault on the
1870 next insn within the privileged page. */
1871 switch (ctx->null_cond.c) {
1872 case TCG_COND_NEVER:
1873 break;
1874 case TCG_COND_ALWAYS:
1875 tcg_gen_movi_i64(cpu_psw_n, 0);
1876 goto do_sigill;
1877 default:
1878 /* Since this is always the first (and only) insn within the
1879 TB, we should know the state of PSW[N] from TB->FLAGS. */
1880 g_assert_not_reached();
1881 }
1882
1883 /* Check that we didn't arrive here via some means that allowed
1884 non-sequential instruction execution. Normally the PSW[B] bit
1885 detects this by disallowing the B,GATE instruction to execute
1886 under such conditions. */
1887 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1888 goto do_sigill;
1889 }
1890
1891 switch (ctx->iaoq_f & -4) {
1892 case 0x00: /* Null pointer call */
1893 gen_excp_1(EXCP_IMP);
1894 ctx->base.is_jmp = DISAS_NORETURN;
1895 break;
1896
1897 case 0xb0: /* LWS */
1898 gen_excp_1(EXCP_SYSCALL_LWS);
1899 ctx->base.is_jmp = DISAS_NORETURN;
1900 break;
1901
1902 case 0xe0: /* SET_THREAD_POINTER */
1903 tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1904 tmp = tcg_temp_new_i64();
1905 tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1906 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1907 tcg_gen_addi_i64(tmp, tmp, 4);
1908 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1909 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1910 break;
1911
1912 case 0x100: /* SYSCALL */
1913 gen_excp_1(EXCP_SYSCALL);
1914 ctx->base.is_jmp = DISAS_NORETURN;
1915 break;
1916
1917 default:
1918 do_sigill:
1919 gen_excp_1(EXCP_ILL);
1920 ctx->base.is_jmp = DISAS_NORETURN;
1921 break;
1922 }
1923 }
1924 #endif
1925
1926 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1927 {
1928 cond_free(&ctx->null_cond);
1929 return true;
1930 }
1931
1932 static bool trans_break(DisasContext *ctx, arg_break *a)
1933 {
1934 return gen_excp_iir(ctx, EXCP_BREAK);
1935 }
1936
1937 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1938 {
1939 /* No point in nullifying the memory barrier. */
1940 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1941
1942 cond_free(&ctx->null_cond);
1943 return true;
1944 }
1945
1946 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1947 {
1948 unsigned rt = a->t;
1949 TCGv_i64 tmp = dest_gpr(ctx, rt);
1950 tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1951 save_gpr(ctx, rt, tmp);
1952
1953 cond_free(&ctx->null_cond);
1954 return true;
1955 }
1956
1957 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1958 {
1959 unsigned rt = a->t;
1960 unsigned rs = a->sp;
1961 TCGv_i64 t0 = tcg_temp_new_i64();
1962
1963 load_spr(ctx, t0, rs);
1964 tcg_gen_shri_i64(t0, t0, 32);
1965
1966 save_gpr(ctx, rt, t0);
1967
1968 cond_free(&ctx->null_cond);
1969 return true;
1970 }
1971
1972 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1973 {
1974 unsigned rt = a->t;
1975 unsigned ctl = a->r;
1976 TCGv_i64 tmp;
1977
1978 switch (ctl) {
1979 case CR_SAR:
1980 if (a->e == 0) {
1981 /* MFSAR without ,W masks low 5 bits. */
1982 tmp = dest_gpr(ctx, rt);
1983 tcg_gen_andi_i64(tmp, cpu_sar, 31);
1984 save_gpr(ctx, rt, tmp);
1985 goto done;
1986 }
1987 save_gpr(ctx, rt, cpu_sar);
1988 goto done;
1989 case CR_IT: /* Interval Timer */
1990 /* FIXME: Respect PSW_S bit. */
1991 nullify_over(ctx);
1992 tmp = dest_gpr(ctx, rt);
1993 if (translator_io_start(&ctx->base)) {
1994 gen_helper_read_interval_timer(tmp);
1995 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1996 } else {
1997 gen_helper_read_interval_timer(tmp);
1998 }
1999 save_gpr(ctx, rt, tmp);
2000 return nullify_end(ctx);
2001 case 26:
2002 case 27:
2003 break;
2004 default:
2005 /* All other control registers are privileged. */
2006 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2007 break;
2008 }
2009
2010 tmp = tcg_temp_new_i64();
2011 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2012 save_gpr(ctx, rt, tmp);
2013
2014 done:
2015 cond_free(&ctx->null_cond);
2016 return true;
2017 }
2018
2019 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2020 {
2021 unsigned rr = a->r;
2022 unsigned rs = a->sp;
2023 TCGv_i64 tmp;
2024
2025 if (rs >= 5) {
2026 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2027 }
2028 nullify_over(ctx);
2029
2030 tmp = tcg_temp_new_i64();
2031 tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2032
2033 if (rs >= 4) {
2034 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2035 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2036 } else {
2037 tcg_gen_mov_i64(cpu_sr[rs], tmp);
2038 }
2039
2040 return nullify_end(ctx);
2041 }
2042
2043 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2044 {
2045 unsigned ctl = a->t;
2046 TCGv_i64 reg;
2047 TCGv_i64 tmp;
2048
2049 if (ctl == CR_SAR) {
2050 reg = load_gpr(ctx, a->r);
2051 tmp = tcg_temp_new_i64();
2052 tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2053 save_or_nullify(ctx, cpu_sar, tmp);
2054
2055 cond_free(&ctx->null_cond);
2056 return true;
2057 }
2058
2059 /* All other control registers are privileged or read-only. */
2060 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2061
2062 #ifndef CONFIG_USER_ONLY
2063 nullify_over(ctx);
2064 reg = load_gpr(ctx, a->r);
2065
2066 switch (ctl) {
2067 case CR_IT:
2068 gen_helper_write_interval_timer(tcg_env, reg);
2069 break;
2070 case CR_EIRR:
2071 gen_helper_write_eirr(tcg_env, reg);
2072 break;
2073 case CR_EIEM:
2074 gen_helper_write_eiem(tcg_env, reg);
2075 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2076 break;
2077
2078 case CR_IIASQ:
2079 case CR_IIAOQ:
2080 /* FIXME: Respect PSW_Q bit */
2081 /* The write advances the queue and stores to the back element. */
2082 tmp = tcg_temp_new_i64();
2083 tcg_gen_ld_i64(tmp, tcg_env,
2084 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2085 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2086 tcg_gen_st_i64(reg, tcg_env,
2087 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2088 break;
2089
2090 case CR_PID1:
2091 case CR_PID2:
2092 case CR_PID3:
2093 case CR_PID4:
2094 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2095 #ifndef CONFIG_USER_ONLY
2096 gen_helper_change_prot_id(tcg_env);
2097 #endif
2098 break;
2099
2100 default:
2101 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2102 break;
2103 }
2104 return nullify_end(ctx);
2105 #endif
2106 }
2107
2108 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2109 {
2110 TCGv_i64 tmp = tcg_temp_new_i64();
2111
2112 tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2113 tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2114 save_or_nullify(ctx, cpu_sar, tmp);
2115
2116 cond_free(&ctx->null_cond);
2117 return true;
2118 }
2119
2120 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2121 {
2122 TCGv_i64 dest = dest_gpr(ctx, a->t);
2123
2124 #ifdef CONFIG_USER_ONLY
2125 /* We don't implement space registers in user mode. */
2126 tcg_gen_movi_i64(dest, 0);
2127 #else
2128 tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2129 tcg_gen_shri_i64(dest, dest, 32);
2130 #endif
2131 save_gpr(ctx, a->t, dest);
2132
2133 cond_free(&ctx->null_cond);
2134 return true;
2135 }
2136
2137 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2138 {
2139 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2140 #ifndef CONFIG_USER_ONLY
2141 TCGv_i64 tmp;
2142
2143 nullify_over(ctx);
2144
2145 tmp = tcg_temp_new_i64();
2146 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2147 tcg_gen_andi_i64(tmp, tmp, ~a->i);
2148 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2149 save_gpr(ctx, a->t, tmp);
2150
2151 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2152 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2153 return nullify_end(ctx);
2154 #endif
2155 }
2156
2157 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2158 {
2159 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2160 #ifndef CONFIG_USER_ONLY
2161 TCGv_i64 tmp;
2162
2163 nullify_over(ctx);
2164
2165 tmp = tcg_temp_new_i64();
2166 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2167 tcg_gen_ori_i64(tmp, tmp, a->i);
2168 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2169 save_gpr(ctx, a->t, tmp);
2170
2171 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2172 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2173 return nullify_end(ctx);
2174 #endif
2175 }
2176
2177 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2178 {
2179 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2180 #ifndef CONFIG_USER_ONLY
2181 TCGv_i64 tmp, reg;
2182 nullify_over(ctx);
2183
2184 reg = load_gpr(ctx, a->r);
2185 tmp = tcg_temp_new_i64();
2186 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2187
2188 /* Exit the TB to recognize new interrupts. */
2189 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2190 return nullify_end(ctx);
2191 #endif
2192 }
2193
2194 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2195 {
2196 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2197 #ifndef CONFIG_USER_ONLY
2198 nullify_over(ctx);
2199
2200 if (rfi_r) {
2201 gen_helper_rfi_r(tcg_env);
2202 } else {
2203 gen_helper_rfi(tcg_env);
2204 }
2205 /* Exit the TB to recognize new interrupts. */
2206 tcg_gen_exit_tb(NULL, 0);
2207 ctx->base.is_jmp = DISAS_NORETURN;
2208
2209 return nullify_end(ctx);
2210 #endif
2211 }
2212
2213 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2214 {
2215 return do_rfi(ctx, false);
2216 }
2217
2218 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2219 {
2220 return do_rfi(ctx, true);
2221 }
2222
2223 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2224 {
2225 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2226 #ifndef CONFIG_USER_ONLY
2227 nullify_over(ctx);
2228 gen_helper_halt(tcg_env);
2229 ctx->base.is_jmp = DISAS_NORETURN;
2230 return nullify_end(ctx);
2231 #endif
2232 }
2233
2234 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2235 {
2236 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2237 #ifndef CONFIG_USER_ONLY
2238 nullify_over(ctx);
2239 gen_helper_reset(tcg_env);
2240 ctx->base.is_jmp = DISAS_NORETURN;
2241 return nullify_end(ctx);
2242 #endif
2243 }
2244
2245 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2246 {
2247 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2248 #ifndef CONFIG_USER_ONLY
2249 nullify_over(ctx);
2250 gen_helper_getshadowregs(tcg_env);
2251 return nullify_end(ctx);
2252 #endif
2253 }
2254
2255 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2256 {
2257 if (a->m) {
2258 TCGv_i64 dest = dest_gpr(ctx, a->b);
2259 TCGv_i64 src1 = load_gpr(ctx, a->b);
2260 TCGv_i64 src2 = load_gpr(ctx, a->x);
2261
2262 /* The only thing we need to do is the base register modification. */
2263 tcg_gen_add_i64(dest, src1, src2);
2264 save_gpr(ctx, a->b, dest);
2265 }
2266 cond_free(&ctx->null_cond);
2267 return true;
2268 }
2269
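/*
 * PROBE and PROBEI: test whether a read or write access to the given
 * virtual address would be allowed at the requested privilege level
 * (an immediate, or the low two bits of a register).  The helper
 * performs the check and writes the result to GR[rt].
 */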
2270 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2271 {
2272 TCGv_i64 dest, ofs;
2273 TCGv_i32 level, want;
2274 TCGv_i64 addr;
2275
2276 nullify_over(ctx);
2277
2278 dest = dest_gpr(ctx, a->t);
2279 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2280
2281 if (a->imm) {
2282 level = tcg_constant_i32(a->ri);
2283 } else {
2284 level = tcg_temp_new_i32();
2285 tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2286 tcg_gen_andi_i32(level, level, 3);
2287 }
2288 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2289
2290 gen_helper_probe(dest, tcg_env, addr, level, want);
2291
2292 save_gpr(ctx, a->t, dest);
2293 return nullify_end(ctx);
2294 }
2295
2296 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2297 {
2298 if (ctx->is_pa20) {
2299 return false;
2300 }
2301 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2302 #ifndef CONFIG_USER_ONLY
2303 TCGv_i64 addr;
2304 TCGv_i64 ofs, reg;
2305
2306 nullify_over(ctx);
2307
2308 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2309 reg = load_gpr(ctx, a->r);
2310 if (a->addr) {
2311 gen_helper_itlba_pa11(tcg_env, addr, reg);
2312 } else {
2313 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2314 }
2315
2316 /* Exit TB for TLB change if mmu is enabled. */
2317 if (ctx->tb_flags & PSW_C) {
2318 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2319 }
2320 return nullify_end(ctx);
2321 #endif
2322 }
2323
2324 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2325 {
2326 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2327 #ifndef CONFIG_USER_ONLY
2328 TCGv_i64 addr;
2329 TCGv_i64 ofs;
2330
2331 nullify_over(ctx);
2332
2333 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2334 if (a->m) {
2335 save_gpr(ctx, a->b, ofs);
2336 }
2337 if (a->local) {
2338 gen_helper_ptlbe(tcg_env);
2339 } else {
2340 gen_helper_ptlb(tcg_env, addr);
2341 }
2342
2343 /* Exit TB for TLB change if mmu is enabled. */
2344 if (ctx->tb_flags & PSW_C) {
2345 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2346 }
2347 return nullify_end(ctx);
2348 #endif
2349 }
2350
2351 /*
2352 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2353 * See
2354 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2355 * page 13-9 (195/206)
2356 */
2357 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2358 {
2359 if (ctx->is_pa20) {
2360 return false;
2361 }
2362 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2363 #ifndef CONFIG_USER_ONLY
2364 TCGv_i64 addr, atl, stl;
2365 TCGv_i64 reg;
2366
2367 nullify_over(ctx);
2368
2369 /*
2370 * FIXME:
2371 * if (not (pcxl or pcxl2))
2372 * return gen_illegal(ctx);
2373 */
2374
2375 atl = tcg_temp_new_i64();
2376 stl = tcg_temp_new_i64();
2377 addr = tcg_temp_new_i64();
2378
2379 tcg_gen_ld32u_i64(stl, tcg_env,
2380 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2381 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2382 tcg_gen_ld32u_i64(atl, tcg_env,
2383 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2384 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2385 tcg_gen_shli_i64(stl, stl, 32);
2386 tcg_gen_or_i64(addr, atl, stl);
2387
2388 reg = load_gpr(ctx, a->r);
2389 if (a->addr) {
2390 gen_helper_itlba_pa11(tcg_env, addr, reg);
2391 } else {
2392 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2393 }
2394
2395 /* Exit TB for TLB change if mmu is enabled. */
2396 if (ctx->tb_flags & PSW_C) {
2397 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2398 }
2399 return nullify_end(ctx);
2400 #endif
2401 }
2402
2403 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2404 {
2405 if (!ctx->is_pa20) {
2406 return false;
2407 }
2408 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2409 #ifndef CONFIG_USER_ONLY
2410 nullify_over(ctx);
2411 {
2412 TCGv_i64 src1 = load_gpr(ctx, a->r1);
2413 TCGv_i64 src2 = load_gpr(ctx, a->r2);
2414
2415 if (a->data) {
2416 gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2417 } else {
2418 gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2419 }
2420 }
2421 /* Exit TB for TLB change if mmu is enabled. */
2422 if (ctx->tb_flags & PSW_C) {
2423 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2424 }
2425 return nullify_end(ctx);
2426 #endif
2427 }
2428
2429 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2430 {
2431 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2432 #ifndef CONFIG_USER_ONLY
2433 TCGv_i64 vaddr;
2434 TCGv_i64 ofs, paddr;
2435
2436 nullify_over(ctx);
2437
2438 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2439
2440 paddr = tcg_temp_new_i64();
2441 gen_helper_lpa(paddr, tcg_env, vaddr);
2442
2443 /* Note that physical address result overrides base modification. */
2444 if (a->m) {
2445 save_gpr(ctx, a->b, ofs);
2446 }
2447 save_gpr(ctx, a->t, paddr);
2448
2449 return nullify_end(ctx);
2450 #endif
2451 }
2452
2453 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2454 {
2455 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2456
2457 /* The Coherence Index is an implementation-defined function of the
2458 physical address. Two addresses with the same CI have a coherent
2459 view of the cache. Our implementation returns 0 for all addresses,
2460 since the entire address space is coherent. */
2461 save_gpr(ctx, a->t, tcg_constant_i64(0));
2462
2463 cond_free(&ctx->null_cond);
2464 return true;
2465 }
2466
2467 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2468 {
2469 return do_add_reg(ctx, a, false, false, false, false);
2470 }
2471
2472 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2473 {
2474 return do_add_reg(ctx, a, true, false, false, false);
2475 }
2476
2477 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2478 {
2479 return do_add_reg(ctx, a, false, true, false, false);
2480 }
2481
2482 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2483 {
2484 return do_add_reg(ctx, a, false, false, false, true);
2485 }
2486
2487 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2488 {
2489 return do_add_reg(ctx, a, false, true, false, true);
2490 }
2491
2492 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2493 {
2494 return do_sub_reg(ctx, a, false, false, false);
2495 }
2496
2497 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2498 {
2499 return do_sub_reg(ctx, a, true, false, false);
2500 }
2501
2502 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2503 {
2504 return do_sub_reg(ctx, a, false, false, true);
2505 }
2506
2507 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2508 {
2509 return do_sub_reg(ctx, a, true, false, true);
2510 }
2511
2512 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2513 {
2514 return do_sub_reg(ctx, a, false, true, false);
2515 }
2516
2517 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2518 {
2519 return do_sub_reg(ctx, a, true, true, false);
2520 }
2521
2522 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2523 {
2524 return do_log_reg(ctx, a, tcg_gen_andc_i64);
2525 }
2526
2527 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2528 {
2529 return do_log_reg(ctx, a, tcg_gen_and_i64);
2530 }
2531
2532 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2533 {
2534 if (a->cf == 0) {
2535 unsigned r2 = a->r2;
2536 unsigned r1 = a->r1;
2537 unsigned rt = a->t;
2538
2539 if (rt == 0) { /* NOP */
2540 cond_free(&ctx->null_cond);
2541 return true;
2542 }
2543 if (r2 == 0) { /* COPY */
2544 if (r1 == 0) {
2545 TCGv_i64 dest = dest_gpr(ctx, rt);
2546 tcg_gen_movi_i64(dest, 0);
2547 save_gpr(ctx, rt, dest);
2548 } else {
2549 save_gpr(ctx, rt, cpu_gr[r1]);
2550 }
2551 cond_free(&ctx->null_cond);
2552 return true;
2553 }
2554 #ifndef CONFIG_USER_ONLY
2555 /* These are QEMU extensions and are nops in the real architecture:
2556 *
2557 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2558 * or %r31,%r31,%r31 -- death loop; offline cpu,
2559 * currently implemented as idle.
2560 */
2561 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2562 /* No need to check for supervisor, as userland can only pause
2563 until the next timer interrupt. */
2564 nullify_over(ctx);
2565
2566 /* Advance the instruction queue. */
2567 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2568 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2569 nullify_set(ctx, 0);
2570
2571 /* Tell the qemu main loop to halt until this cpu has work. */
2572 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2573 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2574 gen_excp_1(EXCP_HALTED);
2575 ctx->base.is_jmp = DISAS_NORETURN;
2576
2577 return nullify_end(ctx);
2578 }
2579 #endif
2580 }
2581 return do_log_reg(ctx, a, tcg_gen_or_i64);
2582 }
2583
2584 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2585 {
2586 return do_log_reg(ctx, a, tcg_gen_xor_i64);
2587 }
2588
2589 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2590 {
2591 TCGv_i64 tcg_r1, tcg_r2;
2592
2593 if (a->cf) {
2594 nullify_over(ctx);
2595 }
2596 tcg_r1 = load_gpr(ctx, a->r1);
2597 tcg_r2 = load_gpr(ctx, a->r2);
2598 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2599 return nullify_end(ctx);
2600 }
2601
2602 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2603 {
2604 TCGv_i64 tcg_r1, tcg_r2;
2605
2606 if (a->cf) {
2607 nullify_over(ctx);
2608 }
2609 tcg_r1 = load_gpr(ctx, a->r1);
2610 tcg_r2 = load_gpr(ctx, a->r2);
2611 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2612 return nullify_end(ctx);
2613 }
2614
2615 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2616 {
2617 TCGv_i64 tcg_r1, tcg_r2, tmp;
2618
2619 if (a->cf) {
2620 nullify_over(ctx);
2621 }
2622 tcg_r1 = load_gpr(ctx, a->r1);
2623 tcg_r2 = load_gpr(ctx, a->r2);
2624 tmp = tcg_temp_new_i64();
2625 tcg_gen_not_i64(tmp, tcg_r2);
2626 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2627 return nullify_end(ctx);
2628 }
2629
2630 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2631 {
2632 return do_uaddcm(ctx, a, false);
2633 }
2634
2635 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2636 {
2637 return do_uaddcm(ctx, a, true);
2638 }
2639
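/*
 * DCOR and DCOR,I (decimal correct).  The carry information saved by a
 * previous add supplies one bit per 4-bit BCD digit; the 0x1111...1111
 * mask isolates those bits (inverted for plain DCOR) and the multiply
 * by 6 turns each selected bit into a per-digit correction of 6, which
 * is then subtracted (DCOR) or added (DCOR,I) through do_unit so that
 * the condition can be evaluated as well.
 */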
2640 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2641 {
2642 TCGv_i64 tmp;
2643
2644 nullify_over(ctx);
2645
2646 tmp = tcg_temp_new_i64();
2647 tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2648 if (!is_i) {
2649 tcg_gen_not_i64(tmp, tmp);
2650 }
2651 tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2652 tcg_gen_muli_i64(tmp, tmp, 6);
2653 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2654 is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2655 return nullify_end(ctx);
2656 }
2657
2658 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2659 {
2660 return do_dcor(ctx, a, false);
2661 }
2662
2663 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2664 {
2665 return do_dcor(ctx, a, true);
2666 }
2667
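/*
 * DS (divide step): one iteration of the conditional add/subtract
 * division primitive.  The first operand is shifted left one bit with
 * the saved carry shifted in, the divisor is added or subtracted
 * according to PSW[V], and PSW[CB] and PSW[V] are updated for the
 * next step.
 */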
2668 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2669 {
2670 TCGv_i64 dest, add1, add2, addc, zero, in1, in2;
2671 TCGv_i64 cout;
2672
2673 nullify_over(ctx);
2674
2675 in1 = load_gpr(ctx, a->r1);
2676 in2 = load_gpr(ctx, a->r2);
2677
2678 add1 = tcg_temp_new_i64();
2679 add2 = tcg_temp_new_i64();
2680 addc = tcg_temp_new_i64();
2681 dest = tcg_temp_new_i64();
2682 zero = tcg_constant_i64(0);
2683
2684 /* Form R1 << 1 | PSW[CB]{8}. */
2685 tcg_gen_add_i64(add1, in1, in1);
2686 tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2687
2688 /*
2689 * Add or subtract R2, depending on PSW[V]. Proper computation of
2690 * carry requires that we subtract via + ~R2 + 1, as described in
2691 * the manual. By extracting and masking V, we can produce the
2692 * proper inputs to the addition without movcond.
2693 */
2694 tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2695 tcg_gen_xor_i64(add2, in2, addc);
2696 tcg_gen_andi_i64(addc, addc, 1);
2697
2698 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2699 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2700
2701 /* Write back the result register. */
2702 save_gpr(ctx, a->t, dest);
2703
2704 /* Write back PSW[CB]. */
2705 tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2706 tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2707
2708 /* Write back PSW[V] for the division step. */
2709 cout = get_psw_carry(ctx, false);
2710 tcg_gen_neg_i64(cpu_psw_v, cout);
2711 tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2712
2713 /* Install the new nullification. */
2714 if (a->cf) {
2715 TCGv_i64 sv = NULL;
2716 if (cond_need_sv(a->cf >> 1)) {
2717 /* ??? The lshift is supposed to contribute to overflow. */
2718 sv = do_add_sv(ctx, dest, add1, add2);
2719 }
2720 ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2721 }
2722
2723 return nullify_end(ctx);
2724 }
2725
2726 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2727 {
2728 return do_add_imm(ctx, a, false, false);
2729 }
2730
2731 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2732 {
2733 return do_add_imm(ctx, a, true, false);
2734 }
2735
2736 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2737 {
2738 return do_add_imm(ctx, a, false, true);
2739 }
2740
2741 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2742 {
2743 return do_add_imm(ctx, a, true, true);
2744 }
2745
2746 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2747 {
2748 return do_sub_imm(ctx, a, false);
2749 }
2750
2751 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2752 {
2753 return do_sub_imm(ctx, a, true);
2754 }
2755
2756 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2757 {
2758 TCGv_i64 tcg_im, tcg_r2;
2759
2760 if (a->cf) {
2761 nullify_over(ctx);
2762 }
2763
2764 tcg_im = tcg_constant_i64(a->i);
2765 tcg_r2 = load_gpr(ctx, a->r);
2766 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2767
2768 return nullify_end(ctx);
2769 }
2770
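/* Common helper for the PA 2.0 multimedia (halfword) instructions:
   apply FN to the two 64-bit source registers and write back the
   packed result. */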
2771 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2772 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2773 {
2774 TCGv_i64 r1, r2, dest;
2775
2776 if (!ctx->is_pa20) {
2777 return false;
2778 }
2779
2780 nullify_over(ctx);
2781
2782 r1 = load_gpr(ctx, a->r1);
2783 r2 = load_gpr(ctx, a->r2);
2784 dest = dest_gpr(ctx, a->t);
2785
2786 fn(dest, r1, r2);
2787 save_gpr(ctx, a->t, dest);
2788
2789 return nullify_end(ctx);
2790 }
2791
2792 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2793 void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2794 {
2795 TCGv_i64 r, dest;
2796
2797 if (!ctx->is_pa20) {
2798 return false;
2799 }
2800
2801 nullify_over(ctx);
2802
2803 r = load_gpr(ctx, a->r);
2804 dest = dest_gpr(ctx, a->t);
2805
2806 fn(dest, r, a->i);
2807 save_gpr(ctx, a->t, dest);
2808
2809 return nullify_end(ctx);
2810 }
2811
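/* As above, but for HSHLADD and HSHRADD: the immediate shift amount
   is passed to the helper as a constant. */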
2812 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2813 void (*fn)(TCGv_i64, TCGv_i64,
2814 TCGv_i64, TCGv_i32))
2815 {
2816 TCGv_i64 r1, r2, dest;
2817
2818 if (!ctx->is_pa20) {
2819 return false;
2820 }
2821
2822 nullify_over(ctx);
2823
2824 r1 = load_gpr(ctx, a->r1);
2825 r2 = load_gpr(ctx, a->r2);
2826 dest = dest_gpr(ctx, a->t);
2827
2828 fn(dest, r1, r2, tcg_constant_i32(a->sh));
2829 save_gpr(ctx, a->t, dest);
2830
2831 return nullify_end(ctx);
2832 }
2833
2834 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2835 {
2836 return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2837 }
2838
2839 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2840 {
2841 return do_multimedia(ctx, a, gen_helper_hadd_ss);
2842 }
2843
2844 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2845 {
2846 return do_multimedia(ctx, a, gen_helper_hadd_us);
2847 }
2848
2849 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2850 {
2851 return do_multimedia(ctx, a, gen_helper_havg);
2852 }
2853
2854 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2855 {
2856 return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2857 }
2858
2859 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2860 {
2861 return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2862 }
2863
2864 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2865 {
2866 return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2867 }
2868
2869 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2870 {
2871 return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2872 }
2873
2874 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2875 {
2876 return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2877 }
2878
2879 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2880 {
2881 return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2882 }
2883
2884 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2885 {
2886 return do_multimedia(ctx, a, gen_helper_hsub_ss);
2887 }
2888
2889 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2890 {
2891 return do_multimedia(ctx, a, gen_helper_hsub_us);
2892 }
2893
2894 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2895 {
2896 if (!ctx->is_pa20 && a->size > MO_32) {
2897 return gen_illegal(ctx);
2898 }
2899 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2900 a->disp, a->sp, a->m, a->size | MO_TE);
2901 }
2902
2903 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2904 {
2905 assert(a->x == 0 && a->scale == 0);
2906 if (!ctx->is_pa20 && a->size > MO_32) {
2907 return gen_illegal(ctx);
2908 }
2909 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2910 }
2911
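/* LDCW/LDCD (load and clear): implemented as an atomic exchange of
   zero with the memory operand. */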
2912 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2913 {
2914 MemOp mop = MO_TE | MO_ALIGN | a->size;
2915 TCGv_i64 zero, dest, ofs;
2916 TCGv_i64 addr;
2917
2918 if (!ctx->is_pa20 && a->size > MO_32) {
2919 return gen_illegal(ctx);
2920 }
2921
2922 nullify_over(ctx);
2923
2924 if (a->m) {
2925 /* Base register modification. Make sure if RT == RB,
2926 we see the result of the load. */
2927 dest = tcg_temp_new_i64();
2928 } else {
2929 dest = dest_gpr(ctx, a->t);
2930 }
2931
2932 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2933 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2934
2935 /*
2936 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2937 * However, actual hardware succeeds when aligned mod 4.
2938 * Detect this case and log a GUEST_ERROR.
2939 *
2940 * TODO: HPPA64 relaxes the over-alignment requirement
2941 * with the ,co completer.
2942 */
2943 gen_helper_ldc_check(addr);
2944
2945 zero = tcg_constant_i64(0);
2946 tcg_gen_atomic_xchg_i64(dest, addr, zero, ctx->mmu_idx, mop);
2947
2948 if (a->m) {
2949 save_gpr(ctx, a->b, ofs);
2950 }
2951 save_gpr(ctx, a->t, dest);
2952
2953 return nullify_end(ctx);
2954 }
2955
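/* STBY (store bytes): store a partial word for the unaligned beginning
   or end of a block move, as selected by the 'a' completer.  Separate
   helpers are used when the TB executes in a parallel context. */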
2956 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2957 {
2958 TCGv_i64 ofs, val;
2959 TCGv_i64 addr;
2960
2961 nullify_over(ctx);
2962
2963 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2964 ctx->mmu_idx == MMU_PHYS_IDX);
2965 val = load_gpr(ctx, a->r);
2966 if (a->a) {
2967 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2968 gen_helper_stby_e_parallel(tcg_env, addr, val);
2969 } else {
2970 gen_helper_stby_e(tcg_env, addr, val);
2971 }
2972 } else {
2973 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2974 gen_helper_stby_b_parallel(tcg_env, addr, val);
2975 } else {
2976 gen_helper_stby_b(tcg_env, addr, val);
2977 }
2978 }
2979 if (a->m) {
2980 tcg_gen_andi_i64(ofs, ofs, ~3);
2981 save_gpr(ctx, a->b, ofs);
2982 }
2983
2984 return nullify_end(ctx);
2985 }
2986
2987 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
2988 {
2989 TCGv_i64 ofs, val;
2990 TCGv_i64 addr;
2991
2992 if (!ctx->is_pa20) {
2993 return false;
2994 }
2995 nullify_over(ctx);
2996
2997 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2998 ctx->mmu_idx == MMU_PHYS_IDX);
2999 val = load_gpr(ctx, a->r);
3000 if (a->a) {
3001 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3002 gen_helper_stdby_e_parallel(tcg_env, addr, val);
3003 } else {
3004 gen_helper_stdby_e(tcg_env, addr, val);
3005 }
3006 } else {
3007 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3008 gen_helper_stdby_b_parallel(tcg_env, addr, val);
3009 } else {
3010 gen_helper_stdby_b(tcg_env, addr, val);
3011 }
3012 }
3013 if (a->m) {
3014 tcg_gen_andi_i64(ofs, ofs, ~7);
3015 save_gpr(ctx, a->b, ofs);
3016 }
3017
3018 return nullify_end(ctx);
3019 }
3020
3021 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3022 {
3023 int hold_mmu_idx = ctx->mmu_idx;
3024
3025 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3026 ctx->mmu_idx = MMU_PHYS_IDX;
3027 trans_ld(ctx, a);
3028 ctx->mmu_idx = hold_mmu_idx;
3029 return true;
3030 }
3031
3032 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3033 {
3034 int hold_mmu_idx = ctx->mmu_idx;
3035
3036 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3037 ctx->mmu_idx = MMU_PHYS_IDX;
3038 trans_st(ctx, a);
3039 ctx->mmu_idx = hold_mmu_idx;
3040 return true;
3041 }
3042
3043 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3044 {
3045 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3046
3047 tcg_gen_movi_i64(tcg_rt, a->i);
3048 save_gpr(ctx, a->t, tcg_rt);
3049 cond_free(&ctx->null_cond);
3050 return true;
3051 }
3052
3053 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3054 {
3055 TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3056 TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3057
3058 tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3059 save_gpr(ctx, 1, tcg_r1);
3060 cond_free(&ctx->null_cond);
3061 return true;
3062 }
3063
3064 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3065 {
3066 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3067
3068 /* Special case rb == 0, for the LDI pseudo-op.
3069 The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */
3070 if (a->b == 0) {
3071 tcg_gen_movi_i64(tcg_rt, a->i);
3072 } else {
3073 tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3074 }
3075 save_gpr(ctx, a->t, tcg_rt);
3076 cond_free(&ctx->null_cond);
3077 return true;
3078 }
3079
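/*
 * CMPB and CMPIB: compute IN1 - GR[r], derive the branch condition
 * from the difference (computing signed overflow only when the
 * condition requires it), and emit the conditional branch.
 */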
3080 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3081 unsigned c, unsigned f, bool d, unsigned n, int disp)
3082 {
3083 TCGv_i64 dest, in2, sv;
3084 DisasCond cond;
3085
3086 in2 = load_gpr(ctx, r);
3087 dest = tcg_temp_new_i64();
3088
3089 tcg_gen_sub_i64(dest, in1, in2);
3090
3091 sv = NULL;
3092 if (cond_need_sv(c)) {
3093 sv = do_sub_sv(ctx, dest, in1, in2);
3094 }
3095
3096 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3097 return do_cbranch(ctx, disp, n, &cond);
3098 }
3099
3100 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3101 {
3102 if (!ctx->is_pa20 && a->d) {
3103 return false;
3104 }
3105 nullify_over(ctx);
3106 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3107 a->c, a->f, a->d, a->n, a->disp);
3108 }
3109
3110 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3111 {
3112 if (!ctx->is_pa20 && a->d) {
3113 return false;
3114 }
3115 nullify_over(ctx);
3116 return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3117 a->c, a->f, a->d, a->n, a->disp);
3118 }
3119
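/*
 * ADDB and ADDIB: add IN1 and GR[r], write the sum back to GR[r],
 * and branch on a condition of the sum; carry and signed overflow
 * are computed only when the chosen condition needs them.
 */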
3120 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3121 unsigned c, unsigned f, unsigned n, int disp)
3122 {
3123 TCGv_i64 dest, in2, sv, cb_cond;
3124 DisasCond cond;
3125 bool d = false;
3126
3127 /*
3128 * For hppa64, the ADDB conditions change with PSW.W,
3129 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3130 */
3131 if (ctx->tb_flags & PSW_W) {
3132 d = c >= 5;
3133 if (d) {
3134 c &= 3;
3135 }
3136 }
3137
3138 in2 = load_gpr(ctx, r);
3139 dest = tcg_temp_new_i64();
3140 sv = NULL;
3141 cb_cond = NULL;
3142
3143 if (cond_need_cb(c)) {
3144 TCGv_i64 cb = tcg_temp_new_i64();
3145 TCGv_i64 cb_msb = tcg_temp_new_i64();
3146
3147 tcg_gen_movi_i64(cb_msb, 0);
3148 tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3149 tcg_gen_xor_i64(cb, in1, in2);
3150 tcg_gen_xor_i64(cb, cb, dest);
3151 cb_cond = get_carry(ctx, d, cb, cb_msb);
3152 } else {
3153 tcg_gen_add_i64(dest, in1, in2);
3154 }
3155 if (cond_need_sv(c)) {
3156 sv = do_add_sv(ctx, dest, in1, in2);
3157 }
3158
3159 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3160 save_gpr(ctx, r, dest);
3161 return do_cbranch(ctx, disp, n, &cond);
3162 }
3163
3164 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3165 {
3166 nullify_over(ctx);
3167 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3168 }
3169
3170 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3171 {
3172 nullify_over(ctx);
3173 return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3174 }
3175
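/* BB (branch on bit): shift the selected bit into the sign position
   and branch on the sign of the result. */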
3176 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3177 {
3178 TCGv_i64 tmp, tcg_r;
3179 DisasCond cond;
3180
3181 nullify_over(ctx);
3182
3183 tmp = tcg_temp_new_i64();
3184 tcg_r = load_gpr(ctx, a->r);
3185 if (cond_need_ext(ctx, a->d)) {
3186 /* Force shift into [32,63] */
3187 tcg_gen_ori_i64(tmp, cpu_sar, 32);
3188 tcg_gen_shl_i64(tmp, tcg_r, tmp);
3189 } else {
3190 tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3191 }
3192
3193 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3194 return do_cbranch(ctx, a->disp, a->n, &cond);
3195 }
3196
3197 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3198 {
3199 TCGv_i64 tmp, tcg_r;
3200 DisasCond cond;
3201 int p;
3202
3203 nullify_over(ctx);
3204
3205 tmp = tcg_temp_new_i64();
3206 tcg_r = load_gpr(ctx, a->r);
3207 p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3208 tcg_gen_shli_i64(tmp, tcg_r, p);
3209
3210 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3211 return do_cbranch(ctx, a->disp, a->n, &cond);
3212 }
3213
3214 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3215 {
3216 TCGv_i64 dest;
3217 DisasCond cond;
3218
3219 nullify_over(ctx);
3220
3221 dest = dest_gpr(ctx, a->r2);
3222 if (a->r1 == 0) {
3223 tcg_gen_movi_i64(dest, 0);
3224 } else {
3225 tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3226 }
3227
3228 /* All MOVB conditions are 32-bit. */
3229 cond = do_sed_cond(ctx, a->c, false, dest);
3230 return do_cbranch(ctx, a->disp, a->n, &cond);
3231 }
3232
3233 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3234 {
3235 TCGv_i64 dest;
3236 DisasCond cond;
3237
3238 nullify_over(ctx);
3239
3240 dest = dest_gpr(ctx, a->r);
3241 tcg_gen_movi_i64(dest, a->i);
3242
3243 /* All MOVBI conditions are 32-bit. */
3244 cond = do_sed_cond(ctx, a->c, false, dest);
3245 return do_cbranch(ctx, a->disp, a->n, &cond);
3246 }
3247
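/*
 * SHRPW/SHRPD with variable shift amount taken from SAR.  The general
 * case shifts the double-width pair R1:R2 right; the special cases
 * R1 == 0 (plain right shift) and R1 == R2 (rotate) avoid forming
 * the full pair.
 */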
3248 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3249 {
3250 TCGv_i64 dest, src2;
3251
3252 if (!ctx->is_pa20 && a->d) {
3253 return false;
3254 }
3255 if (a->c) {
3256 nullify_over(ctx);
3257 }
3258
3259 dest = dest_gpr(ctx, a->t);
3260 src2 = load_gpr(ctx, a->r2);
3261 if (a->r1 == 0) {
3262 if (a->d) {
3263 tcg_gen_shr_i64(dest, src2, cpu_sar);
3264 } else {
3265 TCGv_i64 tmp = tcg_temp_new_i64();
3266
3267 tcg_gen_ext32u_i64(dest, src2);
3268 tcg_gen_andi_i64(tmp, cpu_sar, 31);
3269 tcg_gen_shr_i64(dest, dest, tmp);
3270 }
3271 } else if (a->r1 == a->r2) {
3272 if (a->d) {
3273 tcg_gen_rotr_i64(dest, src2, cpu_sar);
3274 } else {
3275 TCGv_i32 t32 = tcg_temp_new_i32();
3276 TCGv_i32 s32 = tcg_temp_new_i32();
3277
3278 tcg_gen_extrl_i64_i32(t32, src2);
3279 tcg_gen_extrl_i64_i32(s32, cpu_sar);
3280 tcg_gen_andi_i32(s32, s32, 31);
3281 tcg_gen_rotr_i32(t32, t32, s32);
3282 tcg_gen_extu_i32_i64(dest, t32);
3283 }
3284 } else {
3285 TCGv_i64 src1 = load_gpr(ctx, a->r1);
3286
3287 if (a->d) {
3288 TCGv_i64 t = tcg_temp_new_i64();
3289 TCGv_i64 n = tcg_temp_new_i64();
3290
3291 tcg_gen_xori_i64(n, cpu_sar, 63);
3292 tcg_gen_shl_i64(t, src2, n);
3293 tcg_gen_shli_i64(t, t, 1);
3294 tcg_gen_shr_i64(dest, src1, cpu_sar);
3295 tcg_gen_or_i64(dest, dest, t);
3296 } else {
3297 TCGv_i64 t = tcg_temp_new_i64();
3298 TCGv_i64 s = tcg_temp_new_i64();
3299
3300 tcg_gen_concat32_i64(t, src2, src1);
3301 tcg_gen_andi_i64(s, cpu_sar, 31);
3302 tcg_gen_shr_i64(dest, t, s);
3303 }
3304 }
3305 save_gpr(ctx, a->t, dest);
3306
3307 /* Install the new nullification. */
3308 cond_free(&ctx->null_cond);
3309 if (a->c) {
3310 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3311 }
3312 return nullify_end(ctx);
3313 }
3314
3315 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3316 {
3317 unsigned width, sa;
3318 TCGv_i64 dest, t2;
3319
3320 if (!ctx->is_pa20 && a->d) {
3321 return false;
3322 }
3323 if (a->c) {
3324 nullify_over(ctx);
3325 }
3326
3327 width = a->d ? 64 : 32;
3328 sa = width - 1 - a->cpos;
3329
3330 dest = dest_gpr(ctx, a->t);
3331 t2 = load_gpr(ctx, a->r2);
3332 if (a->r1 == 0) {
3333 tcg_gen_extract_i64(dest, t2, sa, width - sa);
3334 } else if (width == TARGET_LONG_BITS) {
3335 tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3336 } else {
3337 assert(!a->d);
3338 if (a->r1 == a->r2) {
3339 TCGv_i32 t32 = tcg_temp_new_i32();
3340 tcg_gen_extrl_i64_i32(t32, t2);
3341 tcg_gen_rotri_i32(t32, t32, sa);
3342 tcg_gen_extu_i32_i64(dest, t32);
3343 } else {
3344 tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3345 tcg_gen_extract_i64(dest, dest, sa, 32);
3346 }
3347 }
3348 save_gpr(ctx, a->t, dest);
3349
3350 /* Install the new nullification. */
3351 cond_free(&ctx->null_cond);
3352 if (a->c) {
3353 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3354 }
3355 return nullify_end(ctx);
3356 }
3357
3358 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3359 {
3360 unsigned widthm1 = a->d ? 63 : 31;
3361 TCGv_i64 dest, src, tmp;
3362
3363 if (!ctx->is_pa20 && a->d) {
3364 return false;
3365 }
3366 if (a->c) {
3367 nullify_over(ctx);
3368 }
3369
3370 dest = dest_gpr(ctx, a->t);
3371 src = load_gpr(ctx, a->r);
3372 tmp = tcg_temp_new_i64();
3373
3374 /* Recall that SAR uses big-endian bit numbering. */
3375 tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3376 tcg_gen_xori_i64(tmp, tmp, widthm1);
3377
3378 if (a->se) {
3379 if (!a->d) {
3380 tcg_gen_ext32s_i64(dest, src);
3381 src = dest;
3382 }
3383 tcg_gen_sar_i64(dest, src, tmp);
3384 tcg_gen_sextract_i64(dest, dest, 0, a->len);
3385 } else {
3386 if (!a->d) {
3387 tcg_gen_ext32u_i64(dest, src);
3388 src = dest;
3389 }
3390 tcg_gen_shr_i64(dest, src, tmp);
3391 tcg_gen_extract_i64(dest, dest, 0, a->len);
3392 }
3393 save_gpr(ctx, a->t, dest);
3394
3395 /* Install the new nullification. */
3396 cond_free(&ctx->null_cond);
3397 if (a->c) {
3398 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3399 }
3400 return nullify_end(ctx);
3401 }
3402
3403 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3404 {
3405 unsigned len, cpos, width;
3406 TCGv_i64 dest, src;
3407
3408 if (!ctx->is_pa20 && a->d) {
3409 return false;
3410 }
3411 if (a->c) {
3412 nullify_over(ctx);
3413 }
3414
3415 len = a->len;
3416 width = a->d ? 64 : 32;
3417 cpos = width - 1 - a->pos;
3418 if (cpos + len > width) {
3419 len = width - cpos;
3420 }
3421
3422 dest = dest_gpr(ctx, a->t);
3423 src = load_gpr(ctx, a->r);
3424 if (a->se) {
3425 tcg_gen_sextract_i64(dest, src, cpos, len);
3426 } else {
3427 tcg_gen_extract_i64(dest, src, cpos, len);
3428 }
3429 save_gpr(ctx, a->t, dest);
3430
3431 /* Install the new nullification. */
3432 cond_free(&ctx->null_cond);
3433 if (a->c) {
3434 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3435 }
3436 return nullify_end(ctx);
3437 }
3438
3439 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3440 {
3441 unsigned len, width;
3442 uint64_t mask0, mask1;
3443 TCGv_i64 dest;
3444
3445 if (!ctx->is_pa20 && a->d) {
3446 return false;
3447 }
3448 if (a->c) {
3449 nullify_over(ctx);
3450 }
3451
3452 len = a->len;
3453 width = a->d ? 64 : 32;
3454 if (a->cpos + len > width) {
3455 len = width - a->cpos;
3456 }
3457
3458 dest = dest_gpr(ctx, a->t);
3459 mask0 = deposit64(0, a->cpos, len, a->i);
3460 mask1 = deposit64(-1, a->cpos, len, a->i);
3461
3462 if (a->nz) {
3463 TCGv_i64 src = load_gpr(ctx, a->t);
3464 tcg_gen_andi_i64(dest, src, mask1);
3465 tcg_gen_ori_i64(dest, dest, mask0);
3466 } else {
3467 tcg_gen_movi_i64(dest, mask0);
3468 }
3469 save_gpr(ctx, a->t, dest);
3470
3471 /* Install the new nullification. */
3472 cond_free(&ctx->null_cond);
3473 if (a->c) {
3474 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3475 }
3476 return nullify_end(ctx);
3477 }
3478
3479 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3480 {
3481 unsigned rs = a->nz ? a->t : 0;
3482 unsigned len, width;
3483 TCGv_i64 dest, val;
3484
3485 if (!ctx->is_pa20 && a->d) {
3486 return false;
3487 }
3488 if (a->c) {
3489 nullify_over(ctx);
3490 }
3491
3492 len = a->len;
3493 width = a->d ? 64 : 32;
3494 if (a->cpos + len > width) {
3495 len = width - a->cpos;
3496 }
3497
3498 dest = dest_gpr(ctx, a->t);
3499 val = load_gpr(ctx, a->r);
3500 if (rs == 0) {
3501 tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3502 } else {
3503 tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3504 }
3505 save_gpr(ctx, a->t, dest);
3506
3507 /* Install the new nullification. */
3508 cond_free(&ctx->null_cond);
3509 if (a->c) {
3510 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3511 }
3512 return nullify_end(ctx);
3513 }
3514
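/*
 * Variable deposit, with the target bit position taken from SAR in
 * big-endian numbering.  Convert it to a left shift, mask VAL down to
 * LEN bits, and either merge the shifted field into the previous value
 * of RT or deposit it into zero for the zeroing forms.
 */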
3515 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3516 bool d, bool nz, unsigned len, TCGv_i64 val)
3517 {
3518 unsigned rs = nz ? rt : 0;
3519 unsigned widthm1 = d ? 63 : 31;
3520 TCGv_i64 mask, tmp, shift, dest;
3521 uint64_t msb = 1ULL << (len - 1);
3522
3523 dest = dest_gpr(ctx, rt);
3524 shift = tcg_temp_new_i64();
3525 tmp = tcg_temp_new_i64();
3526
3527 /* Convert big-endian bit numbering in SAR to left-shift. */
3528 tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3529 tcg_gen_xori_i64(shift, shift, widthm1);
3530
3531 mask = tcg_temp_new_i64();
3532 tcg_gen_movi_i64(mask, msb + (msb - 1));
3533 tcg_gen_and_i64(tmp, val, mask);
3534 if (rs) {
3535 tcg_gen_shl_i64(mask, mask, shift);
3536 tcg_gen_shl_i64(tmp, tmp, shift);
3537 tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3538 tcg_gen_or_i64(dest, dest, tmp);
3539 } else {
3540 tcg_gen_shl_i64(dest, tmp, shift);
3541 }
3542 save_gpr(ctx, rt, dest);
3543
3544 /* Install the new nullification. */
3545 cond_free(&ctx->null_cond);
3546 if (c) {
3547 ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3548 }
3549 return nullify_end(ctx);
3550 }
3551
3552 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3553 {
3554 if (!ctx->is_pa20 && a->d) {
3555 return false;
3556 }
3557 if (a->c) {
3558 nullify_over(ctx);
3559 }
3560 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3561 load_gpr(ctx, a->r));
3562 }
3563
3564 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3565 {
3566 if (!ctx->is_pa20 && a->d) {
3567 return false;
3568 }
3569 if (a->c) {
3570 nullify_over(ctx);
3571 }
3572 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3573 tcg_constant_i64(a->i));
3574 }
3575
3576 static bool trans_be(DisasContext *ctx, arg_be *a)
3577 {
3578 TCGv_i64 tmp;
3579
3580 #ifdef CONFIG_USER_ONLY
3581 /* ??? It seems like there should be a good way of using
3582 "be disp(sr2, r0)", the canonical gateway entry mechanism
3583 to our advantage. But that appears to be inconvenient to
3584 manage alongside branch delay slots. Therefore we handle
3585 entry into the gateway page via absolute address. */
3586 /* Since we don't implement spaces, just branch. Do notice the special
3587 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3588 goto_tb to the TB containing the syscall. */
3589 if (a->b == 0) {
3590 return do_dbranch(ctx, a->disp, a->l, a->n);
3591 }
3592 #else
3593 nullify_over(ctx);
3594 #endif
3595
3596 tmp = tcg_temp_new_i64();
3597 tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3598 tmp = do_ibranch_priv(ctx, tmp);
3599
3600 #ifdef CONFIG_USER_ONLY
3601 return do_ibranch(ctx, tmp, a->l, a->n);
3602 #else
3603 TCGv_i64 new_spc = tcg_temp_new_i64();
3604
3605 load_spr(ctx, new_spc, a->sp);
3606 if (a->l) {
3607 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3608 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3609 }
3610 if (a->n && use_nullify_skip(ctx)) {
3611 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3612 tcg_gen_addi_i64(tmp, tmp, 4);
3613 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3614 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3615 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3616 } else {
3617 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3618 if (ctx->iaoq_b == -1) {
3619 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3620 }
3621 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3622 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3623 nullify_set(ctx, a->n);
3624 }
3625 tcg_gen_lookup_and_goto_ptr();
3626 ctx->base.is_jmp = DISAS_NORETURN;
3627 return nullify_end(ctx);
3628 #endif
3629 }
3630
3631 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3632 {
3633 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3634 }
3635
3636 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3637 {
3638 uint64_t dest = iaoq_dest(ctx, a->disp);
3639
3640 nullify_over(ctx);
3641
3642 /* Make sure the caller hasn't done something weird with the queue.
3643 * ??? This is not quite the same as the PSW[B] bit, which would be
3644 * expensive to track. Real hardware will trap for
3645 * b gateway
3646 * b gateway+4 (in delay slot of first branch)
3647 * However, checking for a non-sequential instruction queue *will*
3648 * diagnose the security hole
3649 * b gateway
3650 * b evil
3651 * in which instructions at evil would run with increased privs.
3652 */
3653 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3654 return gen_illegal(ctx);
3655 }
3656
3657 #ifndef CONFIG_USER_ONLY
3658 if (ctx->tb_flags & PSW_C) {
3659 CPUHPPAState *env = cpu_env(ctx->cs);
3660 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3661 /* If we could not find a TLB entry, then we need to generate an
3662 ITLB miss exception so the kernel will provide it.
3663 The resulting TLB fill operation will invalidate this TB and
3664 we will re-translate, at which point we *will* be able to find
3665 the TLB entry and determine if this is in fact a gateway page. */
3666 if (type < 0) {
3667 gen_excp(ctx, EXCP_ITLB_MISS);
3668 return true;
3669 }
3670 /* No change for non-gateway pages or for priv decrease. */
3671 if (type >= 4 && type - 4 < ctx->privilege) {
3672 dest = deposit32(dest, 0, 2, type - 4);
3673 }
3674 } else {
3675 dest &= -4; /* priv = 0 */
3676 }
3677 #endif
3678
3679 if (a->l) {
3680 TCGv_i64 tmp = dest_gpr(ctx, a->l);
3681 if (ctx->privilege < 3) {
3682 tcg_gen_andi_i64(tmp, tmp, -4);
3683 }
3684 tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3685 save_gpr(ctx, a->l, tmp);
3686 }
3687
3688 return do_dbranch(ctx, dest, 0, a->n);
3689 }
3690
3691 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3692 {
3693 if (a->x) {
3694 TCGv_i64 tmp = tcg_temp_new_i64();
3695 tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3696 tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3697 /* The computation here never changes privilege level. */
3698 return do_ibranch(ctx, tmp, a->l, a->n);
3699 } else {
3700 /* BLR R0,RX is a good way to load PC+8 into RX. */
3701 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3702 }
3703 }
3704
3705 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3706 {
3707 TCGv_i64 dest;
3708
3709 if (a->x == 0) {
3710 dest = load_gpr(ctx, a->b);
3711 } else {
3712 dest = tcg_temp_new_i64();
3713 tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3714 tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3715 }
3716 dest = do_ibranch_priv(ctx, dest);
3717 return do_ibranch(ctx, dest, 0, a->n);
3718 }
3719
3720 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3721 {
3722 TCGv_i64 dest;
3723
3724 #ifdef CONFIG_USER_ONLY
3725 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3726 return do_ibranch(ctx, dest, a->l, a->n);
3727 #else
3728 nullify_over(ctx);
3729 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3730
3731 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3732 if (ctx->iaoq_b == -1) {
3733 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3734 }
3735 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3736 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3737 if (a->l) {
3738 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3739 }
3740 nullify_set(ctx, a->n);
3741 tcg_gen_lookup_and_goto_ptr();
3742 ctx->base.is_jmp = DISAS_NORETURN;
3743 return nullify_end(ctx);
3744 #endif
3745 }
3746
3747 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3748 {
3749 /* All branch target stack instructions are implemented as nops. */
3750 return ctx->is_pa20;
3751 }
3752
3753 /*
3754 * Float class 0
3755 */
3756
3757 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3758 {
3759 tcg_gen_mov_i32(dst, src);
3760 }
3761
3762 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3763 {
3764 uint64_t ret;
3765
3766 if (ctx->is_pa20) {
3767 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3768 } else {
3769 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3770 }
3771
3772 nullify_over(ctx);
3773 save_frd(0, tcg_constant_i64(ret));
3774 return nullify_end(ctx);
3775 }
3776
3777 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3778 {
3779 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3780 }
3781
3782 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3783 {
3784 tcg_gen_mov_i64(dst, src);
3785 }
3786
3787 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3788 {
3789 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3790 }
3791
3792 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3793 {
3794 tcg_gen_andi_i32(dst, src, INT32_MAX);
3795 }
3796
3797 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3798 {
3799 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3800 }
3801
3802 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3803 {
3804 tcg_gen_andi_i64(dst, src, INT64_MAX);
3805 }
3806
3807 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3808 {
3809 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3810 }
3811
3812 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3813 {
3814 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3815 }
3816
3817 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3818 {
3819 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3820 }
3821
3822 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3823 {
3824 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3825 }
3826
3827 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3828 {
3829 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3830 }
3831
3832 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3833 {
3834 tcg_gen_xori_i32(dst, src, INT32_MIN);
3835 }
3836
3837 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3838 {
3839 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3840 }
3841
3842 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3843 {
3844 tcg_gen_xori_i64(dst, src, INT64_MIN);
3845 }
3846
3847 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3848 {
3849 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3850 }
3851
3852 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3853 {
3854 tcg_gen_ori_i32(dst, src, INT32_MIN);
3855 }
3856
3857 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3858 {
3859 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3860 }
3861
3862 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3863 {
3864 tcg_gen_ori_i64(dst, src, INT64_MIN);
3865 }
3866
3867 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3868 {
3869 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3870 }
3871
3872 /*
3873 * Float class 1
3874 */
3875
3876 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3877 {
3878 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3879 }
3880
3881 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3882 {
3883 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3884 }
3885
3886 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3887 {
3888 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3889 }
3890
3891 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3892 {
3893 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3894 }
3895
3896 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3897 {
3898 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3899 }
3900
3901 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3902 {
3903 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3904 }
3905
3906 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3907 {
3908 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3909 }
3910
3911 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3912 {
3913 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3914 }
3915
3916 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3917 {
3918 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3919 }
3920
3921 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3922 {
3923 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3924 }
3925
3926 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3927 {
3928 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3929 }
3930
3931 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3932 {
3933 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3934 }
3935
3936 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3937 {
3938 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3939 }
3940
3941 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3942 {
3943 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3944 }
3945
3946 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3947 {
3948 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3949 }
3950
3951 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3952 {
3953 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3954 }
3955
3956 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3957 {
3958 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3959 }
3960
3961 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3962 {
3963 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3964 }
3965
3966 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3967 {
3968 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3969 }
3970
3971 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3972 {
3973 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3974 }
3975
3976 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3977 {
3978 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3979 }
3980
3981 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3982 {
3983 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3984 }
3985
3986 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3987 {
3988 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3989 }
3990
3991 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3992 {
3993 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3994 }
3995
3996 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3997 {
3998 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3999 }
4000
4001 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4002 {
4003 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4004 }
4005
4006 /*
4007 * Float class 2
4008 */
4009
4010 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4011 {
4012 TCGv_i32 ta, tb, tc, ty;
4013
4014 nullify_over(ctx);
4015
4016 ta = load_frw0_i32(a->r1);
4017 tb = load_frw0_i32(a->r2);
4018 ty = tcg_constant_i32(a->y);
4019 tc = tcg_constant_i32(a->c);
4020
4021 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4022
4023 return nullify_end(ctx);
4024 }
4025
4026 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4027 {
4028 TCGv_i64 ta, tb;
4029 TCGv_i32 tc, ty;
4030
4031 nullify_over(ctx);
4032
4033 ta = load_frd0(a->r1);
4034 tb = load_frd0(a->r2);
4035 ty = tcg_constant_i32(a->y);
4036 tc = tcg_constant_i32(a->c);
4037
4038 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4039
4040 return nullify_end(ctx);
4041 }
4042
4043 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4044 {
4045 TCGv_i64 t;
4046
4047 nullify_over(ctx);
4048
4049 t = tcg_temp_new_i64();
4050 tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4051
4052 if (a->y == 1) {
4053 int mask;
4054 bool inv = false;
4055
4056 switch (a->c) {
4057 case 0: /* simple */
4058 tcg_gen_andi_i64(t, t, 0x4000000);
4059 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4060 goto done;
4061 case 2: /* rej */
4062 inv = true;
4063 /* fallthru */
4064 case 1: /* acc */
4065 mask = 0x43ff800;
4066 break;
4067 case 6: /* rej8 */
4068 inv = true;
4069 /* fallthru */
4070 case 5: /* acc8 */
4071 mask = 0x43f8000;
4072 break;
4073 case 9: /* acc6 */
4074 mask = 0x43e0000;
4075 break;
4076 case 13: /* acc4 */
4077 mask = 0x4380000;
4078 break;
4079 case 17: /* acc2 */
4080 mask = 0x4200000;
4081 break;
4082 default:
4083 gen_illegal(ctx);
4084 return true;
4085 }
4086 if (inv) {
4087 TCGv_i64 c = tcg_constant_i64(mask);
4088 tcg_gen_or_i64(t, t, c);
4089 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4090 } else {
4091 tcg_gen_andi_i64(t, t, mask);
4092 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4093 }
4094 } else {
4095 unsigned cbit = (a->y ^ 1) - 1;
4096
4097 tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4098 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4099 }
4100
4101 done:
4102 return nullify_end(ctx);
4103 }
4104
4105 /*
4106 * Float class 3
4107 */
4108
4109 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4110 {
4111 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4112 }
4113
4114 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4115 {
4116 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4117 }
4118
4119 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4120 {
4121 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4122 }
4123
4124 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4125 {
4126 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4127 }
4128
4129 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4130 {
4131 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4132 }
4133
4134 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4135 {
4136 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4137 }
4138
4139 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4140 {
4141 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4142 }
4143
4144 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4145 {
4146 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4147 }
4148
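/* XMPYU: unsigned 32-bit fixed-point multiply performed in the
   floating-point registers, producing a 64-bit product in the
   target double-word register. */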
4149 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4150 {
4151 TCGv_i64 x, y;
4152
4153 nullify_over(ctx);
4154
4155 x = load_frw0_i64(a->r1);
4156 y = load_frw0_i64(a->r2);
4157 tcg_gen_mul_i64(x, x, y);
4158 save_frd(a->t, x);
4159
4160 return nullify_end(ctx);
4161 }
4162
4163 /* Convert the fmpyadd single-precision register encodings to standard. */
4164 static inline int fmpyadd_s_reg(unsigned r)
4165 {
4166 return (r & 16) * 2 + 16 + (r & 15);
4167 }
4168
4169 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4170 {
4171 int tm = fmpyadd_s_reg(a->tm);
4172 int ra = fmpyadd_s_reg(a->ra);
4173 int ta = fmpyadd_s_reg(a->ta);
4174 int rm2 = fmpyadd_s_reg(a->rm2);
4175 int rm1 = fmpyadd_s_reg(a->rm1);
4176
4177 nullify_over(ctx);
4178
4179 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4180 do_fop_weww(ctx, ta, ta, ra,
4181 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4182
4183 return nullify_end(ctx);
4184 }
4185
4186 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4187 {
4188 return do_fmpyadd_s(ctx, a, false);
4189 }
4190
4191 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4192 {
4193 return do_fmpyadd_s(ctx, a, true);
4194 }
4195
4196 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4197 {
4198 nullify_over(ctx);
4199
4200 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4201 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4202 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4203
4204 return nullify_end(ctx);
4205 }
4206
4207 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4208 {
4209 return do_fmpyadd_d(ctx, a, false);
4210 }
4211
4212 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4213 {
4214 return do_fmpyadd_d(ctx, a, true);
4215 }
4216
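/*
 * FMPYFADD/FMPYNFADD are the PA 2.0 fused multiply-add forms:
 * t = rm1 * rm2 + ra3, with a->neg selecting the fmpynfadd helper
 * (the negated-multiply variant).  The whole operation is a single
 * helper call rather than two separately rounded steps.
 */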
4217 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4218 {
4219 TCGv_i32 x, y, z;
4220
4221 nullify_over(ctx);
4222 x = load_frw0_i32(a->rm1);
4223 y = load_frw0_i32(a->rm2);
4224 z = load_frw0_i32(a->ra3);
4225
4226 if (a->neg) {
4227 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4228 } else {
4229 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4230 }
4231
4232 save_frw_i32(a->t, x);
4233 return nullify_end(ctx);
4234 }
4235
4236 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4237 {
4238 TCGv_i64 x, y, z;
4239
4240 nullify_over(ctx);
4241 x = load_frd0(a->rm1);
4242 y = load_frd0(a->rm2);
4243 z = load_frd0(a->ra3);
4244
4245 if (a->neg) {
4246 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4247 } else {
4248 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4249 }
4250
4251 save_frd(a->t, x);
4252 return nullify_end(ctx);
4253 }
4254
4255 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4256 {
4257 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4258 #ifndef CONFIG_USER_ONLY
4259 if (a->i == 0x100) {
4260 /* Emulate the PDC BTLB firmware call, used by SeaBIOS-hppa. */
4261 nullify_over(ctx);
4262 gen_helper_diag_btlb(tcg_env);
4263 return nullify_end(ctx);
4264 }
4265 #endif
4266 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4267 return true;
4268 }
4269
4270 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4271 {
4272 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4273 int bound;
4274
4275 ctx->cs = cs;
4276 ctx->tb_flags = ctx->base.tb->flags;
4277 ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4278
4279 #ifdef CONFIG_USER_ONLY
4280 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4281 ctx->mmu_idx = MMU_USER_IDX;
4282 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4283 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4284 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4285 #else
4286 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4287 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4288 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4289 : MMU_PHYS_IDX);
4290
4291 /* Recover the IAOQ values from the GVA + PRIV. */
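/*
 * cs_base packs two values: its high bits hold the front space
 * (IASQ_F), masked back out of pc_first below, and its low 32 bits
 * hold the signed offset from IAOQ_F to IAOQ_B.  A zero offset means
 * the back of the queue was not known when the TB was looked up, so
 * iaoq_b is left as -1, the "unknown" sentinel used throughout.
 */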
4292 uint64_t cs_base = ctx->base.tb->cs_base;
4293 uint64_t iasq_f = cs_base & ~0xffffffffull;
4294 int32_t diff = cs_base;
4295
4296 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4297 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4298 #endif
4299 ctx->iaoq_n = -1;
4300 ctx->iaoq_n_var = NULL;
4301
4302 /* Bound the number of instructions by those left on the page. */
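/*
 * -(pc_first | TARGET_PAGE_MASK) is the number of bytes from pc_first
 * to the end of its page; e.g. with 4 KiB pages and pc_first == 0x2ff0
 * that is 0x10 bytes, i.e. at most 4 more insns.
 */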
4303 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4304 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4305 }
4306
4307 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4308 {
4309 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4310
4311 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4312 ctx->null_cond = cond_make_f();
4313 ctx->psw_n_nonzero = false;
4314 if (ctx->tb_flags & PSW_N) {
4315 ctx->null_cond.c = TCG_COND_ALWAYS;
4316 ctx->psw_n_nonzero = true;
4317 }
4318 ctx->null_lab = NULL;
4319 }
4320
4321 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4322 {
4323 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4324
4325 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4326 }
4327
4328 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4329 {
4330 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4331 CPUHPPAState *env = cpu_env(cs);
4332 DisasJumpType ret;
4333
4334 /* Execute one insn. */
4335 #ifdef CONFIG_USER_ONLY
4336 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
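/*
 * For user-only emulation the low page appears to hold the emulated
 * syscall/gateway entry points (see the addresses special-cased in
 * hppa_tr_disas_log below); do_page_zero handles them instead of the
 * normal decoder.
 */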
4337 do_page_zero(ctx);
4338 ret = ctx->base.is_jmp;
4339 assert(ret != DISAS_NEXT);
4340 } else
4341 #endif
4342 {
4343 /* Always fetch the insn, even if nullified, so that execute
4344 permission on the page is still checked. */
4345 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4346
4347 /* Set up the IA queue for the next insn.
4348 This will be overwritten by a branch. */
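/*
 * iaoq_b == -1 means the back of the queue is only known at run time,
 * so the following address must likewise be computed dynamically from
 * cpu_iaoq_b rather than folded into a constant.
 */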
4349 if (ctx->iaoq_b == -1) {
4350 ctx->iaoq_n = -1;
4351 ctx->iaoq_n_var = tcg_temp_new_i64();
4352 tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4353 } else {
4354 ctx->iaoq_n = ctx->iaoq_b + 4;
4355 ctx->iaoq_n_var = NULL;
4356 }
4357
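/*
 * If the previous insn statically nullified this one, consume the
 * pending nullification instead of decoding; the fetch above has
 * still performed the execute-permission check.
 */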
4358 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4359 ctx->null_cond.c = TCG_COND_NEVER;
4360 ret = DISAS_NEXT;
4361 } else {
4362 ctx->insn = insn;
4363 if (!decode(ctx, insn)) {
4364 gen_illegal(ctx);
4365 }
4366 ret = ctx->base.is_jmp;
4367 assert(ctx->null_lab == NULL);
4368 }
4369 }
4370
4371 /* Advance the insn queue. Note that this check also detects
4372 a priority change within the instruction queue. */
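/*
 * If both queue entries are known statically, chaining to the branch
 * target is permitted, and the nullification state is a compile-time
 * constant, jump straight into the next TB; otherwise flag the queue
 * as stale so it is written back below or in tb_stop.
 */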
4373 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4374 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4375 && use_goto_tb(ctx, ctx->iaoq_b)
4376 && (ctx->null_cond.c == TCG_COND_NEVER
4377 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4378 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4379 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4380 ctx->base.is_jmp = ret = DISAS_NORETURN;
4381 } else {
4382 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4383 }
4384 }
4385 ctx->iaoq_f = ctx->iaoq_b;
4386 ctx->iaoq_b = ctx->iaoq_n;
4387 ctx->base.pc_next += 4;
4388
4389 switch (ret) {
4390 case DISAS_NORETURN:
4391 case DISAS_IAQ_N_UPDATED:
4392 break;
4393
4394 case DISAS_NEXT:
4395 case DISAS_IAQ_N_STALE:
4396 case DISAS_IAQ_N_STALE_EXIT:
4397 if (ctx->iaoq_f == -1) {
4398 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4399 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4400 #ifndef CONFIG_USER_ONLY
4401 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4402 #endif
4403 nullify_save(ctx);
4404 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4405 ? DISAS_EXIT
4406 : DISAS_IAQ_N_UPDATED);
4407 } else if (ctx->iaoq_b == -1) {
4408 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4409 }
4410 break;
4411
4412 default:
4413 g_assert_not_reached();
4414 }
4415 }
4416
4417 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4418 {
4419 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4420 DisasJumpType is_jmp = ctx->base.is_jmp;
4421
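/*
 * Write any statically-known queue values back to cpu_iaoq_f/b and
 * save the nullification state, then either chain through the TB
 * lookup or, for the exit cases, return to the main loop so that
 * changed CPU state is re-examined.
 */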
4422 switch (is_jmp) {
4423 case DISAS_NORETURN:
4424 break;
4425 case DISAS_TOO_MANY:
4426 case DISAS_IAQ_N_STALE:
4427 case DISAS_IAQ_N_STALE_EXIT:
4428 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4429 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4430 nullify_save(ctx);
4431 /* FALLTHRU */
4432 case DISAS_IAQ_N_UPDATED:
4433 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4434 tcg_gen_lookup_and_goto_ptr();
4435 break;
4436 }
4437 /* FALLTHRU */
4438 case DISAS_EXIT:
4439 tcg_gen_exit_tb(NULL, 0);
4440 break;
4441 default:
4442 g_assert_not_reached();
4443 }
4444 }
4445
4446 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4447 CPUState *cs, FILE *logfile)
4448 {
4449 target_ulong pc = dcbase->pc_first;
4450
4451 #ifdef CONFIG_USER_ONLY
4452 switch (pc) {
4453 case 0x00:
4454 fprintf(logfile, "IN:\n0x00000000: (null)\n");
4455 return;
4456 case 0xb0:
4457 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
4458 return;
4459 case 0xe0:
4460 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4461 return;
4462 case 0x100:
4463 fprintf(logfile, "IN:\n0x00000100: syscall\n");
4464 return;
4465 }
4466 #endif
4467
4468 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4469 target_disas(logfile, cs, pc, dcbase->tb->size);
4470 }
4471
4472 static const TranslatorOps hppa_tr_ops = {
4473 .init_disas_context = hppa_tr_init_disas_context,
4474 .tb_start = hppa_tr_tb_start,
4475 .insn_start = hppa_tr_insn_start,
4476 .translate_insn = hppa_tr_translate_insn,
4477 .tb_stop = hppa_tr_tb_stop,
4478 .disas_log = hppa_tr_disas_log,
4479 };
4480
4481 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4482 target_ulong pc, void *host_pc)
4483 {
4484 DisasContext ctx;
4485 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4486 }