]> git.proxmox.com Git - mirror_qemu.git/blob - target/hppa/translate.c
1973777597ccd33248fa2c575ab862c7564abdc3
[mirror_qemu.git] / target / hppa / translate.c
1 /*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30
31 #include "trace-tcg.h"
32 #include "exec/log.h"
33
/* A decoded comparison, used both for branch conditions and for the
   nullification state carried from one insn to the next.  */
typedef struct DisasCond {
    TCGCond c;          /* TCG_COND_NEVER means "no condition" */
    TCGv a0, a1;        /* operands of C; may be unset when C is NEVER */
    bool a0_is_n;       /* a0 aliases the global cpu_psw_n; must not be freed */
    bool a1_is_0;       /* a1 is an implied constant 0, not yet materialized */
} DisasCond;
40
/* Per-translation-block state for the HPPA front end.  */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    CPUState *cs;

    target_ulong iaoq_f;    /* instruction address queue, front (current insn) */
    target_ulong iaoq_b;    /* instruction address queue, back (next insn) */
    target_ulong iaoq_n;    /* address after iaoq_b; -1 if only known at runtime */
    TCGv iaoq_n_var;        /* runtime value used when iaoq_n == -1 */

    /* Temporaries handed out by get_temp for the current insn.  */
    int ntemps;
    TCGv temps[8];          /* presumably freed by the main loop — not visible here */

    DisasCond null_cond;    /* condition under which the NEXT insn is nullified */
    TCGLabel *null_lab;     /* branch target used to skip a nullified insn body */

    bool singlestep_enabled;
    bool psw_n_nonzero;     /* true if PSW[N] may currently hold a nonzero value */
} DisasContext;
59
60 /* Return values from translate_one, indicating the state of the TB.
61 Note that zero indicates that we are not exiting the TB. */
62
/* Translator status codes; see the comment above for the zero case.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the iaq (for whatever reason), so don't do it again on exit.  */
    EXIT_IAQ_N_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the iaq for the next instruction to be executed.  */
    EXIT_IAQ_N_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
81
/* One entry of an instruction decode table: an opcode pattern (insn/mask),
   the translator callback, and an optional generator helper passed to it.  */
typedef struct DisasInsn {
    uint32_t insn, mask;    /* match when (opcode & mask) == insn */
    ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
                        const struct DisasInsn *f);
    union {
        /* Emitter taking (dest, src1, src2), e.g. a tcg_gen_* op.  */
        void (*f_ttt)(TCGv, TCGv, TCGv);
    };
} DisasInsn;
90
/* global register indexes -- TCG globals mapped onto CPUHPPAState fields.
   cpu_gr[0] is deliberately left unset: GR0 is handled specially by
   load_gpr/save_gpr below.  */
static TCGv_env cpu_env;
static TCGv cpu_gr[32];
static TCGv cpu_iaoq_f;
static TCGv cpu_iaoq_b;
static TCGv cpu_sar;
static TCGv cpu_psw_n;
static TCGv cpu_psw_v;
static TCGv cpu_psw_cb;
static TCGv cpu_psw_cb_msb;
static TCGv cpu_cr26;
static TCGv cpu_cr27;

#include "exec/gen-icount.h"
105
106 void hppa_translate_init(void)
107 {
108 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
109
110 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
111 static const GlobalVar vars[] = {
112 DEF_VAR(sar),
113 DEF_VAR(cr26),
114 DEF_VAR(cr27),
115 DEF_VAR(psw_n),
116 DEF_VAR(psw_v),
117 DEF_VAR(psw_cb),
118 DEF_VAR(psw_cb_msb),
119 DEF_VAR(iaoq_f),
120 DEF_VAR(iaoq_b),
121 };
122
123 #undef DEF_VAR
124
125 /* Use the symbolic register names that match the disassembler. */
126 static const char gr_names[32][4] = {
127 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
128 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
129 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
130 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
131 };
132
133 static bool done_init = 0;
134 int i;
135
136 if (done_init) {
137 return;
138 }
139 done_init = 1;
140
141 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
142 tcg_ctx.tcg_env = cpu_env;
143
144 TCGV_UNUSED(cpu_gr[0]);
145 for (i = 1; i < 32; i++) {
146 cpu_gr[i] = tcg_global_mem_new(cpu_env,
147 offsetof(CPUHPPAState, gr[i]),
148 gr_names[i]);
149 }
150
151 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
152 const GlobalVar *v = &vars[i];
153 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
154 }
155 }
156
/* Build the "never" condition (instruction is unconditional).  */
static DisasCond cond_make_f(void)
{
    DisasCond r = { .c = TCG_COND_NEVER };
    TCGV_UNUSED(r.a0);
    TCGV_UNUSED(r.a1);
    return r;
}

/* Build the condition "PSW[N] != 0", referencing the global directly.  */
static DisasCond cond_make_n(void)
{
    DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
    r.a0 = cpu_psw_n;
    TCGV_UNUSED(r.a1);
    return r;
}
172
/* Build the condition "A0 <c> 0".  A0 is copied into a fresh temp so the
   condition survives later writes to the source register.  */
static DisasCond cond_make_0(TCGCond c, TCGv a0)
{
    DisasCond r = { .c = c, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_tl(r.a0, a0);
    TCGV_UNUSED(r.a1);

    return r;
}

/* Build the condition "A0 <c> A1"; both operands are snapshotted.  */
static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_tl(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_tl(r.a1, a1);

    return r;
}
197
/* Materialize an implied constant-zero A1 into a real TCG constant,
   so the condition can be fed to movcond/setcond/brcond.  */
static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_tl(0);
    }
}
205
/* Release any temporaries owned by COND and reset it to NEVER.
   Temps aliased to cpu_psw_n (a0_is_n) or implied-zero (a1_is_0)
   are not freed because they were never allocated.  */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        TCGV_UNUSED(cond->a0);
        TCGV_UNUSED(cond->a1);
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        /* Already free; nothing to do.  */
        break;
    }
}
228
/* Allocate a per-insn temporary, tracked in ctx->temps for later release.
   At most ARRAY_SIZE(ctx->temps) may be live per instruction.  */
static TCGv get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntemps++;
    g_assert(i < ARRAY_SIZE(ctx->temps));
    return ctx->temps[i] = tcg_temp_new();
}

/* Allocate a tracked temporary holding the constant V.  */
static TCGv load_const(DisasContext *ctx, target_long v)
{
    TCGv t = get_temp(ctx);
    tcg_gen_movi_tl(t, v);
    return t;
}
242
/* Return a TCGv holding the value of general register REG for reading.
   GR0 is architecturally hardwired to zero, so return a zeroed temp.  */
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv t = get_temp(ctx);
        tcg_gen_movi_tl(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

/* Return a TCGv to use as the destination for REG.  A temp is returned
   for GR0 (writes are discarded) and whenever the insn may be nullified,
   so the real register is only committed via save_gpr.  */
static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}
262
263 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
264 {
265 if (ctx->null_cond.c != TCG_COND_NEVER) {
266 cond_prep(&ctx->null_cond);
267 tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
268 ctx->null_cond.a1, dest, t);
269 } else {
270 tcg_gen_mov_tl(dest, t);
271 }
272 }
273
274 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
275 {
276 if (reg != 0) {
277 save_or_nullify(ctx, cpu_gr[reg], t);
278 }
279 }
280
/* Byte offsets of the high/low 32-bit halves of a 64-bit FP register
   in host memory, used to address single-precision halves below.  */
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
288
/* Load single-precision FP register RT (0..63) into a new i32 temp.
   fr[] holds 32 doubles; bit 5 of RT selects the low/high half.  */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

/* Store VAL into single-precision FP register RT; see load_frw_i32.  */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}
304
305 #undef HI_OFS
306 #undef LO_OFS
307
/* Load double-precision FP register RT into a new i64 temp.  */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

/* Store VAL into double-precision FP register RT.  */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
319
320 /* Skip over the implementation of an insn that has been nullified.
321 Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because...  */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }

        /* Skip the insn body when the condition holds; nullify_end
           places the matching label.  */
        tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
                          ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
350
351 /* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: make sure PSW[N] reads as zero.
           NOTE(review): psw_n_nonzero is left set even after clearing —
           presumably a conservative over-approximation; confirm.  */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }
        return;
    }
    /* If a0 already is cpu_psw_n, the value is in place.  */
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
                           ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
368
369 /* Set a PSW[N] to X. The intention is that this is used immediately
370 before a goto_tb/exit_tb, so that there is no fallthru path to other
371 code within the TB. Therefore we do not update psw_n_nonzero. */
static void nullify_set(DisasContext *ctx, bool x)
{
    /* Skip the store when PSW[N] is already known-zero and X is false.  */
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_tl(cpu_psw_n, x);
    }
}
378
379 /* Mark the end of an instruction that may have been nullified.
380 This is the pair to nullify_over. */
381 static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status)
382 {
383 TCGLabel *null_lab = ctx->null_lab;
384
385 if (likely(null_lab == NULL)) {
386 /* The current insn wasn't conditional or handled the condition
387 applied to it without a branch, so the (new) setting of
388 NULL_COND can be applied directly to the next insn. */
389 return status;
390 }
391 ctx->null_lab = NULL;
392
393 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
394 /* The next instruction will be unconditional,
395 and NULL_COND already reflects that. */
396 gen_set_label(null_lab);
397 } else {
398 /* The insn that we just executed is itself nullifying the next
399 instruction. Store the condition in the PSW[N] global.
400 We asserted PSW[N] = 0 in nullify_over, so that after the
401 label we have the proper value in place. */
402 nullify_save(ctx);
403 gen_set_label(null_lab);
404 ctx->null_cond = cond_make_n();
405 }
406
407 assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED);
408 if (status == EXIT_NORETURN) {
409 status = NO_EXIT;
410 }
411 return status;
412 }
413
/* Set DEST to an instruction-queue address: the compile-time value IVAL,
   or the runtime value VVAL when IVAL is the -1 "unknown" sentinel.  */
static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_tl(dest, vval);
    } else {
        tcg_gen_movi_tl(dest, ival);
    }
}
422
/* Branch target for a displacement DISP relative to the current insn:
   displacements are measured from the insn address plus 8.  */
static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
{
    return ctx->iaoq_f + disp + 8;
}
427
/* Emit a call to the exception helper; does not return to generated code.  */
static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

/* Raise EXCEPTION after committing the current queue addresses and
   nullification state, so the exception sees a consistent CPU state.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return EXIT_NORETURN;
}
443
/* Raise SIGILL for an illegal/unimplemented opcode, honoring nullification
   (a nullified illegal insn must not trap).  */
static ExitStatus gen_illegal(DisasContext *ctx)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
}
449
450 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
451 {
452 /* Suppress goto_tb in the case of single-steping and IO. */
453 if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
454 return false;
455 }
456 return true;
457 }
458
459 /* If the next insn is to be nullified, and it's on the same page,
460 and we're not attempting to set a breakpoint on it, then we can
461 totally skip the nullified insn. This avoids creating and
462 executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
    /* Same page as the current insn, and no breakpoint on the target.  */
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
468
469 static void gen_goto_tb(DisasContext *ctx, int which,
470 target_ulong f, target_ulong b)
471 {
472 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
473 tcg_gen_goto_tb(which);
474 tcg_gen_movi_tl(cpu_iaoq_f, f);
475 tcg_gen_movi_tl(cpu_iaoq_b, b);
476 tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
477 } else {
478 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
479 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
480 if (ctx->singlestep_enabled) {
481 gen_excp_1(EXCP_DEBUG);
482 } else {
483 tcg_gen_exit_tb(0);
484 }
485 }
486 }
487
488 /* PA has a habit of taking the LSB of a field and using that as the sign,
489 with the rest of the field becoming the least significant bits. */
490 static target_long low_sextract(uint32_t val, int pos, int len)
491 {
492 target_ulong x = -(target_ulong)extract32(val, pos, 1);
493 x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
494 return x;
495 }
496
497 static target_long assemble_12(uint32_t insn)
498 {
499 target_ulong x = -(target_ulong)(insn & 1);
500 x = (x << 1) | extract32(insn, 2, 1);
501 x = (x << 10) | extract32(insn, 3, 10);
502 return x;
503 }
504
505 static target_long assemble_16(uint32_t insn)
506 {
507 /* Take the name from PA2.0, which produces a 16-bit number
508 only with wide mode; otherwise a 14-bit number. Since we don't
509 implement wide mode, this is always the 14-bit number. */
510 return low_sextract(insn, 0, 14);
511 }
512
513 static target_long assemble_16a(uint32_t insn)
514 {
515 /* Take the name from PA2.0, which produces a 14-bit shifted number
516 only with wide mode; otherwise a 12-bit shifted number. Since we
517 don't implement wide mode, this is always the 12-bit number. */
518 target_ulong x = -(target_ulong)(insn & 1);
519 x = (x << 11) | extract32(insn, 2, 11);
520 return x << 2;
521 }
522
523 static target_long assemble_17(uint32_t insn)
524 {
525 target_ulong x = -(target_ulong)(insn & 1);
526 x = (x << 5) | extract32(insn, 16, 5);
527 x = (x << 1) | extract32(insn, 2, 1);
528 x = (x << 10) | extract32(insn, 3, 10);
529 return x << 2;
530 }
531
532 static target_long assemble_21(uint32_t insn)
533 {
534 target_ulong x = -(target_ulong)(insn & 1);
535 x = (x << 11) | extract32(insn, 1, 11);
536 x = (x << 2) | extract32(insn, 14, 2);
537 x = (x << 5) | extract32(insn, 16, 5);
538 x = (x << 2) | extract32(insn, 12, 2);
539 return x << 11;
540 }
541
542 static target_long assemble_22(uint32_t insn)
543 {
544 target_ulong x = -(target_ulong)(insn & 1);
545 x = (x << 10) | extract32(insn, 16, 10);
546 x = (x << 1) | extract32(insn, 2, 1);
547 x = (x << 10) | extract32(insn, 3, 10);
548 return x << 2;
549 }
550
551 /* The parisc documentation describes only the general interpretation of
552 the conditions, without describing their exact implementation. The
553 interpretations do not stand up well when considering ADD,C and SUB,B.
554 However, considering the Addition, Subtraction and Logical conditions
555 as a whole it would appear that these relations are similar to what
556 a traditional NZCV set of flags would produce. */
557
/* Decode condition field CF against result RES, carry-out MSB CB_MSB,
   and signed-overflow SV.  Bit 0 of CF inverts the sense.  */
static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
{
    DisasCond cond;
    TCGv tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        /* -C & RES is zero iff !C or Z.  */
        tmp = tcg_temp_new();
        tcg_gen_neg_tl(tmp, cb_msb);
        tcg_gen_and_tl(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_tl(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF selects the negated sense.  */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
604
605 /* Similar, but for the special case of subtraction without borrow, we
606 can use the inputs directly. This can allow other computation to be
607 deleted as unused. */
608
/* Condition for subtraction without borrow, comparing the inputs directly
   so the subtraction result may be dead-code-eliminated.  Falls back to
   do_cond for conditions that need RES/SV.  */
static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
638
639 /* Similar, but for logicals, where the carry and overflow bits are not
640 computed, and use of them is undefined. */
641
/* Condition for logical operations, where carry/overflow are undefined:
   the carry/overflow selectors (4..6) degrade to never/always.  */
static DisasCond do_log_cond(unsigned cf, TCGv res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        /* Keep only the invert bit, yielding TR/never.  */
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}
651
652 /* Similar, but for shift/extract/deposit conditions. */
653
static DisasCond do_sed_cond(unsigned orig, TCGv res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;     /* invert-sense bit */

    return do_log_cond(c * 2 + f, res);
}
669
670 /* Similar, but for unit conditions. */
671
/* Condition for UNIT ops: per-byte/halfword/digit zero or carry tests
   over RES computed from IN1 + IN2.  Bit 3 of CF selects the carry
   variants; bit 0 inverts the sense.  */
static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
{
    DisasCond cond;
    TCGv tmp, cb;

    TCGV_UNUSED(cb);
    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        /* carry-out = (in1 | in2) & ~res  |  (in1 & in2)  */
        tcg_gen_or_tl(cb, in1, in2);
        tcg_gen_and_tl(tmp, in1, in2);
        tcg_gen_andc_tl(cb, cb, res);
        tcg_gen_or_tl(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x01010101u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        /* Same trick as above at halfword granularity.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x00010001u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC -- carry out of any 4-bit digit */
        tcg_gen_andi_tl(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC -- carry out of any byte */
        tcg_gen_andi_tl(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC -- carry out of any halfword */
        tcg_gen_andi_tl(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
747
/* Compute signed overflow for addition:
   overflow iff the inputs have equal signs and the result differs.  */
static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
{
    TCGv sv = get_temp(ctx);
    TCGv tmp = tcg_temp_new();

    /* sv = (res ^ in1) & ~(in1 ^ in2); sign bit set on overflow.  */
    tcg_gen_xor_tl(sv, res, in1);
    tcg_gen_xor_tl(tmp, in1, in2);
    tcg_gen_andc_tl(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction:
   overflow iff the inputs have differing signs and the result's sign
   differs from the minuend's.  */
static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
{
    TCGv sv = get_temp(ctx);
    TCGv tmp = tcg_temp_new();

    /* sv = (res ^ in1) & (in1 ^ in2); sign bit set on overflow.  */
    tcg_gen_xor_tl(sv, res, in1);
    tcg_gen_xor_tl(tmp, in1, in2);
    tcg_gen_and_tl(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
775
/* Common translation for the ADD family.
   SHIFT: pre-shift of IN1 (sh1add etc.); IS_L: "logical" add, no carry
   writeback; IS_TSV/IS_TC: trap on signed overflow / on condition;
   IS_C: add the incoming carry; CF: condition field.  */
static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         unsigned shift, bool is_l, bool is_tsv, bool is_tc,
                         bool is_c, unsigned cf)
{
    TCGv dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    TCGV_UNUSED(cb);
    TCGV_UNUSED(cb_msb);

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_tl(tmp, in1, shift);
        in1 = tmp;
    }

    /* Compute the carry-out unless logical, except that conditions 4/5
       (carry-based) still need cb_msb.  */
    if (!is_l || c == 4 || c == 5) {
        TCGv zero = tcg_const_tl(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            /* Per-bit carry vector: cb = in1 ^ in2 ^ dest.  */
            cb = get_temp(ctx);
            tcg_gen_xor_tl(cb, in1, in2);
            tcg_gen_xor_tl(cb, cb, dest);
        }
    } else {
        tcg_gen_add_tl(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}
847
/* Common translation for the SUB family.
   IS_TSV/IS_TC: trap on signed overflow / on condition; IS_B: subtract
   with borrow (use incoming carry); CF: condition field.  */
static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         bool is_tsv, bool is_b, bool is_tc, unsigned cf)
{
    TCGv dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_tl(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_tl(cb, in2);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_tl(cb, cb, in1);
        tcg_gen_xor_tl(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_tl(cb_msb, 1);
        tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_tl(cb, in1, in2);
        tcg_gen_xor_tl(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}
913
/* Translate CMPCLR: compare IN1 with IN2 to set the condition, then
   write zero into RT.  */
static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
                            TCGv in2, unsigned cf)
{
    TCGv dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_tl(dest, in1, in2);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_tl(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return NO_EXIT;
}
942
/* Translate a logical op: apply FN to IN1/IN2, write RT, and set the
   nullification from the logical condition field (CF == 0: never).  */
static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                         unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
    return NO_EXIT;
}
959
/* Translate a UNIT op: apply FN to IN1/IN2, optionally trap on the unit
   condition (IS_TC), write RT, and install the nullification.  */
static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
                          TCGv in2, unsigned cf, bool is_tc,
                          void (*fn)(TCGv, TCGv, TCGv))
{
    TCGv dest;
    DisasCond cond;

    if (cf == 0) {
        /* No condition: plain operation and writeback.  */
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        /* Trap before writeback, like do_add/do_sub.  */
        if (is_tc) {
            TCGv tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
    return NO_EXIT;
}
992
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 * Address is base RB plus either scaled index RX or displacement DISP
 * (mutually exclusive).
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_long disp,
                       int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    if (modify == 0) {
        tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
    } else {
        /* Pre-modify loads from the updated address; post-modify from
           the original base.  Either way the base is updated.  */
        tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
                            MMU_USER_IDX, mop);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}
1027
/* 64-bit variant of do_load_32; see its comment for the MODIFY protocol.  */
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_long disp,
                       int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    if (modify == 0) {
        tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
    } else {
        tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
                            MMU_USER_IDX, mop);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}
1057
/* Emit a 32-bit memory store; MODIFY as described at do_load_32.  */
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_long disp,
                        int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    /* Pre-modify (and no-modify) store to the computed address;
       post-modify stores to the original base.  */
    tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);

    if (modify != 0) {
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}
1085
/* 64-bit variant of do_store_32; see its comment for the MODIFY protocol.  */
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_long disp,
                        int modify, TCGMemOp mop)
{
    TCGv addr, base;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
        tcg_gen_add_tl(addr, addr, base);
    } else {
        tcg_gen_addi_tl(addr, base, disp);
    }

    tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);

    if (modify != 0) {
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);
}
1113
/* Select the target-word-sized load/store helpers.  */
#if TARGET_LONG_BITS == 64
#define do_load_tl  do_load_64
#define do_store_tl do_store_64
#else
#define do_load_tl  do_load_32
#define do_store_tl do_store_32
#endif
1121
/* Integer load with writeback into RT, honoring nullification.  */
static ExitStatus do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                          unsigned rx, int scale, target_long disp,
                          int modify, TCGMemOp mop)
{
    TCGv dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, NO_EXIT);
}
1142
/* Single-precision FP load into FR[RT], honoring nullification.  */
static ExitStatus do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                            unsigned rx, int scale, target_long disp,
                            int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    /* Loads into FR0 update the FP status register via the helper.  */
    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, NO_EXIT);
}

/* Double-precision FP load into FR[RT], honoring nullification.  */
static ExitStatus do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                            unsigned rx, int scale, target_long disp,
                            int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    /* Loads into FR0 update the FP status register via the helper.  */
    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, NO_EXIT);
}
1182
1183 static ExitStatus do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1184 target_long disp, int modify, TCGMemOp mop)
1185 {
1186 nullify_over(ctx);
1187 do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1188 return nullify_end(ctx, NO_EXIT);
1189 }
1190
1191 static ExitStatus do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1192 unsigned rx, int scale, target_long disp,
1193 int modify)
1194 {
1195 TCGv_i32 tmp;
1196
1197 nullify_over(ctx);
1198
1199 tmp = load_frw_i32(rt);
1200 do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1201 tcg_temp_free_i32(tmp);
1202
1203 return nullify_end(ctx, NO_EXIT);
1204 }
1205
1206 static ExitStatus do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1207 unsigned rx, int scale, target_long disp,
1208 int modify)
1209 {
1210 TCGv_i64 tmp;
1211
1212 nullify_over(ctx);
1213
1214 tmp = load_frd(rt);
1215 do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1216 tcg_temp_free_i64(tmp);
1217
1218 return nullify_end(ctx, NO_EXIT);
1219 }
1220
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest,
                             unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No nullification pending: the target is statically known, so
           just redirect the next instruction-address queue entry and
           keep translating in the same TB.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            /* Branch with nullify: the delay-slot insn will be skipped.  */
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        return NO_EXIT;
    } else {
        /* The branch itself may be nullified; emit both the taken and
           the nullified exits as goto_tb chains.  */
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            /* The delay slot is nullified and may be skipped entirely:
               jump straight past it.  */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx, NO_EXIT);

        /* The branch was nullified: continue with sequential execution.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        return EXIT_GOTO_TB;
    }
}
1257
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
                             DisasCond *cond)
{
    target_ulong dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    int which = 0;     /* number of goto_tb slots consumed (max 2)  */
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        /* The nullified delay slot may be skipped outright.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, which++, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            /* Merge the branch-nullified path into the not-taken path.  */
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        gen_goto_tb(ctx, which++, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, which++, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, which++, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        if (which < 2) {
            /* A goto_tb slot is still free; use it for this path.  */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, which, ctx->iaoq_b, ctx->iaoq_n);
            return EXIT_GOTO_TB;
        } else {
            /* Both slots consumed; fall back to an indirect exit.  */
            return EXIT_IAQ_N_STALE;
        }
    } else {
        return EXIT_GOTO_TB;
    }
}
1325
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
                             unsigned link, bool is_n)
{
    TCGv a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: redirect the queue to the dynamic target.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_tl(next, dest);
        ctx->iaoq_n = -1;        /* address known only at run time  */
        ctx->iaoq_n_var = next;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the exit_tb that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_tl(cpu_iaoq_f, dest);
        tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_exit_tb(0);
        return nullify_end(ctx, NO_EXIT);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        /* NEXT selects between the sequential successor (if the branch
           is nullified) and the branch target (if it executes).  */
        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* The link register is written only when the branch is
               not nullified.  */
            tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch. */
            tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return NO_EXIT;
}
1402
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static ExitStatus do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_tl(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the gateway-page entry point being executed.  */
    switch (ctx->iaoq_f) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_SIGSEGV);
        return EXIT_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return EXIT_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
        tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
        tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
        return EXIT_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return EXIT_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_SIGILL);
        return EXIT_NORETURN;
    }
}
1460
/* No-operation: consume any pending nullification condition and
   continue with the next insn.  */
static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return NO_EXIT;
}
1467
/* Translate ADD and SHLADD variants.  The EXT field selects the
   carry/overflow behavior; SHIFT is the pre-shift applied to R1.  */
static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 8, 4);
    unsigned shift = extract32(insn, 6, 2);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_c = false;    /* add with carry-in  */
    bool is_l = false;    /* logical add: no carry condition  */
    bool is_tc = false;   /* trap on condition (never set here)  */
    bool is_tsv = false;  /* trap on signed overflow  */
    ExitStatus ret;

    switch (ext) {
    case 0x6: /* ADD, SHLADD */
        break;
    case 0xa: /* ADD,L, SHLADD,L */
        is_l = true;
        break;
    case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
        is_tsv = true;
        break;
    case 0x7: /* ADD,C */
        is_c = true;
        break;
    case 0xf: /* ADD,C,TSV */
        is_c = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        /* A nonzero condition may nullify the following insn.  */
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
    return nullify_end(ctx, ret);
}
1511
/* Translate SUB variants.  The EXT field selects borrow-in,
   trap-on-condition, and trap-on-overflow behavior.  */
static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 6, 6);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tcg_r1, tcg_r2;
    bool is_b = false;    /* subtract with borrow-in  */
    bool is_tc = false;   /* trap on condition  */
    bool is_tsv = false;  /* trap on signed overflow  */
    ExitStatus ret;

    switch (ext) {
    case 0x10: /* SUB */
        break;
    case 0x30: /* SUB,TSV */
        is_tsv = true;
        break;
    case 0x14: /* SUB,B */
        is_b = true;
        break;
    case 0x34: /* SUB,B,TSV */
        is_b = is_tsv = true;
        break;
    case 0x13: /* SUB,TC */
        is_tc = true;
        break;
    case 0x33: /* SUB,TSV,TC */
        is_tc = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    if (cf) {
        /* A nonzero condition may nullify the following insn.  */
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
    return nullify_end(ctx, ret);
}
1556
1557 static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
1558 const DisasInsn *di)
1559 {
1560 unsigned r2 = extract32(insn, 21, 5);
1561 unsigned r1 = extract32(insn, 16, 5);
1562 unsigned cf = extract32(insn, 12, 4);
1563 unsigned rt = extract32(insn, 0, 5);
1564 TCGv tcg_r1, tcg_r2;
1565 ExitStatus ret;
1566
1567 if (cf) {
1568 nullify_over(ctx);
1569 }
1570 tcg_r1 = load_gpr(ctx, r1);
1571 tcg_r2 = load_gpr(ctx, r2);
1572 ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
1573 return nullify_end(ctx, ret);
1574 }
1575
1576 /* OR r,0,t -> COPY (according to gas) */
1577 static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
1578 const DisasInsn *di)
1579 {
1580 unsigned r1 = extract32(insn, 16, 5);
1581 unsigned rt = extract32(insn, 0, 5);
1582
1583 if (r1 == 0) {
1584 TCGv dest = dest_gpr(ctx, rt);
1585 tcg_gen_movi_tl(dest, 0);
1586 save_gpr(ctx, rt, dest);
1587 } else {
1588 save_gpr(ctx, rt, cpu_gr[r1]);
1589 }
1590 cond_free(&ctx->null_cond);
1591 return NO_EXIT;
1592 }
1593
1594 static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
1595 const DisasInsn *di)
1596 {
1597 unsigned r2 = extract32(insn, 21, 5);
1598 unsigned r1 = extract32(insn, 16, 5);
1599 unsigned cf = extract32(insn, 12, 4);
1600 unsigned rt = extract32(insn, 0, 5);
1601 TCGv tcg_r1, tcg_r2;
1602 ExitStatus ret;
1603
1604 if (cf) {
1605 nullify_over(ctx);
1606 }
1607 tcg_r1 = load_gpr(ctx, r1);
1608 tcg_r2 = load_gpr(ctx, r2);
1609 ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1610 return nullify_end(ctx, ret);
1611 }
1612
1613 static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
1614 const DisasInsn *di)
1615 {
1616 unsigned r2 = extract32(insn, 21, 5);
1617 unsigned r1 = extract32(insn, 16, 5);
1618 unsigned cf = extract32(insn, 12, 4);
1619 unsigned rt = extract32(insn, 0, 5);
1620 TCGv tcg_r1, tcg_r2;
1621 ExitStatus ret;
1622
1623 if (cf) {
1624 nullify_over(ctx);
1625 }
1626 tcg_r1 = load_gpr(ctx, r1);
1627 tcg_r2 = load_gpr(ctx, r2);
1628 ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1629 return nullify_end(ctx, ret);
1630 }
1631
1632 static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
1633 const DisasInsn *di)
1634 {
1635 unsigned r2 = extract32(insn, 21, 5);
1636 unsigned r1 = extract32(insn, 16, 5);
1637 unsigned cf = extract32(insn, 12, 4);
1638 unsigned is_tc = extract32(insn, 6, 1);
1639 unsigned rt = extract32(insn, 0, 5);
1640 TCGv tcg_r1, tcg_r2, tmp;
1641 ExitStatus ret;
1642
1643 if (cf) {
1644 nullify_over(ctx);
1645 }
1646 tcg_r1 = load_gpr(ctx, r1);
1647 tcg_r2 = load_gpr(ctx, r2);
1648 tmp = get_temp(ctx);
1649 tcg_gen_not_tl(tmp, tcg_r2);
1650 ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
1651 return nullify_end(ctx, ret);
1652 }
1653
/* DCOR / IDCOR: decimal correct.  Uses the per-nibble carry bits saved
   in PSW[CB] to apply a correction of 6 to the appropriate BCD digits.  */
static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv tmp;
    ExitStatus ret;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    /* Move each nibble's carry-out bit into that nibble's low bit.  */
    tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        /* Plain DCOR corrects the digits that did NOT carry; invert.  */
        tcg_gen_not_tl(tmp, tmp);
    }
    /* Keep one flag bit per 4-bit digit...  */
    tcg_gen_andi_tl(tmp, tmp, 0x11111111);
    /* ... and turn each flag into a correction of 6.  */
    tcg_gen_muli_tl(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);

    return nullify_end(ctx, ret);
}
1678
/* DS: divide step.  One step of shift-and-add/subtract division,
   consuming and producing PSW[CB] and PSW[V].  */
static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
                           const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_tl(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_tl(add1, in1, in1);
    tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
    tcg_gen_xor_tl(add2, in2, addc);
    tcg_gen_andi_tl(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB]: the per-bit carries are recovered from the
       inputs and the sum via XOR.  */
    tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
    tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv sv;
        TCGV_UNUSED(sv);
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx, NO_EXIT);
}
1745
/* Decode table for the arith/log major opcode: each entry is a match
   value, a mask, the handler, and (for trans_log) the TCG operation to
   apply.  More specific patterns precede overlapping general ones.  */
static const DisasInsn table_arith_log[] = {
    { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
    { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
    { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl },
    { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl },
    { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl },
    { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl },
    { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
    { 0x08000380u, 0xfc000fe0u, trans_uxor },
    { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
    { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
    { 0x08000440u, 0xfc000fe0u, trans_ds },
    { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
    { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
    { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
    { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
};
1763
/* ADDI and variants.  E1 selects trap-on-signed-overflow; O1 clear
   selects the trap-on-condition form (hence !o1 below).  */
static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned o1 = extract32(insn, 26, 1);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        /* A nonzero condition may nullify the following insn.  */
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    /* do_add(ctx, rt, in1, in2, shift, is_l, is_tsv, is_tc, is_c, cf)  */
    ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);

    return nullify_end(ctx, ret);
}
1785
/* SUBI and variants.  E1 selects trap-on-signed-overflow.  */
static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn)
{
    target_long im = low_sextract(insn, 0, 11);
    unsigned e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv tcg_im, tcg_r2;
    ExitStatus ret;

    if (cf) {
        /* A nonzero condition may nullify the following insn.  */
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    /* do_sub(ctx, rt, in1, in2, is_tsv, is_b, is_tc, cf)  */
    ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);

    return nullify_end(ctx, ret);
}
1806
1807 static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn)
1808 {
1809 target_long im = low_sextract(insn, 0, 11);
1810 unsigned cf = extract32(insn, 12, 4);
1811 unsigned rt = extract32(insn, 16, 5);
1812 unsigned r2 = extract32(insn, 21, 5);
1813 TCGv tcg_im, tcg_r2;
1814 ExitStatus ret;
1815
1816 if (cf) {
1817 nullify_over(ctx);
1818 }
1819
1820 tcg_im = load_const(ctx, im);
1821 tcg_r2 = load_gpr(ctx, r2);
1822 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
1823
1824 return nullify_end(ctx, ret);
1825 }
1826
1827 static ExitStatus trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
1828 const DisasInsn *di)
1829 {
1830 unsigned rt = extract32(insn, 0, 5);
1831 unsigned m = extract32(insn, 5, 1);
1832 unsigned sz = extract32(insn, 6, 2);
1833 unsigned a = extract32(insn, 13, 1);
1834 int disp = low_sextract(insn, 16, 5);
1835 unsigned rb = extract32(insn, 21, 5);
1836 int modify = (m ? (a ? -1 : 1) : 0);
1837 TCGMemOp mop = MO_TE | sz;
1838
1839 return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
1840 }
1841
1842 static ExitStatus trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
1843 const DisasInsn *di)
1844 {
1845 unsigned rt = extract32(insn, 0, 5);
1846 unsigned m = extract32(insn, 5, 1);
1847 unsigned sz = extract32(insn, 6, 2);
1848 unsigned u = extract32(insn, 13, 1);
1849 unsigned rx = extract32(insn, 16, 5);
1850 unsigned rb = extract32(insn, 21, 5);
1851 TCGMemOp mop = MO_TE | sz;
1852
1853 return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
1854 }
1855
1856 static ExitStatus trans_st_idx_i(DisasContext *ctx, uint32_t insn,
1857 const DisasInsn *di)
1858 {
1859 int disp = low_sextract(insn, 0, 5);
1860 unsigned m = extract32(insn, 5, 1);
1861 unsigned sz = extract32(insn, 6, 2);
1862 unsigned a = extract32(insn, 13, 1);
1863 unsigned rr = extract32(insn, 16, 5);
1864 unsigned rb = extract32(insn, 21, 5);
1865 int modify = (m ? (a ? -1 : 1) : 0);
1866 TCGMemOp mop = MO_TE | sz;
1867
1868 return do_store(ctx, rr, rb, disp, modify, mop);
1869 }
1870
1871 static ExitStatus trans_ldcw(DisasContext *ctx, uint32_t insn,
1872 const DisasInsn *di)
1873 {
1874 unsigned rt = extract32(insn, 0, 5);
1875 unsigned m = extract32(insn, 5, 1);
1876 unsigned i = extract32(insn, 12, 1);
1877 unsigned au = extract32(insn, 13, 1);
1878 unsigned rx = extract32(insn, 16, 5);
1879 unsigned rb = extract32(insn, 21, 5);
1880 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
1881 TCGv zero, addr, base, dest;
1882 int modify, disp = 0, scale = 0;
1883
1884 nullify_over(ctx);
1885
1886 /* ??? Share more code with do_load and do_load_{32,64}. */
1887
1888 if (i) {
1889 modify = (m ? (au ? -1 : 1) : 0);
1890 disp = low_sextract(rx, 0, 5);
1891 rx = 0;
1892 } else {
1893 modify = m;
1894 if (au) {
1895 scale = mop & MO_SIZE;
1896 }
1897 }
1898 if (modify) {
1899 /* Base register modification. Make sure if RT == RB, we see
1900 the result of the load. */
1901 dest = get_temp(ctx);
1902 } else {
1903 dest = dest_gpr(ctx, rt);
1904 }
1905
1906 addr = tcg_temp_new();
1907 base = load_gpr(ctx, rb);
1908 if (rx) {
1909 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1910 tcg_gen_add_tl(addr, addr, base);
1911 } else {
1912 tcg_gen_addi_tl(addr, base, disp);
1913 }
1914
1915 zero = tcg_const_tl(0);
1916 tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
1917 zero, MMU_USER_IDX, mop);
1918 if (modify) {
1919 save_gpr(ctx, rb, addr);
1920 }
1921 save_gpr(ctx, rt, dest);
1922
1923 return nullify_end(ctx, NO_EXIT);
1924 }
1925
/* STBY: store bytes.  The partial-word byte selection is delegated to
   the stby_b ("beginning") / stby_e ("end") helpers, chosen by A.  */
static ExitStatus trans_stby(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    target_long disp = low_sextract(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned a = extract32(insn, 13, 1);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv addr, val;

    nullify_over(ctx);

    addr = tcg_temp_new();
    if (m || disp == 0) {
        /* With base modification (or zero displacement), the access
           itself uses the unmodified base.  */
        tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
    } else {
        tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
    }
    val = load_gpr(ctx, rt);

    if (a) {
        gen_helper_stby_e(cpu_env, addr, val);
    } else {
        gen_helper_stby_b(cpu_env, addr, val);
    }

    if (m) {
        /* Write back the word-aligned modified address.  */
        tcg_gen_addi_tl(addr, addr, disp);
        tcg_gen_andi_tl(addr, addr, ~3);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);

    return nullify_end(ctx, NO_EXIT);
}
1961
/* Decode table for the indexed/short-displacement memory major opcode:
   match value, mask, handler.  */
static const DisasInsn table_index_mem[] = {
    { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
    { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
    { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
    { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
    { 0x0c001300u, 0xfc0013c0, trans_stby },
};
1969
1970 static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn)
1971 {
1972 unsigned rt = extract32(insn, 21, 5);
1973 target_long i = assemble_21(insn);
1974 TCGv tcg_rt = dest_gpr(ctx, rt);
1975
1976 tcg_gen_movi_tl(tcg_rt, i);
1977 save_gpr(ctx, rt, tcg_rt);
1978 cond_free(&ctx->null_cond);
1979
1980 return NO_EXIT;
1981 }
1982
1983 static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn)
1984 {
1985 unsigned rt = extract32(insn, 21, 5);
1986 target_long i = assemble_21(insn);
1987 TCGv tcg_rt = load_gpr(ctx, rt);
1988 TCGv tcg_r1 = dest_gpr(ctx, 1);
1989
1990 tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
1991 save_gpr(ctx, 1, tcg_r1);
1992 cond_free(&ctx->null_cond);
1993
1994 return NO_EXIT;
1995 }
1996
1997 static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn)
1998 {
1999 unsigned rb = extract32(insn, 21, 5);
2000 unsigned rt = extract32(insn, 16, 5);
2001 target_long i = assemble_16(insn);
2002 TCGv tcg_rt = dest_gpr(ctx, rt);
2003
2004 /* Special case rb == 0, for the LDI pseudo-op.
2005 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2006 if (rb == 0) {
2007 tcg_gen_movi_tl(tcg_rt, i);
2008 } else {
2009 tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2010 }
2011 save_gpr(ctx, rt, tcg_rt);
2012 cond_free(&ctx->null_cond);
2013
2014 return NO_EXIT;
2015 }
2016
2017 static ExitStatus trans_load(DisasContext *ctx, uint32_t insn,
2018 bool is_mod, TCGMemOp mop)
2019 {
2020 unsigned rb = extract32(insn, 21, 5);
2021 unsigned rt = extract32(insn, 16, 5);
2022 target_long i = assemble_16(insn);
2023
2024 return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2025 }
2026
/* LDW / FLDW with the 16a displacement form; EXT2 selects between the
   floating-point and integer variants.  */
static ExitStatus trans_load_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FLDW without modification.  */
        return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* LDW with modification.  Note that the sign of I selects
           post-dec vs pre-inc.  */
        return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}
2047
2048 static ExitStatus trans_fload_mod(DisasContext *ctx, uint32_t insn)
2049 {
2050 target_long i = assemble_16a(insn);
2051 unsigned t1 = extract32(insn, 1, 1);
2052 unsigned a = extract32(insn, 2, 1);
2053 unsigned t0 = extract32(insn, 16, 5);
2054 unsigned rb = extract32(insn, 21, 5);
2055
2056 /* FLDW with modification. */
2057 return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2058 }
2059
2060 static ExitStatus trans_store(DisasContext *ctx, uint32_t insn,
2061 bool is_mod, TCGMemOp mop)
2062 {
2063 unsigned rb = extract32(insn, 21, 5);
2064 unsigned rt = extract32(insn, 16, 5);
2065 target_long i = assemble_16(insn);
2066
2067 return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2068 }
2069
/* STW / FSTW with the 16a displacement form; EXT2 selects between the
   floating-point and integer variants.  */
static ExitStatus trans_store_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FSTW without modification.  */
        return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* STW with modification.  Note that the sign of I selects
           post-dec vs pre-inc, as in trans_load_w above.  */
        return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}
2089
2090 static ExitStatus trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2091 {
2092 target_long i = assemble_16a(insn);
2093 unsigned t1 = extract32(insn, 1, 1);
2094 unsigned a = extract32(insn, 2, 1);
2095 unsigned t0 = extract32(insn, 16, 5);
2096 unsigned rb = extract32(insn, 21, 5);
2097
2098 /* FSTW with modification. */
2099 return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2100 }
2101
2102 static ExitStatus trans_copr_w(DisasContext *ctx, uint32_t insn)
2103 {
2104 unsigned t0 = extract32(insn, 0, 5);
2105 unsigned m = extract32(insn, 5, 1);
2106 unsigned t1 = extract32(insn, 6, 1);
2107 unsigned ext3 = extract32(insn, 7, 3);
2108 /* unsigned cc = extract32(insn, 10, 2); */
2109 unsigned i = extract32(insn, 12, 1);
2110 unsigned ua = extract32(insn, 13, 1);
2111 unsigned rx = extract32(insn, 16, 5);
2112 unsigned rb = extract32(insn, 21, 5);
2113 unsigned rt = t1 * 32 + t0;
2114 int modify = (m ? (ua ? -1 : 1) : 0);
2115 int disp, scale;
2116
2117 if (i == 0) {
2118 scale = (ua ? 2 : 0);
2119 disp = 0;
2120 modify = m;
2121 } else {
2122 disp = low_sextract(rx, 0, 5);
2123 scale = 0;
2124 rx = 0;
2125 modify = (m ? (ua ? -1 : 1) : 0);
2126 }
2127
2128 switch (ext3) {
2129 case 0: /* FLDW */
2130 return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2131 case 4: /* FSTW */
2132 return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2133 }
2134 return gen_illegal(ctx);
2135 }
2136
2137 static ExitStatus trans_copr_dw(DisasContext *ctx, uint32_t insn)
2138 {
2139 unsigned rt = extract32(insn, 0, 5);
2140 unsigned m = extract32(insn, 5, 1);
2141 unsigned ext4 = extract32(insn, 6, 4);
2142 /* unsigned cc = extract32(insn, 10, 2); */
2143 unsigned i = extract32(insn, 12, 1);
2144 unsigned ua = extract32(insn, 13, 1);
2145 unsigned rx = extract32(insn, 16, 5);
2146 unsigned rb = extract32(insn, 21, 5);
2147 int modify = (m ? (ua ? -1 : 1) : 0);
2148 int disp, scale;
2149
2150 if (i == 0) {
2151 scale = (ua ? 3 : 0);
2152 disp = 0;
2153 modify = m;
2154 } else {
2155 disp = low_sextract(rx, 0, 5);
2156 scale = 0;
2157 rx = 0;
2158 modify = (m ? (ua ? -1 : 1) : 0);
2159 }
2160
2161 switch (ext4) {
2162 case 0: /* FLDD */
2163 return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2164 case 8: /* FSTD */
2165 return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2166 default:
2167 return gen_illegal(ctx);
2168 }
2169 }
2170
/* COMB/COMIB: compare and branch.  The comparison is IN1 - IN2; the
   branch sense (true/false form) folds into the low bit of CF.  */
static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn,
                             bool is_true, bool is_imm, bool is_dw)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv dest, in1, in2, sv;
    DisasCond cond;

    nullify_over(ctx);

    /* Operand 1 is either a 5-bit signed immediate or a register.  */
    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_tl(dest, in1, in2);

    TCGV_UNUSED(sv);
    if (c == 6) {
        /* Condition 6 needs the signed-overflow of the subtraction.  */
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(cf, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
2202
/* ADDB/ADDIB: add and branch.  R receives IN1 + IN2, and the branch
   tests the condition of the sum.  */
static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn,
                             bool is_true, bool is_imm)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv dest, in1, in2, sv, cb_msb;
    DisasCond cond;

    nullify_over(ctx);

    /* Operand 1 is either a 5-bit signed immediate or a register.  */
    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = dest_gpr(ctx, r);
    TCGV_UNUSED(sv);
    TCGV_UNUSED(cb_msb);

    switch (c) {
    default:
        tcg_gen_add_tl(dest, in1, in2);
        break;
    case 4: case 5:
        /* These conditions need the carry-out of the addition.  */
        cb_msb = get_temp(ctx);
        tcg_gen_movi_tl(cb_msb, 0);
        tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        break;
    case 6:
        /* Condition 6 needs the signed-overflow of the addition.  */
        tcg_gen_add_tl(dest, in1, in2);
        sv = do_add_sv(ctx, dest, in1, in2);
        break;
    }

    cond = do_cond(cf, dest, cb_msb, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
2244
/* BB: branch on bit.  Shift the selected bit (fixed position P, or
   dynamic via %sar) into the sign position so the branch condition
   reduces to a simple sign test.  */
static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 15, 1);
    unsigned r = extract32(insn, 16, 5);
    unsigned p = extract32(insn, 21, 5);
    unsigned i = extract32(insn, 26, 1);
    TCGv tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, r);
    if (i) {
        tcg_gen_shli_tl(tmp, tcg_r, p);
    } else {
        tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
    }

    /* C selects branch-on-clear (>= 0) vs branch-on-set (< 0).  */
    cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, disp, n, &cond);
}
2270
2271 static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2272 {
2273 target_long disp = assemble_12(insn) * 4;
2274 unsigned n = extract32(insn, 1, 1);
2275 unsigned c = extract32(insn, 13, 3);
2276 unsigned t = extract32(insn, 16, 5);
2277 unsigned r = extract32(insn, 21, 5);
2278 TCGv dest;
2279 DisasCond cond;
2280
2281 nullify_over(ctx);
2282
2283 dest = dest_gpr(ctx, r);
2284 if (is_imm) {
2285 tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2286 } else if (t == 0) {
2287 tcg_gen_movi_tl(dest, 0);
2288 } else {
2289 tcg_gen_mov_tl(dest, cpu_gr[t]);
2290 }
2291
2292 cond = do_sed_cond(c, dest);
2293 return do_cbranch(ctx, disp, n, &cond);
2294 }
2295
/* SHRPW (variable): shift the 64-bit pair r1:r2 right by SAR and write
   the low 32 bits of the result to rt.  The r1 == 0 and r1 == r2
   encodings reduce to a plain shift and a 32-bit rotate respectively.  */
static ExitStatus trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv dest;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    if (r1 == 0) {
        /* Upper word is zero: an unsigned shift of r2 suffices.  */
        tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
        tcg_gen_shr_tl(dest, dest, cpu_sar);
    } else if (r1 == r2) {
        /* Both halves equal: the double shift is a 32-bit rotate.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_tl(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        /* General case: concatenate and shift as a true 64-bit value.  */
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
        tcg_gen_extu_tl_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_tl(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, NO_EXIT);
}
2340
/* SHRPW (fixed shift): shift the 64-bit pair r1:r2 right by a constant
   amount and write the low 32 bits to rt.  r1 == r2 is a rotate and
   r1 == 0 a plain extract.  */
static ExitStatus trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned sa = 31 - cpos;    /* big-endian bit position -> shift count */
    TCGv dest, t2;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    t2 = load_gpr(ctx, r2);
    if (r1 == r2) {
        /* Identical halves: the double-word shift is a rotate.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_tl(dest, t32);
        tcg_temp_free_i32(t32);
    } else if (r1 == 0) {
        /* Zero upper half: just extract the surviving bits of r2.  */
        tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
    } else {
        /* General case: high bits come from r2, low bits from r1.  */
        TCGv t0 = tcg_temp_new();
        tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
        tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
        tcg_temp_free(t0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, NO_EXIT);
}
2381
/* EXTRW (variable): extract a LEN-bit field of GR[rr] at the bit
   position held in SAR into GR[rt], signed (is_se) or unsigned.  */
static ExitStatus trans_extrw_sar(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned is_se = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rr = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    TCGv dest, src, tmp;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    src = load_gpr(ctx, rr);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering. */
    tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
    if (is_se) {
        /* Signed: arithmetic shift, then sign-extend the low field.  */
        tcg_gen_sar_tl(dest, src, tmp);
        tcg_gen_sextract_tl(dest, dest, 0, len);
    } else {
        /* Unsigned: logical shift, then mask to the low field.  */
        tcg_gen_shr_tl(dest, src, tmp);
        tcg_gen_extract_tl(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, NO_EXIT);
}
2420
2421 static ExitStatus trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2422 const DisasInsn *di)
2423 {
2424 unsigned clen = extract32(insn, 0, 5);
2425 unsigned pos = extract32(insn, 5, 5);
2426 unsigned is_se = extract32(insn, 10, 1);
2427 unsigned c = extract32(insn, 13, 3);
2428 unsigned rt = extract32(insn, 16, 5);
2429 unsigned rr = extract32(insn, 21, 5);
2430 unsigned len = 32 - clen;
2431 unsigned cpos = 31 - pos;
2432 TCGv dest, src;
2433
2434 if (c) {
2435 nullify_over(ctx);
2436 }
2437
2438 dest = dest_gpr(ctx, rt);
2439 src = load_gpr(ctx, rr);
2440 if (is_se) {
2441 tcg_gen_sextract_tl(dest, src, cpos, len);
2442 } else {
2443 tcg_gen_extract_tl(dest, src, cpos, len);
2444 }
2445 save_gpr(ctx, rt, dest);
2446
2447 /* Install the new nullification. */
2448 cond_free(&ctx->null_cond);
2449 if (c) {
2450 ctx->null_cond = do_sed_cond(c, dest);
2451 }
2452 return nullify_end(ctx, NO_EXIT);
2453 }
2454
/* Secondary decode table for the shift/extract major opcode (0x34).  */
static const DisasInsn table_sh_ex[] = {
    { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
    { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
    { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
    { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
};
2461
/* DEPWI: deposit a signed 5-bit immediate into a fixed field of GR[rt].
   Since both the value and the field are compile-time constants, the
   result is built from constant masks rather than a runtime deposit.  */
static ExitStatus trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    target_long val = low_sextract(insn, 16, 5);
    unsigned rt = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    target_long mask0, mask1;
    TCGv dest;

    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    /* mask0: VAL in the field, zeros elsewhere;
       mask1: VAL in the field, ones elsewhere.  */
    mask0 = deposit64(0, cpos, len, val);
    mask1 = deposit64(-1, cpos, len, val);

    if (nz) {
        /* Merge form: clear the field's zero bits, then set its ones.  */
        TCGv src = load_gpr(ctx, rt);
        if (mask1 != -1) {
            tcg_gen_andi_tl(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_tl(dest, src, mask0);
    } else {
        /* Zero form: the entire result is a constant.  */
        tcg_gen_movi_tl(dest, mask0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, NO_EXIT);
}
2505
2506 static ExitStatus trans_depw_imm(DisasContext *ctx, uint32_t insn,
2507 const DisasInsn *di)
2508 {
2509 unsigned clen = extract32(insn, 0, 5);
2510 unsigned cpos = extract32(insn, 5, 5);
2511 unsigned nz = extract32(insn, 10, 1);
2512 unsigned c = extract32(insn, 13, 3);
2513 unsigned rr = extract32(insn, 16, 5);
2514 unsigned rt = extract32(insn, 21, 5);
2515 unsigned rs = nz ? rt : 0;
2516 unsigned len = 32 - clen;
2517 TCGv dest, val;
2518
2519 if (c) {
2520 nullify_over(ctx);
2521 }
2522 if (cpos + len > 32) {
2523 len = 32 - cpos;
2524 }
2525
2526 dest = dest_gpr(ctx, rt);
2527 val = load_gpr(ctx, rr);
2528 if (rs == 0) {
2529 tcg_gen_deposit_z_tl(dest, val, cpos, len);
2530 } else {
2531 tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2532 }
2533 save_gpr(ctx, rt, dest);
2534
2535 /* Install the new nullification. */
2536 cond_free(&ctx->null_cond);
2537 if (c) {
2538 ctx->null_cond = do_sed_cond(c, dest);
2539 }
2540 return nullify_end(ctx, NO_EXIT);
2541 }
2542
/* DEPW (variable): deposit a register or signed 5-bit immediate into a
   field of GR[rt] whose position is given by SAR, implemented by
   shifting a constant-width mask and the value into place at runtime.  */
static ExitStatus trans_depw_sar(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned i = extract32(insn, 12, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 21, 5);
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv val, mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    if (c) {
        nullify_over(ctx);
    }

    if (i) {
        val = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        val = load_gpr(ctx, extract32(insn, 16, 5));
    }
    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift. */
    tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);

    /* msb + (msb - 1) is a mask of LEN low bits.  */
    mask = tcg_const_tl(msb + (msb - 1));
    tcg_gen_and_tl(tmp, val, mask);
    if (rs) {
        /* Merge form: clear the field in the old value, OR in the new.  */
        tcg_gen_shl_tl(mask, mask, shift);
        tcg_gen_shl_tl(tmp, tmp, shift);
        tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
        tcg_gen_or_tl(dest, dest, tmp);
    } else {
        /* Zero form: only the shifted field survives.  */
        tcg_gen_shl_tl(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, NO_EXIT);
}
2594
/* Secondary decode table for the deposit major opcode (0x35).  */
static const DisasInsn table_depw[] = {
    { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
    { 0xd4000800u, 0xfc001800u, trans_depw_imm },
    { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
};
2600
2601 static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2602 {
2603 unsigned n = extract32(insn, 1, 1);
2604 unsigned b = extract32(insn, 21, 5);
2605 target_long disp = assemble_17(insn);
2606
2607 /* unsigned s = low_uextract(insn, 13, 3); */
2608 /* ??? It seems like there should be a good way of using
2609 "be disp(sr2, r0)", the canonical gateway entry mechanism
2610 to our advantage. But that appears to be inconvenient to
2611 manage along side branch delay slots. Therefore we handle
2612 entry into the gateway page via absolute address. */
2613
2614 /* Since we don't implement spaces, just branch. Do notice the special
2615 case of "be disp(*,r0)" using a direct branch to disp, so that we can
2616 goto_tb to the TB containing the syscall. */
2617 if (b == 0) {
2618 return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2619 } else {
2620 TCGv tmp = get_temp(ctx);
2621 tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2622 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2623 }
2624 }
2625
2626 static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn,
2627 const DisasInsn *di)
2628 {
2629 unsigned n = extract32(insn, 1, 1);
2630 unsigned link = extract32(insn, 21, 5);
2631 target_long disp = assemble_17(insn);
2632
2633 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2634 }
2635
2636 static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn,
2637 const DisasInsn *di)
2638 {
2639 unsigned n = extract32(insn, 1, 1);
2640 target_long disp = assemble_22(insn);
2641
2642 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2643 }
2644
2645 static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn,
2646 const DisasInsn *di)
2647 {
2648 unsigned n = extract32(insn, 1, 1);
2649 unsigned rx = extract32(insn, 16, 5);
2650 unsigned link = extract32(insn, 21, 5);
2651 TCGv tmp = get_temp(ctx);
2652
2653 tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
2654 tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
2655 return do_ibranch(ctx, tmp, link, n);
2656 }
2657
2658 static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn,
2659 const DisasInsn *di)
2660 {
2661 unsigned n = extract32(insn, 1, 1);
2662 unsigned rx = extract32(insn, 16, 5);
2663 unsigned rb = extract32(insn, 21, 5);
2664 TCGv dest;
2665
2666 if (rx == 0) {
2667 dest = load_gpr(ctx, rb);
2668 } else {
2669 dest = get_temp(ctx);
2670 tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
2671 tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
2672 }
2673 return do_ibranch(ctx, dest, 0, n);
2674 }
2675
2676 static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn,
2677 const DisasInsn *di)
2678 {
2679 unsigned n = extract32(insn, 1, 1);
2680 unsigned rb = extract32(insn, 21, 5);
2681 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
2682
2683 return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
2684 }
2685
/* Secondary decode table for the branch major opcode (0x3A).  */
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
};
2693
2694 static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn,
2695 const DisasInsn table[], size_t n)
2696 {
2697 size_t i;
2698 for (i = 0; i < n; ++i) {
2699 if ((insn & table[i].mask) == table[i].insn) {
2700 return table[i].trans(ctx, insn, &table[i]);
2701 }
2702 }
2703 return gen_illegal(ctx);
2704 }
2705
/* Dispatch INSN through TABLE, supplying the bound via ARRAY_SIZE.  */
#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
2708
/* Decode and translate one instruction, dispatching on the major
   opcode in the top six bits, either directly or through a
   secondary decode table.  */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0D:
        return trans_ldo(ctx, insn);

    /* Loads and stores, by operand size and address-modify form.  */
    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    /* Compare/add-and-branch and immediate arithmetic.  */
    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    /* Bit tests, moves, shift/extract/deposit, and branches.  */
    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
    case 0x3F: /* unassigned */
    default:
        break;
    }
    /* Unimplemented or unassigned opcodes raise an illegal instruction.  */
    return gen_illegal(ctx);
}
2815
/* Translate one guest basic block into TCG ops, tracking the two-entry
   instruction address queue (IAOQ front/back) and the PSW[N]
   nullification state across the translated instructions.  */
void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
{
    HPPACPU *cpu = hppa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    ExitStatus ret;
    int num_insns, max_insns, i;

    ctx.tb = tb;
    ctx.cs = cs;
    ctx.iaoq_f = tb->pc;
    ctx.iaoq_b = tb->cs_base;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.ntemps = 0;
    for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
        TCGV_UNUSED(ctx.temps[i]);
    }

    /* Compute the maximum number of insns to execute, as bounded by
       (1) icount, (2) single-stepping, (3) branch delay slots, or
       (4) the number of insns remaining on the current page. */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (ctx.singlestep_enabled || singlestep) {
        max_insns = 1;
    } else if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    num_insns = 0;
    gen_tb_start(tb);

    /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */
    ctx.null_cond = cond_make_f();
    ctx.psw_n_nonzero = false;
    if (tb->flags & 1) {
        ctx.null_cond.c = TCG_COND_ALWAYS;
        ctx.psw_n_nonzero = true;
    }
    ctx.null_lab = NULL;

    do {
        tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG);
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* Addresses in the first page are emulated syscall gateways,
           handled by do_page_zero rather than normal decode.  */
        if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
            ret = do_page_zero(&ctx);
            assert(ret != NO_EXIT);
        } else {
            /* Always fetch the insn, even if nullified, so that we check
               the page permissions for execute. */
            uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);

            /* Set up the IA queue for the next insn.
               This will be overwritten by a branch. */
            if (ctx.iaoq_b == -1) {
                ctx.iaoq_n = -1;
                ctx.iaoq_n_var = get_temp(&ctx);
                tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
            } else {
                ctx.iaoq_n = ctx.iaoq_b + 4;
                TCGV_UNUSED(ctx.iaoq_n_var);
            }

            if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
                /* This insn is known to be nullified: skip translation.  */
                ctx.null_cond.c = TCG_COND_NEVER;
                ret = NO_EXIT;
            } else {
                ret = translate_one(&ctx, insn);
                assert(ctx.null_lab == NULL);
            }
        }

        /* Release the temporaries allocated for this insn.  */
        for (i = 0; i < ctx.ntemps; ++i) {
            tcg_temp_free(ctx.temps[i]);
            TCGV_UNUSED(ctx.temps[i]);
        }
        ctx.ntemps = 0;

        /* If we see non-linear instructions, exhaust instruction count,
           or run out of buffer space, stop generation. */
        /* ??? The non-linear instruction restriction is purely due to
           the debugging dump. Otherwise we *could* follow unconditional
           branches within the same page. */
        if (ret == NO_EXIT
            && (ctx.iaoq_b != ctx.iaoq_f + 4
                || num_insns >= max_insns
                || tcg_op_buf_full())) {
            if (ctx.null_cond.c == TCG_COND_NEVER
                || ctx.null_cond.c == TCG_COND_ALWAYS) {
                nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
                gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
                ret = EXIT_GOTO_TB;
            } else {
                ret = EXIT_IAQ_N_STALE;
            }
        }

        /* Advance the IA queue for the next iteration.  */
        ctx.iaoq_f = ctx.iaoq_b;
        ctx.iaoq_b = ctx.iaoq_n;
        if (ret == EXIT_NORETURN
            || ret == EXIT_GOTO_TB
            || ret == EXIT_IAQ_N_UPDATED) {
            break;
        }
        /* -1 marks an IAOQ entry whose value is only known at runtime.  */
        if (ctx.iaoq_f == -1) {
            tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
            nullify_save(&ctx);
            ret = EXIT_IAQ_N_UPDATED;
            break;
        }
        if (ctx.iaoq_b == -1) {
            tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation stopped.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_IAQ_N_STALE:
        copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
        nullify_save(&ctx);
        /* FALLTHRU */
    case EXIT_IAQ_N_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = num_insns * 4;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        /* The page-zero gateways have no real code to disassemble.  */
        switch (tb->pc) {
        case 0x00:
            qemu_log("IN:\n0x00000000: (null)\n\n");
            break;
        case 0xb0:
            qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
            break;
        case 0xe0:
            qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
            break;
        case 0x100:
            qemu_log("IN:\n0x00000100: syscall\n\n");
            break;
        default:
            qemu_log("IN: %s\n", lookup_symbol(tb->pc));
            log_target_disas(cs, tb->pc, tb->size, 1);
            qemu_log("\n");
            break;
        }
        qemu_log_unlock();
    }
#endif
}
3000
3001 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3002 target_ulong *data)
3003 {
3004 env->iaoq_f = data[0];
3005 if (data[1] != -1) {
3006 env->iaoq_b = data[1];
3007 }
3008 /* Since we were executing the instruction at IAOQ_F, and took some
3009 sort of action that provoked the cpu_restore_state, we can infer
3010 that the instruction was not nullified. */
3011 env->psw_n = 0;
3012 }