1/*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "disas/disas.h"
23#include "qemu/host-utils.h"
24#include "exec/exec-all.h"
25#include "tcg-op.h"
26#include "exec/cpu_ldst.h"
27
28#include "exec/helper-proto.h"
29#include "exec/helper-gen.h"
30
31#include "trace-tcg.h"
32#include "exec/log.h"
33
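/* A comparison under construction.  C is the TCG condition to apply to
   A0 and A1.  A0_IS_N means A0 aliases the global cpu_psw_n rather than a
   private temporary; A1_IS_0 means A1 is an implicit constant zero, which
   cond_prep materializes on demand. */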
34typedef struct DisasCond {
35 TCGCond c;
36 TCGv a0, a1;
37 bool a0_is_n;
38 bool a1_is_0;
39} DisasCond;
40
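/* Per-TB translation state.  IAOQ_F and IAOQ_B mirror the architectural
   instruction address offset queue (the insn being translated and its
   successor); IAOQ_N is the address that will follow IAOQ_B, and IAOQ_N_VAR
   carries it as a TCG value whenever it is not known at translation time
   (signalled by IAOQ_N == -1). */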
41typedef struct DisasContext {
42 struct TranslationBlock *tb;
43 CPUState *cs;
44
45 target_ulong iaoq_f;
46 target_ulong iaoq_b;
47 target_ulong iaoq_n;
48 TCGv iaoq_n_var;
49
50 int ntemps;
51 TCGv temps[8];
52
53 DisasCond null_cond;
54 TCGLabel *null_lab;
55
56 bool singlestep_enabled;
57 bool psw_n_nonzero;
58} DisasContext;
59
60/* Return values from translate_one, indicating the state of the TB.
61 Note that zero indicates that we are not exiting the TB. */
62
63typedef enum {
64 NO_EXIT,
65
66 /* We have emitted one or more goto_tb. No fixup required. */
67 EXIT_GOTO_TB,
68
69 /* We are not using a goto_tb (for whatever reason), but have updated
70 the iaq, so don't do it again on exit. */
71 EXIT_IAQ_N_UPDATED,
72
73 /* We are exiting the TB, but have neither emitted a goto_tb, nor
74 updated the iaq for the next instruction to be executed. */
75 EXIT_IAQ_N_STALE,
76
77 /* We are ending the TB with a noreturn function call, e.g. longjmp.
78 No following code will be executed. */
79 EXIT_NORETURN,
80} ExitStatus;
81
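/* One entry in an instruction decode table: an instruction word matches the
   entry when (word & mask) == insn, and TRANS is then called to emit the
   translation.  The union holds an operation-specific callback, e.g. a
   three-operand TCG generator (f_ttt) for the logical instructions. */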
82typedef struct DisasInsn {
83 uint32_t insn, mask;
84 ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
85 const struct DisasInsn *f);
86 union {
87 void (*f_ttt)(TCGv, TCGv, TCGv);
88 };
89} DisasInsn;
90
91/* global register indexes */
92static TCGv_env cpu_env;
93static TCGv cpu_gr[32];
94static TCGv cpu_iaoq_f;
95static TCGv cpu_iaoq_b;
96static TCGv cpu_sar;
97static TCGv cpu_psw_n;
98static TCGv cpu_psw_v;
99static TCGv cpu_psw_cb;
100static TCGv cpu_psw_cb_msb;
101static TCGv cpu_cr26;
102static TCGv cpu_cr27;
103
104#include "exec/gen-icount.h"
105
106void hppa_translate_init(void)
107{
108#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
109
110 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
111 static const GlobalVar vars[] = {
112 DEF_VAR(sar),
113 DEF_VAR(cr26),
114 DEF_VAR(cr27),
115 DEF_VAR(psw_n),
116 DEF_VAR(psw_v),
117 DEF_VAR(psw_cb),
118 DEF_VAR(psw_cb_msb),
119 DEF_VAR(iaoq_f),
120 DEF_VAR(iaoq_b),
121 };
122
123#undef DEF_VAR
124
125 /* Use the symbolic register names that match the disassembler. */
126 static const char gr_names[32][4] = {
127 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
128 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
129 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
130 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
131 };
132
133 static bool done_init = 0;
134 int i;
135
136 if (done_init) {
137 return;
138 }
139 done_init = 1;
140
141 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
142 tcg_ctx.tcg_env = cpu_env;
143
144 TCGV_UNUSED(cpu_gr[0]);
145 for (i = 1; i < 32; i++) {
146 cpu_gr[i] = tcg_global_mem_new(cpu_env,
147 offsetof(CPUHPPAState, gr[i]),
148 gr_names[i]);
149 }
150
151 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
152 const GlobalVar *v = &vars[i];
153 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
154 }
155}
156
157static DisasCond cond_make_f(void)
158{
159 DisasCond r = { .c = TCG_COND_NEVER };
160 TCGV_UNUSED(r.a0);
161 TCGV_UNUSED(r.a1);
162 return r;
163}
164
165static DisasCond cond_make_n(void)
166{
167 DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
168 r.a0 = cpu_psw_n;
169 TCGV_UNUSED(r.a1);
170 return r;
171}
172
173static DisasCond cond_make_0(TCGCond c, TCGv a0)
174{
175 DisasCond r = { .c = c, .a1_is_0 = true };
176
177 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
178 r.a0 = tcg_temp_new();
179 tcg_gen_mov_tl(r.a0, a0);
180 TCGV_UNUSED(r.a1);
181
182 return r;
183}
184
185static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
186{
187 DisasCond r = { .c = c };
188
189 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
190 r.a0 = tcg_temp_new();
191 tcg_gen_mov_tl(r.a0, a0);
192 r.a1 = tcg_temp_new();
193 tcg_gen_mov_tl(r.a1, a1);
194
195 return r;
196}
197
198static void cond_prep(DisasCond *cond)
199{
200 if (cond->a1_is_0) {
201 cond->a1_is_0 = false;
202 cond->a1 = tcg_const_tl(0);
203 }
204}
205
206static void cond_free(DisasCond *cond)
207{
208 switch (cond->c) {
209 default:
210 if (!cond->a0_is_n) {
211 tcg_temp_free(cond->a0);
212 }
213 if (!cond->a1_is_0) {
214 tcg_temp_free(cond->a1);
215 }
216 cond->a0_is_n = false;
217 cond->a1_is_0 = false;
218 TCGV_UNUSED(cond->a0);
219 TCGV_UNUSED(cond->a1);
220 /* fallthru */
221 case TCG_COND_ALWAYS:
222 cond->c = TCG_COND_NEVER;
223 break;
224 case TCG_COND_NEVER:
225 break;
226 }
227}
228
229static TCGv get_temp(DisasContext *ctx)
230{
231 unsigned i = ctx->ntemps++;
232 g_assert(i < ARRAY_SIZE(ctx->temps));
233 return ctx->temps[i] = tcg_temp_new();
234}
235
236static TCGv load_const(DisasContext *ctx, target_long v)
237{
238 TCGv t = get_temp(ctx);
239 tcg_gen_movi_tl(t, v);
240 return t;
241}
242
243static TCGv load_gpr(DisasContext *ctx, unsigned reg)
244{
245 if (reg == 0) {
246 TCGv t = get_temp(ctx);
247 tcg_gen_movi_tl(t, 0);
248 return t;
249 } else {
250 return cpu_gr[reg];
251 }
252}
253
254static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
255{
256 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
257 return get_temp(ctx);
258 } else {
259 return cpu_gr[reg];
260 }
261}
262
263static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
264{
265 if (ctx->null_cond.c != TCG_COND_NEVER) {
266 cond_prep(&ctx->null_cond);
267 tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
268 ctx->null_cond.a1, dest, t);
269 } else {
270 tcg_gen_mov_tl(dest, t);
271 }
272}
273
274static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
275{
276 if (reg != 0) {
277 save_or_nullify(ctx, cpu_gr[reg], t);
278 }
279}
280
281/* Skip over the implementation of an insn that has been nullified.
282 Use this when the insn is too complex for a conditional move. */
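/* Typical pairing with nullify_end in an insn translator (sketch, where
   do_operation stands in for whatever the insn actually emits):
       nullify_over(ctx);
       ret = do_operation(ctx, ...);
       return nullify_end(ctx, ret);  */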
283static void nullify_over(DisasContext *ctx)
284{
285 if (ctx->null_cond.c != TCG_COND_NEVER) {
286 /* The always condition should have been handled in the main loop. */
287 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
288
289 ctx->null_lab = gen_new_label();
290 cond_prep(&ctx->null_cond);
291
292 /* If we're using PSW[N], copy it to a temp because... */
293 if (ctx->null_cond.a0_is_n) {
294 ctx->null_cond.a0_is_n = false;
295 ctx->null_cond.a0 = tcg_temp_new();
296 tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
297 }
298 /* ... we clear it before branching over the implementation,
299 so that (1) it's clear after nullifying this insn and
300 (2) if this insn nullifies the next, PSW[N] is valid. */
301 if (ctx->psw_n_nonzero) {
302 ctx->psw_n_nonzero = false;
303 tcg_gen_movi_tl(cpu_psw_n, 0);
304 }
305
306 tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
307 ctx->null_cond.a1, ctx->null_lab);
308 cond_free(&ctx->null_cond);
309 }
310}
311
312/* Save the current nullification state to PSW[N]. */
313static void nullify_save(DisasContext *ctx)
314{
315 if (ctx->null_cond.c == TCG_COND_NEVER) {
316 if (ctx->psw_n_nonzero) {
317 tcg_gen_movi_tl(cpu_psw_n, 0);
318 }
319 return;
320 }
321 if (!ctx->null_cond.a0_is_n) {
322 cond_prep(&ctx->null_cond);
323 tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
324 ctx->null_cond.a0, ctx->null_cond.a1);
325 ctx->psw_n_nonzero = true;
326 }
327 cond_free(&ctx->null_cond);
328}
329
330/* Set PSW[N] to X. The intention is that this is used immediately
331 before a goto_tb/exit_tb, so that there is no fallthru path to other
332 code within the TB. Therefore we do not update psw_n_nonzero. */
333static void nullify_set(DisasContext *ctx, bool x)
334{
335 if (ctx->psw_n_nonzero || x) {
336 tcg_gen_movi_tl(cpu_psw_n, x);
337 }
338}
339
340/* Mark the end of an instruction that may have been nullified.
341 This is the pair to nullify_over. */
342static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status)
343{
344 TCGLabel *null_lab = ctx->null_lab;
345
346 if (likely(null_lab == NULL)) {
347 /* The current insn wasn't conditional or handled the condition
348 applied to it without a branch, so the (new) setting of
349 NULL_COND can be applied directly to the next insn. */
350 return status;
351 }
352 ctx->null_lab = NULL;
353
354 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
355 /* The next instruction will be unconditional,
356 and NULL_COND already reflects that. */
357 gen_set_label(null_lab);
358 } else {
359 /* The insn that we just executed is itself nullifying the next
360 instruction. Store the condition in the PSW[N] global.
361 We asserted PSW[N] = 0 in nullify_over, so that after the
362 label we have the proper value in place. */
363 nullify_save(ctx);
364 gen_set_label(null_lab);
365 ctx->null_cond = cond_make_n();
366 }
367
368 assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED);
369 if (status == EXIT_NORETURN) {
370 status = NO_EXIT;
371 }
372 return status;
373}
374
375static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
376{
377 if (unlikely(ival == -1)) {
378 tcg_gen_mov_tl(dest, vval);
379 } else {
380 tcg_gen_movi_tl(dest, ival);
381 }
382}
383
384static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
385{
386 return ctx->iaoq_f + disp + 8;
387}
388
389static void gen_excp_1(int exception)
390{
391 TCGv_i32 t = tcg_const_i32(exception);
392 gen_helper_excp(cpu_env, t);
393 tcg_temp_free_i32(t);
394}
395
396static ExitStatus gen_excp(DisasContext *ctx, int exception)
397{
398 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
399 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
400 nullify_save(ctx);
401 gen_excp_1(exception);
402 return EXIT_NORETURN;
403}
404
405static ExitStatus gen_illegal(DisasContext *ctx)
406{
407 nullify_over(ctx);
408 return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
409}
410
411static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
412{
413 /* Suppress goto_tb in the case of single-stepping and IO. */
414 if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
415 return false;
416 }
417 return true;
418}
419
420/* If the next insn is to be nullified, and it's on the same page,
421 and we're not attempting to set a breakpoint on it, then we can
422 totally skip the nullified insn. This avoids creating and
423 executing a TB that merely branches to the next TB. */
424static bool use_nullify_skip(DisasContext *ctx)
425{
426 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
427 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
428}
429
430static void gen_goto_tb(DisasContext *ctx, int which,
431 target_ulong f, target_ulong b)
432{
433 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
434 tcg_gen_goto_tb(which);
435 tcg_gen_movi_tl(cpu_iaoq_f, f);
436 tcg_gen_movi_tl(cpu_iaoq_b, b);
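        /* Exiting with the TB pointer plus WHICH in its low bits lets the
           main loop chain this exit directly to the goto_tb slot above. */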
437 tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
438 } else {
439 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
440 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
441 if (ctx->singlestep_enabled) {
442 gen_excp_1(EXCP_DEBUG);
443 } else {
444 tcg_gen_exit_tb(0);
445 }
446 }
447}
448
449/* PA has a habit of taking the LSB of a field and using that as the sign,
450 with the rest of the field becoming the least significant bits. */
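/* For example, with pos = 0 and len = 5, the field 0b00011 has sign bit 1
   and payload 0b0001 and decodes to (-1 << 4) | 1 = -15, while 0b00010
   decodes to +1. */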
451static target_long low_sextract(uint32_t val, int pos, int len)
452{
453 target_ulong x = -(target_ulong)extract32(val, pos, 1);
454 x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
455 return x;
456}
457
458static target_long assemble_12(uint32_t insn)
459{
460 target_ulong x = -(target_ulong)(insn & 1);
461 x = (x << 1) | extract32(insn, 2, 1);
462 x = (x << 10) | extract32(insn, 3, 10);
463 return x;
464}
465
466static target_long assemble_16(uint32_t insn)
467{
468 /* Take the name from PA2.0, which produces a 16-bit number
469 only with wide mode; otherwise a 14-bit number. Since we don't
470 implement wide mode, this is always the 14-bit number. */
471 return low_sextract(insn, 0, 14);
472}
473
474static target_long assemble_17(uint32_t insn)
475{
476 target_ulong x = -(target_ulong)(insn & 1);
477 x = (x << 5) | extract32(insn, 16, 5);
478 x = (x << 1) | extract32(insn, 2, 1);
479 x = (x << 10) | extract32(insn, 3, 10);
480 return x << 2;
481}
482
483static target_long assemble_21(uint32_t insn)
484{
485 target_ulong x = -(target_ulong)(insn & 1);
486 x = (x << 11) | extract32(insn, 1, 11);
487 x = (x << 2) | extract32(insn, 14, 2);
488 x = (x << 5) | extract32(insn, 16, 5);
489 x = (x << 2) | extract32(insn, 12, 2);
490 return x << 11;
491}
492
493static target_long assemble_22(uint32_t insn)
494{
495 target_ulong x = -(target_ulong)(insn & 1);
496 x = (x << 10) | extract32(insn, 16, 10);
497 x = (x << 1) | extract32(insn, 2, 1);
498 x = (x << 10) | extract32(insn, 3, 10);
499 return x << 2;
500}
501
502/* The parisc documentation describes only the general interpretation of
503 the conditions, without describing their exact implementation. The
504 interpretations do not stand up well when considering ADD,C and SUB,B.
505 However, considering the Addition, Subtraction and Logical conditions
506 as a whole it would appear that these relations are similar to what
507 a traditional NZCV set of flags would produce. */
508
509static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
510{
511 DisasCond cond;
512 TCGv tmp;
513
514 switch (cf >> 1) {
515 case 0: /* Never / TR */
516 cond = cond_make_f();
517 break;
518 case 1: /* = / <> (Z / !Z) */
519 cond = cond_make_0(TCG_COND_EQ, res);
520 break;
521 case 2: /* < / >= (N / !N) */
522 cond = cond_make_0(TCG_COND_LT, res);
523 break;
524 case 3: /* <= / > (N | Z / !N & !Z) */
525 cond = cond_make_0(TCG_COND_LE, res);
526 break;
527 case 4: /* NUV / UV (!C / C) */
528 cond = cond_make_0(TCG_COND_EQ, cb_msb);
529 break;
530 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
531 tmp = tcg_temp_new();
532 tcg_gen_neg_tl(tmp, cb_msb);
533 tcg_gen_and_tl(tmp, tmp, res);
534 cond = cond_make_0(TCG_COND_EQ, tmp);
535 tcg_temp_free(tmp);
536 break;
537 case 6: /* SV / NSV (V / !V) */
538 cond = cond_make_0(TCG_COND_LT, sv);
539 break;
540 case 7: /* OD / EV */
541 tmp = tcg_temp_new();
542 tcg_gen_andi_tl(tmp, res, 1);
543 cond = cond_make_0(TCG_COND_NE, tmp);
544 tcg_temp_free(tmp);
545 break;
546 default:
547 g_assert_not_reached();
548 }
549 if (cf & 1) {
550 cond.c = tcg_invert_cond(cond.c);
551 }
552
553 return cond;
554}
555
556/* Similar, but for the special case of subtraction without borrow, we
557 can use the inputs directly. This can allow other computation to be
558 deleted as unused. */
559
560static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
561{
562 DisasCond cond;
563
564 switch (cf >> 1) {
565 case 1: /* = / <> */
566 cond = cond_make(TCG_COND_EQ, in1, in2);
567 break;
568 case 2: /* < / >= */
569 cond = cond_make(TCG_COND_LT, in1, in2);
570 break;
571 case 3: /* <= / > */
572 cond = cond_make(TCG_COND_LE, in1, in2);
573 break;
574 case 4: /* << / >>= */
575 cond = cond_make(TCG_COND_LTU, in1, in2);
576 break;
577 case 5: /* <<= / >> */
578 cond = cond_make(TCG_COND_LEU, in1, in2);
579 break;
580 default:
581 return do_cond(cf, res, sv, sv);
582 }
583 if (cf & 1) {
584 cond.c = tcg_invert_cond(cond.c);
585 }
586
587 return cond;
588}
589
590/* Similar, but for logicals, where the carry and overflow bits are not
591 computed, and use of them is undefined. */
592
593static DisasCond do_log_cond(unsigned cf, TCGv res)
594{
595 switch (cf >> 1) {
596 case 4: case 5: case 6:
597 cf &= 1;
598 break;
599 }
600 return do_cond(cf, res, res, res);
601}
602
603/* Similar, but for shift/extract/deposit conditions. */
604
605static DisasCond do_sed_cond(unsigned orig, TCGv res)
606{
607 unsigned c, f;
608
609 /* Convert the compressed condition codes to standard.
610 0-2 are the same as logicals (nv,=,<), while 3 is OD.
611 4-7 are the reverse of 0-3. */
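    /* For example, orig = 5 selects the inverse of compressed code 1,
       i.e. the negation of "=", which is "<>". */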
612 c = orig & 3;
613 if (c == 3) {
614 c = 7;
615 }
616 f = (orig & 4) / 4;
617
618 return do_log_cond(c * 2 + f, res);
619}
620
621/* Similar, but for unit conditions. */
622
623static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
624{
625 DisasCond cond;
626 TCGv tmp, cb;
627
628 TCGV_UNUSED(cb);
629 if (cf & 8) {
630 /* Since we want to test lots of carry-out bits all at once, do not
631 * do our normal thing and compute carry-in of bit B+1 since that
632 * leaves us with carry bits spread across two words.
633 */
634 cb = tcg_temp_new();
635 tmp = tcg_temp_new();
636 tcg_gen_or_tl(cb, in1, in2);
637 tcg_gen_and_tl(tmp, in1, in2);
638 tcg_gen_andc_tl(cb, cb, res);
639 tcg_gen_or_tl(cb, cb, tmp);
640 tcg_temp_free(tmp);
641 }
642
643 switch (cf >> 1) {
644 case 0: /* never / TR */
645 case 1: /* undefined */
646 case 5: /* undefined */
647 cond = cond_make_f();
648 break;
649
650 case 2: /* SBZ / NBZ */
651 /* See hasless(v,1) from
652 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
653 */
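        /* (res - 0x01010101) & ~res & 0x80808080 is nonzero exactly when
           some byte of res is zero, which is the SBZ condition. */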
654 tmp = tcg_temp_new();
655 tcg_gen_subi_tl(tmp, res, 0x01010101u);
656 tcg_gen_andc_tl(tmp, tmp, res);
657 tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
658 cond = cond_make_0(TCG_COND_NE, tmp);
659 tcg_temp_free(tmp);
660 break;
661
662 case 3: /* SHZ / NHZ */
663 tmp = tcg_temp_new();
664 tcg_gen_subi_tl(tmp, res, 0x00010001u);
665 tcg_gen_andc_tl(tmp, tmp, res);
666 tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
667 cond = cond_make_0(TCG_COND_NE, tmp);
668 tcg_temp_free(tmp);
669 break;
670
671 case 4: /* SDC / NDC */
672 tcg_gen_andi_tl(cb, cb, 0x88888888u);
673 cond = cond_make_0(TCG_COND_NE, cb);
674 break;
675
676 case 6: /* SBC / NBC */
677 tcg_gen_andi_tl(cb, cb, 0x80808080u);
678 cond = cond_make_0(TCG_COND_NE, cb);
679 break;
680
681 case 7: /* SHC / NHC */
682 tcg_gen_andi_tl(cb, cb, 0x80008000u);
683 cond = cond_make_0(TCG_COND_NE, cb);
684 break;
685
686 default:
687 g_assert_not_reached();
688 }
689 if (cf & 8) {
690 tcg_temp_free(cb);
691 }
692 if (cf & 1) {
693 cond.c = tcg_invert_cond(cond.c);
694 }
695
696 return cond;
697}
698
699/* Compute signed overflow for addition. */
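/* The sign bit of SV is set iff IN1 and IN2 have the same sign while RES
   has the opposite sign, i.e. (res ^ in1) & ~(in1 ^ in2) is negative. */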
700static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
701{
702 TCGv sv = get_temp(ctx);
703 TCGv tmp = tcg_temp_new();
704
705 tcg_gen_xor_tl(sv, res, in1);
706 tcg_gen_xor_tl(tmp, in1, in2);
707 tcg_gen_andc_tl(sv, sv, tmp);
708 tcg_temp_free(tmp);
709
710 return sv;
711}
712
713/* Compute signed overflow for subtraction. */
714static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
715{
716 TCGv sv = get_temp(ctx);
717 TCGv tmp = tcg_temp_new();
718
719 tcg_gen_xor_tl(sv, res, in1);
720 tcg_gen_xor_tl(tmp, in1, in2);
721 tcg_gen_and_tl(sv, sv, tmp);
722 tcg_temp_free(tmp);
723
724 return sv;
725}
726
727static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
728 unsigned shift, bool is_l, bool is_tsv, bool is_tc,
729 bool is_c, unsigned cf)
730{
731 TCGv dest, cb, cb_msb, sv, tmp;
732 unsigned c = cf >> 1;
733 DisasCond cond;
734
735 dest = tcg_temp_new();
736 TCGV_UNUSED(cb);
737 TCGV_UNUSED(cb_msb);
738
739 if (shift) {
740 tmp = get_temp(ctx);
741 tcg_gen_shli_tl(tmp, in1, shift);
742 in1 = tmp;
743 }
744
745 if (!is_l || c == 4 || c == 5) {
746 TCGv zero = tcg_const_tl(0);
747 cb_msb = get_temp(ctx);
748 tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
749 if (is_c) {
750 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
751 }
752 tcg_temp_free(zero);
753 if (!is_l) {
754 cb = get_temp(ctx);
755 tcg_gen_xor_tl(cb, in1, in2);
756 tcg_gen_xor_tl(cb, cb, dest);
757 }
758 } else {
759 tcg_gen_add_tl(dest, in1, in2);
760 if (is_c) {
761 tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
762 }
763 }
764
765 /* Compute signed overflow if required. */
766 TCGV_UNUSED(sv);
767 if (is_tsv || c == 6) {
768 sv = do_add_sv(ctx, dest, in1, in2);
769 if (is_tsv) {
770 /* ??? Need to include overflow from shift. */
771 gen_helper_tsv(cpu_env, sv);
772 }
773 }
774
775 /* Emit any conditional trap before any writeback. */
776 cond = do_cond(cf, dest, cb_msb, sv);
777 if (is_tc) {
778 cond_prep(&cond);
779 tmp = tcg_temp_new();
780 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
781 gen_helper_tcond(cpu_env, tmp);
782 tcg_temp_free(tmp);
783 }
784
785 /* Write back the result. */
786 if (!is_l) {
787 save_or_nullify(ctx, cpu_psw_cb, cb);
788 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
789 }
790 save_gpr(ctx, rt, dest);
791 tcg_temp_free(dest);
792
793 /* Install the new nullification. */
794 cond_free(&ctx->null_cond);
795 ctx->null_cond = cond;
796 return NO_EXIT;
797}
798
799static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
800 bool is_tsv, bool is_b, bool is_tc, unsigned cf)
801{
802 TCGv dest, sv, cb, cb_msb, zero, tmp;
803 unsigned c = cf >> 1;
804 DisasCond cond;
805
806 dest = tcg_temp_new();
807 cb = tcg_temp_new();
808 cb_msb = tcg_temp_new();
809
810 zero = tcg_const_tl(0);
811 if (is_b) {
812 /* DEST,C = IN1 + ~IN2 + C. */
813 tcg_gen_not_tl(cb, in2);
814 tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
815 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
816 tcg_gen_xor_tl(cb, cb, in1);
817 tcg_gen_xor_tl(cb, cb, dest);
818 } else {
819 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
820 operations by seeding the high word with 1 and subtracting. */
821 tcg_gen_movi_tl(cb_msb, 1);
822 tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
823 tcg_gen_eqv_tl(cb, in1, in2);
824 tcg_gen_xor_tl(cb, cb, dest);
825 }
826 tcg_temp_free(zero);
827
828 /* Compute signed overflow if required. */
829 TCGV_UNUSED(sv);
830 if (is_tsv || c == 6) {
831 sv = do_sub_sv(ctx, dest, in1, in2);
832 if (is_tsv) {
833 gen_helper_tsv(cpu_env, sv);
834 }
835 }
836
837 /* Compute the condition. We cannot use the special case for borrow. */
838 if (!is_b) {
839 cond = do_sub_cond(cf, dest, in1, in2, sv);
840 } else {
841 cond = do_cond(cf, dest, cb_msb, sv);
842 }
843
844 /* Emit any conditional trap before any writeback. */
845 if (is_tc) {
846 cond_prep(&cond);
847 tmp = tcg_temp_new();
848 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
849 gen_helper_tcond(cpu_env, tmp);
850 tcg_temp_free(tmp);
851 }
852
853 /* Write back the result. */
854 save_or_nullify(ctx, cpu_psw_cb, cb);
855 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
856 save_gpr(ctx, rt, dest);
857 tcg_temp_free(dest);
858
859 /* Install the new nullification. */
860 cond_free(&ctx->null_cond);
861 ctx->null_cond = cond;
862 return NO_EXIT;
863}
864
865static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
866 TCGv in2, unsigned cf)
867{
868 TCGv dest, sv;
869 DisasCond cond;
870
871 dest = tcg_temp_new();
872 tcg_gen_sub_tl(dest, in1, in2);
873
874 /* Compute signed overflow if required. */
875 TCGV_UNUSED(sv);
876 if ((cf >> 1) == 6) {
877 sv = do_sub_sv(ctx, dest, in1, in2);
878 }
879
880 /* Form the condition for the compare. */
881 cond = do_sub_cond(cf, dest, in1, in2, sv);
882
883 /* Clear. */
884 tcg_gen_movi_tl(dest, 0);
885 save_gpr(ctx, rt, dest);
886 tcg_temp_free(dest);
887
888 /* Install the new nullification. */
889 cond_free(&ctx->null_cond);
890 ctx->null_cond = cond;
891 return NO_EXIT;
892}
893
894static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
895 unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
896{
897 TCGv dest = dest_gpr(ctx, rt);
898
899 /* Perform the operation, and writeback. */
900 fn(dest, in1, in2);
901 save_gpr(ctx, rt, dest);
902
903 /* Install the new nullification. */
904 cond_free(&ctx->null_cond);
905 if (cf) {
906 ctx->null_cond = do_log_cond(cf, dest);
907 }
908 return NO_EXIT;
909}
910
911static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
912 TCGv in2, unsigned cf, bool is_tc,
913 void (*fn)(TCGv, TCGv, TCGv))
914{
915 TCGv dest;
916 DisasCond cond;
917
918 if (cf == 0) {
919 dest = dest_gpr(ctx, rt);
920 fn(dest, in1, in2);
921 save_gpr(ctx, rt, dest);
922 cond_free(&ctx->null_cond);
923 } else {
924 dest = tcg_temp_new();
925 fn(dest, in1, in2);
926
927 cond = do_unit_cond(cf, dest, in1, in2);
928
929 if (is_tc) {
930 TCGv tmp = tcg_temp_new();
931 cond_prep(&cond);
932 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
933 gen_helper_tcond(cpu_env, tmp);
934 tcg_temp_free(tmp);
935 }
936 save_gpr(ctx, rt, dest);
937
938 cond_free(&ctx->null_cond);
939 ctx->null_cond = cond;
940 }
941 return NO_EXIT;
942}
943
944/* Emit an unconditional branch to a direct target, which may or may not
945 have already had nullification handled. */
946static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest,
947 unsigned link, bool is_n)
948{
949 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
950 if (link != 0) {
951 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
952 }
953 ctx->iaoq_n = dest;
954 if (is_n) {
955 ctx->null_cond.c = TCG_COND_ALWAYS;
956 }
957 return NO_EXIT;
958 } else {
959 nullify_over(ctx);
960
961 if (link != 0) {
962 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
963 }
964
965 if (is_n && use_nullify_skip(ctx)) {
966 nullify_set(ctx, 0);
967 gen_goto_tb(ctx, 0, dest, dest + 4);
968 } else {
969 nullify_set(ctx, is_n);
970 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
971 }
972
973 nullify_end(ctx, NO_EXIT);
974
975 nullify_set(ctx, 0);
976 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
977 return EXIT_GOTO_TB;
978 }
979}
980
981/* Emit a conditional branch to a direct target. If the branch itself
982 is nullified, we should have already used nullify_over. */
983static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
984 DisasCond *cond)
985{
986 target_ulong dest = iaoq_dest(ctx, disp);
987 TCGLabel *taken = NULL;
988 TCGCond c = cond->c;
989 int which = 0;
990 bool n;
991
992 assert(ctx->null_cond.c == TCG_COND_NEVER);
993
994 /* Handle TRUE and NEVER as direct branches. */
995 if (c == TCG_COND_ALWAYS) {
996 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
997 }
998 if (c == TCG_COND_NEVER) {
999 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1000 }
1001
1002 taken = gen_new_label();
1003 cond_prep(cond);
1004 tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1005 cond_free(cond);
1006
1007 /* Not taken: Condition not satisfied; nullify on backward branches. */
1008 n = is_n && disp < 0;
1009 if (n && use_nullify_skip(ctx)) {
1010 nullify_set(ctx, 0);
1011 gen_goto_tb(ctx, which++, ctx->iaoq_n, ctx->iaoq_n + 4);
1012 } else {
1013 if (!n && ctx->null_lab) {
1014 gen_set_label(ctx->null_lab);
1015 ctx->null_lab = NULL;
1016 }
1017 nullify_set(ctx, n);
1018 gen_goto_tb(ctx, which++, ctx->iaoq_b, ctx->iaoq_n);
1019 }
1020
1021 gen_set_label(taken);
1022
1023 /* Taken: Condition satisfied; nullify on forward branches. */
1024 n = is_n && disp >= 0;
1025 if (n && use_nullify_skip(ctx)) {
1026 nullify_set(ctx, 0);
1027 gen_goto_tb(ctx, which++, dest, dest + 4);
1028 } else {
1029 nullify_set(ctx, n);
1030 gen_goto_tb(ctx, which++, ctx->iaoq_b, dest);
1031 }
1032
1033 /* Not taken: the branch itself was nullified. */
1034 if (ctx->null_lab) {
1035 gen_set_label(ctx->null_lab);
1036 ctx->null_lab = NULL;
1037 if (which < 2) {
1038 nullify_set(ctx, 0);
1039 gen_goto_tb(ctx, which, ctx->iaoq_b, ctx->iaoq_n);
1040 return EXIT_GOTO_TB;
1041 } else {
1042 return EXIT_IAQ_N_STALE;
1043 }
1044 } else {
1045 return EXIT_GOTO_TB;
1046 }
1047}
1048
1049/* Emit an unconditional branch to an indirect target. This handles
1050 nullification of the branch itself. */
1051static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
1052 unsigned link, bool is_n)
1053{
1054 TCGv a0, a1, next, tmp;
1055 TCGCond c;
1056
1057 assert(ctx->null_lab == NULL);
1058
1059 if (ctx->null_cond.c == TCG_COND_NEVER) {
1060 if (link != 0) {
1061 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1062 }
1063 next = get_temp(ctx);
1064 tcg_gen_mov_tl(next, dest);
1065 ctx->iaoq_n = -1;
1066 ctx->iaoq_n_var = next;
1067 if (is_n) {
1068 ctx->null_cond.c = TCG_COND_ALWAYS;
1069 }
1070 } else if (is_n && use_nullify_skip(ctx)) {
1071 /* The (conditional) branch, B, nullifies the next insn, N,
1072 and we're allowed to skip execution of N (no single-step or
1073 tracepoint in effect). Since the exit_tb that we must use
1074 for the indirect branch consumes no special resources, we
1075 can (conditionally) skip B and continue execution. */
1076 /* The use_nullify_skip test implies we have a known control path. */
1077 tcg_debug_assert(ctx->iaoq_b != -1);
1078 tcg_debug_assert(ctx->iaoq_n != -1);
1079
1080 /* We do have to handle the non-local temporary, DEST, before
1081 branching. Since IAOQ_F is not really live at this point, we
1082 can simply store DEST optimistically. Similarly with IAOQ_B. */
1083 tcg_gen_mov_tl(cpu_iaoq_f, dest);
1084 tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1085
1086 nullify_over(ctx);
1087 if (link != 0) {
1088 tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1089 }
1090 tcg_gen_exit_tb(0);
1091 return nullify_end(ctx, NO_EXIT);
1092 } else {
1093 cond_prep(&ctx->null_cond);
1094 c = ctx->null_cond.c;
1095 a0 = ctx->null_cond.a0;
1096 a1 = ctx->null_cond.a1;
1097
1098 tmp = tcg_temp_new();
1099 next = get_temp(ctx);
1100
1101 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1102 tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1103 ctx->iaoq_n = -1;
1104 ctx->iaoq_n_var = next;
1105
1106 if (link != 0) {
1107 tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1108 }
1109
1110 if (is_n) {
1111 /* The branch nullifies the next insn, which means the state of N
1112 after the branch is the inverse of the state of N that applied
1113 to the branch. */
1114 tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1115 cond_free(&ctx->null_cond);
1116 ctx->null_cond = cond_make_n();
1117 ctx->psw_n_nonzero = true;
1118 } else {
1119 cond_free(&ctx->null_cond);
1120 }
1121 }
1122
1123 return NO_EXIT;
1124}
1125
1126/* On Linux, page zero is normally marked execute only + gateway.
1127 Therefore normal read or write is supposed to fail, but specific
1128 offsets have kernel code mapped to raise permissions to implement
1129 system calls. Handling this via an explicit check here, rather
1130 than in the "be disp(sr2,r0)" instruction that probably sent us
1131 here, is the easiest way to handle the branch delay slot on the
1132 aforementioned BE. */
1133static ExitStatus do_page_zero(DisasContext *ctx)
1134{
1135 /* If by some means we get here with PSW[N]=1, that implies that
1136 the B,GATE instruction would be skipped, and we'd fault on the
1137 next insn within the privileged page. */
1138 switch (ctx->null_cond.c) {
1139 case TCG_COND_NEVER:
1140 break;
1141 case TCG_COND_ALWAYS:
1142 tcg_gen_movi_tl(cpu_psw_n, 0);
1143 goto do_sigill;
1144 default:
1145 /* Since this is always the first (and only) insn within the
1146 TB, we should know the state of PSW[N] from TB->FLAGS. */
1147 g_assert_not_reached();
1148 }
1149
1150 /* Check that we didn't arrive here via some means that allowed
1151 non-sequential instruction execution. Normally the PSW[B] bit
1152 detects this by preventing the B,GATE instruction from executing
1153 under such conditions. */
1154 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1155 goto do_sigill;
1156 }
1157
1158 switch (ctx->iaoq_f) {
1159 case 0x00: /* Null pointer call */
1160 gen_excp_1(EXCP_SIGSEGV);
1161 return EXIT_NORETURN;
1162
1163 case 0xb0: /* LWS */
1164 gen_excp_1(EXCP_SYSCALL_LWS);
1165 return EXIT_NORETURN;
1166
1167 case 0xe0: /* SET_THREAD_POINTER */
1168 tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1169 tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1170 tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1171 return EXIT_IAQ_N_UPDATED;
1172
1173 case 0x100: /* SYSCALL */
1174 gen_excp_1(EXCP_SYSCALL);
1175 return EXIT_NORETURN;
1176
1177 default:
1178 do_sigill:
1179 gen_excp_1(EXCP_SIGILL);
1180 return EXIT_NORETURN;
1181 }
1182}
1183
1184static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
1185 const DisasInsn *di)
1186{
1187 cond_free(&ctx->null_cond);
1188 return NO_EXIT;
1189}
1190
1191static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
1192 const DisasInsn *di)
1193{
1194 unsigned r2 = extract32(insn, 21, 5);
1195 unsigned r1 = extract32(insn, 16, 5);
1196 unsigned cf = extract32(insn, 12, 4);
1197 unsigned ext = extract32(insn, 8, 4);
1198 unsigned shift = extract32(insn, 6, 2);
1199 unsigned rt = extract32(insn, 0, 5);
1200 TCGv tcg_r1, tcg_r2;
1201 bool is_c = false;
1202 bool is_l = false;
1203 bool is_tc = false;
1204 bool is_tsv = false;
1205 ExitStatus ret;
1206
1207 switch (ext) {
1208 case 0x6: /* ADD, SHLADD */
1209 break;
1210 case 0xa: /* ADD,L, SHLADD,L */
1211 is_l = true;
1212 break;
1213 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1214 is_tsv = true;
1215 break;
1216 case 0x7: /* ADD,C */
1217 is_c = true;
1218 break;
1219 case 0xf: /* ADD,C,TSV */
1220 is_c = is_tsv = true;
1221 break;
1222 default:
1223 return gen_illegal(ctx);
1224 }
1225
1226 if (cf) {
1227 nullify_over(ctx);
1228 }
1229 tcg_r1 = load_gpr(ctx, r1);
1230 tcg_r2 = load_gpr(ctx, r2);
1231 ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1232 return nullify_end(ctx, ret);
1233}
1234
1235static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
1236 const DisasInsn *di)
1237{
1238 unsigned r2 = extract32(insn, 21, 5);
1239 unsigned r1 = extract32(insn, 16, 5);
1240 unsigned cf = extract32(insn, 12, 4);
1241 unsigned ext = extract32(insn, 6, 6);
1242 unsigned rt = extract32(insn, 0, 5);
1243 TCGv tcg_r1, tcg_r2;
1244 bool is_b = false;
1245 bool is_tc = false;
1246 bool is_tsv = false;
1247 ExitStatus ret;
1248
1249 switch (ext) {
1250 case 0x10: /* SUB */
1251 break;
1252 case 0x30: /* SUB,TSV */
1253 is_tsv = true;
1254 break;
1255 case 0x14: /* SUB,B */
1256 is_b = true;
1257 break;
1258 case 0x34: /* SUB,B,TSV */
1259 is_b = is_tsv = true;
1260 break;
1261 case 0x13: /* SUB,TC */
1262 is_tc = true;
1263 break;
1264 case 0x33: /* SUB,TSV,TC */
1265 is_tc = is_tsv = true;
1266 break;
1267 default:
1268 return gen_illegal(ctx);
1269 }
1270
1271 if (cf) {
1272 nullify_over(ctx);
1273 }
1274 tcg_r1 = load_gpr(ctx, r1);
1275 tcg_r2 = load_gpr(ctx, r2);
1276 ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1277 return nullify_end(ctx, ret);
1278}
1279
1280static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
1281 const DisasInsn *di)
1282{
1283 unsigned r2 = extract32(insn, 21, 5);
1284 unsigned r1 = extract32(insn, 16, 5);
1285 unsigned cf = extract32(insn, 12, 4);
1286 unsigned rt = extract32(insn, 0, 5);
1287 TCGv tcg_r1, tcg_r2;
1288 ExitStatus ret;
1289
1290 if (cf) {
1291 nullify_over(ctx);
1292 }
1293 tcg_r1 = load_gpr(ctx, r1);
1294 tcg_r2 = load_gpr(ctx, r2);
1295 ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
1296 return nullify_end(ctx, ret);
1297}
1298
1299/* OR r,0,t -> COPY (according to gas) */
1300static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
1301 const DisasInsn *di)
1302{
1303 unsigned r1 = extract32(insn, 16, 5);
1304 unsigned rt = extract32(insn, 0, 5);
1305
1306 if (r1 == 0) {
1307 TCGv dest = dest_gpr(ctx, rt);
1308 tcg_gen_movi_tl(dest, 0);
1309 save_gpr(ctx, rt, dest);
1310 } else {
1311 save_gpr(ctx, rt, cpu_gr[r1]);
1312 }
1313 cond_free(&ctx->null_cond);
1314 return NO_EXIT;
1315}
1316
1317static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
1318 const DisasInsn *di)
1319{
1320 unsigned r2 = extract32(insn, 21, 5);
1321 unsigned r1 = extract32(insn, 16, 5);
1322 unsigned cf = extract32(insn, 12, 4);
1323 unsigned rt = extract32(insn, 0, 5);
1324 TCGv tcg_r1, tcg_r2;
1325 ExitStatus ret;
1326
1327 if (cf) {
1328 nullify_over(ctx);
1329 }
1330 tcg_r1 = load_gpr(ctx, r1);
1331 tcg_r2 = load_gpr(ctx, r2);
1332 ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1333 return nullify_end(ctx, ret);
1334}
1335
1336static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
1337 const DisasInsn *di)
1338{
1339 unsigned r2 = extract32(insn, 21, 5);
1340 unsigned r1 = extract32(insn, 16, 5);
1341 unsigned cf = extract32(insn, 12, 4);
1342 unsigned rt = extract32(insn, 0, 5);
1343 TCGv tcg_r1, tcg_r2;
1344 ExitStatus ret;
1345
1346 if (cf) {
1347 nullify_over(ctx);
1348 }
1349 tcg_r1 = load_gpr(ctx, r1);
1350 tcg_r2 = load_gpr(ctx, r2);
1351 ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1352 return nullify_end(ctx, ret);
1353}
1354
1355static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
1356 const DisasInsn *di)
1357{
1358 unsigned r2 = extract32(insn, 21, 5);
1359 unsigned r1 = extract32(insn, 16, 5);
1360 unsigned cf = extract32(insn, 12, 4);
1361 unsigned is_tc = extract32(insn, 6, 1);
1362 unsigned rt = extract32(insn, 0, 5);
1363 TCGv tcg_r1, tcg_r2, tmp;
1364 ExitStatus ret;
1365
1366 if (cf) {
1367 nullify_over(ctx);
1368 }
1369 tcg_r1 = load_gpr(ctx, r1);
1370 tcg_r2 = load_gpr(ctx, r2);
1371 tmp = get_temp(ctx);
1372 tcg_gen_not_tl(tmp, tcg_r2);
1373 ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
1374 return nullify_end(ctx, ret);
1375}
1376
1377static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
1378 const DisasInsn *di)
1379{
1380 unsigned r2 = extract32(insn, 21, 5);
1381 unsigned cf = extract32(insn, 12, 4);
1382 unsigned is_i = extract32(insn, 6, 1);
1383 unsigned rt = extract32(insn, 0, 5);
1384 TCGv tmp;
1385 ExitStatus ret;
1386
1387 nullify_over(ctx);
1388
1389 tmp = get_temp(ctx);
1390 tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
1391 if (!is_i) {
1392 tcg_gen_not_tl(tmp, tmp);
1393 }
1394 tcg_gen_andi_tl(tmp, tmp, 0x11111111);
1395 tcg_gen_muli_tl(tmp, tmp, 6);
1396 ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
1397 is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
1398
1399 return nullify_end(ctx, ret);
1400}
1401
1402static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
1403 const DisasInsn *di)
1404{
1405 unsigned r2 = extract32(insn, 21, 5);
1406 unsigned r1 = extract32(insn, 16, 5);
1407 unsigned cf = extract32(insn, 12, 4);
1408 unsigned rt = extract32(insn, 0, 5);
1409 TCGv dest, add1, add2, addc, zero, in1, in2;
1410
1411 nullify_over(ctx);
1412
1413 in1 = load_gpr(ctx, r1);
1414 in2 = load_gpr(ctx, r2);
1415
1416 add1 = tcg_temp_new();
1417 add2 = tcg_temp_new();
1418 addc = tcg_temp_new();
1419 dest = tcg_temp_new();
1420 zero = tcg_const_tl(0);
1421
1422 /* Form R1 << 1 | PSW[CB]{8}. */
1423 tcg_gen_add_tl(add1, in1, in1);
1424 tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
1425
1426 /* Add or subtract R2, depending on PSW[V]. Proper computation of
1427 carry{8} requires that we subtract via + ~R2 + 1, as described in
1428 the manual. By extracting and masking V, we can produce the
1429 proper inputs to the addition without movcond. */
1430 tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
1431 tcg_gen_xor_tl(add2, in2, addc);
1432 tcg_gen_andi_tl(addc, addc, 1);
1433 /* ??? This is only correct for 32-bit. */
1434 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
1435 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
1436
1437 tcg_temp_free(addc);
1438 tcg_temp_free(zero);
1439
1440 /* Write back the result register. */
1441 save_gpr(ctx, rt, dest);
1442
1443 /* Write back PSW[CB]. */
1444 tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
1445 tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
1446
1447 /* Write back PSW[V] for the division step. */
1448 tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
1449 tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
1450
1451 /* Install the new nullification. */
1452 if (cf) {
1453 TCGv sv;
1454 TCGV_UNUSED(sv);
1455 if (cf >> 1 == 6) {
1456 /* ??? The lshift is supposed to contribute to overflow. */
1457 sv = do_add_sv(ctx, dest, add1, add2);
1458 }
1459 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
1460 }
1461
1462 tcg_temp_free(add1);
1463 tcg_temp_free(add2);
1464 tcg_temp_free(dest);
1465
1466 return nullify_end(ctx, NO_EXIT);
1467}
1468
1469static const DisasInsn table_arith_log[] = {
1470 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
1471 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
1472 { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl },
1473 { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl },
1474 { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl },
1475 { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl },
1476 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
1477 { 0x08000380u, 0xfc000fe0u, trans_uxor },
1478 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
1479 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
1480 { 0x08000440u, 0xfc000fe0u, trans_ds },
1481 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
1482 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
1483 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
1484 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
1485};
1486
1487static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn)
1488{
1489 target_long im = low_sextract(insn, 0, 11);
1490 unsigned e1 = extract32(insn, 11, 1);
1491 unsigned cf = extract32(insn, 12, 4);
1492 unsigned rt = extract32(insn, 16, 5);
1493 unsigned r2 = extract32(insn, 21, 5);
1494 unsigned o1 = extract32(insn, 26, 1);
1495 TCGv tcg_im, tcg_r2;
1496 ExitStatus ret;
1497
1498 if (cf) {
1499 nullify_over(ctx);
1500 }
1501
1502 tcg_im = load_const(ctx, im);
1503 tcg_r2 = load_gpr(ctx, r2);
1504 ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
1505
1506 return nullify_end(ctx, ret);
1507}
1508
1509static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn)
1510{
1511 target_long im = low_sextract(insn, 0, 11);
1512 unsigned e1 = extract32(insn, 11, 1);
1513 unsigned cf = extract32(insn, 12, 4);
1514 unsigned rt = extract32(insn, 16, 5);
1515 unsigned r2 = extract32(insn, 21, 5);
1516 TCGv tcg_im, tcg_r2;
1517 ExitStatus ret;
1518
1519 if (cf) {
1520 nullify_over(ctx);
1521 }
1522
1523 tcg_im = load_const(ctx, im);
1524 tcg_r2 = load_gpr(ctx, r2);
1525 ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
1526
1527 return nullify_end(ctx, ret);
1528}
1529
1530static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn)
1531{
1532 target_long im = low_sextract(insn, 0, 11);
1533 unsigned cf = extract32(insn, 12, 4);
1534 unsigned rt = extract32(insn, 16, 5);
1535 unsigned r2 = extract32(insn, 21, 5);
1536 TCGv tcg_im, tcg_r2;
1537 ExitStatus ret;
1538
1539 if (cf) {
1540 nullify_over(ctx);
1541 }
1542
1543 tcg_im = load_const(ctx, im);
1544 tcg_r2 = load_gpr(ctx, r2);
1545 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
1546
1547 return nullify_end(ctx, ret);
1548}
1549
1550static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn)
1551{
1552 unsigned rt = extract32(insn, 21, 5);
1553 target_long i = assemble_21(insn);
1554 TCGv tcg_rt = dest_gpr(ctx, rt);
1555
1556 tcg_gen_movi_tl(tcg_rt, i);
1557 save_gpr(ctx, rt, tcg_rt);
1558 cond_free(&ctx->null_cond);
1559
1560 return NO_EXIT;
1561}
1562
1563static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn)
1564{
1565 unsigned rt = extract32(insn, 21, 5);
1566 target_long i = assemble_21(insn);
1567 TCGv tcg_rt = load_gpr(ctx, rt);
1568 TCGv tcg_r1 = dest_gpr(ctx, 1);
1569
1570 tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
1571 save_gpr(ctx, 1, tcg_r1);
1572 cond_free(&ctx->null_cond);
1573
1574 return NO_EXIT;
1575}
1576
1577static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn)
1578{
1579 unsigned rb = extract32(insn, 21, 5);
1580 unsigned rt = extract32(insn, 16, 5);
1581 target_long i = assemble_16(insn);
1582 TCGv tcg_rt = dest_gpr(ctx, rt);
1583
1584 /* Special case rb == 0, for the LDI pseudo-op.
1585 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
1586 if (rb == 0) {
1587 tcg_gen_movi_tl(tcg_rt, i);
1588 } else {
1589 tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
1590 }
1591 save_gpr(ctx, rt, tcg_rt);
1592 cond_free(&ctx->null_cond);
1593
1594 return NO_EXIT;
1595}
1596
1597static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn,
1598 bool is_true, bool is_imm, bool is_dw)
1599{
1600 target_long disp = assemble_12(insn) * 4;
1601 unsigned n = extract32(insn, 1, 1);
1602 unsigned c = extract32(insn, 13, 3);
1603 unsigned r = extract32(insn, 21, 5);
1604 unsigned cf = c * 2 + !is_true;
1605 TCGv dest, in1, in2, sv;
1606 DisasCond cond;
1607
1608 nullify_over(ctx);
1609
1610 if (is_imm) {
1611 in1 = load_const(ctx, low_sextract(insn, 16, 5));
1612 } else {
1613 in1 = load_gpr(ctx, extract32(insn, 16, 5));
1614 }
1615 in2 = load_gpr(ctx, r);
1616 dest = get_temp(ctx);
1617
1618 tcg_gen_sub_tl(dest, in1, in2);
1619
1620 TCGV_UNUSED(sv);
1621 if (c == 6) {
1622 sv = do_sub_sv(ctx, dest, in1, in2);
1623 }
1624
1625 cond = do_sub_cond(cf, dest, in1, in2, sv);
1626 return do_cbranch(ctx, disp, n, &cond);
1627}
1628
1629static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn,
1630 bool is_true, bool is_imm)
1631{
1632 target_long disp = assemble_12(insn) * 4;
1633 unsigned n = extract32(insn, 1, 1);
1634 unsigned c = extract32(insn, 13, 3);
1635 unsigned r = extract32(insn, 21, 5);
1636 unsigned cf = c * 2 + !is_true;
1637 TCGv dest, in1, in2, sv, cb_msb;
1638 DisasCond cond;
1639
1640 nullify_over(ctx);
1641
1642 if (is_imm) {
1643 in1 = load_const(ctx, low_sextract(insn, 16, 5));
1644 } else {
1645 in1 = load_gpr(ctx, extract32(insn, 16, 5));
1646 }
1647 in2 = load_gpr(ctx, r);
1648 dest = dest_gpr(ctx, r);
1649 TCGV_UNUSED(sv);
1650 TCGV_UNUSED(cb_msb);
1651
1652 switch (c) {
1653 default:
1654 tcg_gen_add_tl(dest, in1, in2);
1655 break;
1656 case 4: case 5:
1657 cb_msb = get_temp(ctx);
1658 tcg_gen_movi_tl(cb_msb, 0);
1659 tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
1660 break;
1661 case 6:
1662 tcg_gen_add_tl(dest, in1, in2);
1663 sv = do_add_sv(ctx, dest, in1, in2);
1664 break;
1665 }
1666
1667 cond = do_cond(cf, dest, cb_msb, sv);
1668 return do_cbranch(ctx, disp, n, &cond);
1669}
1670
1671static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn)
1672{
1673 target_long disp = assemble_12(insn) * 4;
1674 unsigned n = extract32(insn, 1, 1);
1675 unsigned c = extract32(insn, 15, 1);
1676 unsigned r = extract32(insn, 16, 5);
1677 unsigned p = extract32(insn, 21, 5);
1678 unsigned i = extract32(insn, 26, 1);
1679 TCGv tmp, tcg_r;
1680 DisasCond cond;
1681
1682 nullify_over(ctx);
1683
1684 tmp = tcg_temp_new();
1685 tcg_r = load_gpr(ctx, r);
1686 if (i) {
1687 tcg_gen_shli_tl(tmp, tcg_r, p);
1688 } else {
1689 tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
1690 }
1691
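    /* The shift has moved the selected bit (numbered big-endian, so bit 0
       is the most significant) into the sign position, where a signed
       compare against zero tests it. */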
1692 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
1693 tcg_temp_free(tmp);
1694 return do_cbranch(ctx, disp, n, &cond);
1695}
1696
1697static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
1698{
1699 target_long disp = assemble_12(insn) * 4;
1700 unsigned n = extract32(insn, 1, 1);
1701 unsigned c = extract32(insn, 13, 3);
1702 unsigned t = extract32(insn, 16, 5);
1703 unsigned r = extract32(insn, 21, 5);
1704 TCGv dest;
1705 DisasCond cond;
1706
1707 nullify_over(ctx);
1708
1709 dest = dest_gpr(ctx, r);
1710 if (is_imm) {
1711 tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
1712 } else if (t == 0) {
1713 tcg_gen_movi_tl(dest, 0);
1714 } else {
1715 tcg_gen_mov_tl(dest, cpu_gr[t]);
1716 }
1717
1718 cond = do_sed_cond(c, dest);
1719 return do_cbranch(ctx, disp, n, &cond);
1720}
1721
1722static ExitStatus trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
1723 const DisasInsn *di)
1724{
1725 unsigned rt = extract32(insn, 0, 5);
1726 unsigned c = extract32(insn, 13, 3);
1727 unsigned r1 = extract32(insn, 16, 5);
1728 unsigned r2 = extract32(insn, 21, 5);
1729 TCGv dest;
1730
1731 if (c) {
1732 nullify_over(ctx);
1733 }
1734
1735 dest = dest_gpr(ctx, rt);
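    /* Shift the 64-bit pair r1:r2 right by SAR.  The r1 == 0 case reduces
       to a plain logical shift of r2, and r1 == r2 to a 32-bit rotate
       right, so those forms get cheaper code below. */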
1736 if (r1 == 0) {
1737 tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
1738 tcg_gen_shr_tl(dest, dest, cpu_sar);
1739 } else if (r1 == r2) {
1740 TCGv_i32 t32 = tcg_temp_new_i32();
1741 tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
1742 tcg_gen_rotr_i32(t32, t32, cpu_sar);
1743 tcg_gen_extu_i32_tl(dest, t32);
1744 tcg_temp_free_i32(t32);
1745 } else {
1746 TCGv_i64 t = tcg_temp_new_i64();
1747 TCGv_i64 s = tcg_temp_new_i64();
1748
1749 tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
1750 tcg_gen_extu_tl_i64(s, cpu_sar);
1751 tcg_gen_shr_i64(t, t, s);
1752 tcg_gen_trunc_i64_tl(dest, t);
1753
1754 tcg_temp_free_i64(t);
1755 tcg_temp_free_i64(s);
1756 }
1757 save_gpr(ctx, rt, dest);
1758
1759 /* Install the new nullification. */
1760 cond_free(&ctx->null_cond);
1761 if (c) {
1762 ctx->null_cond = do_sed_cond(c, dest);
1763 }
1764 return nullify_end(ctx, NO_EXIT);
1765}
1766
1767static ExitStatus trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
1768 const DisasInsn *di)
1769{
1770 unsigned rt = extract32(insn, 0, 5);
1771 unsigned cpos = extract32(insn, 5, 5);
1772 unsigned c = extract32(insn, 13, 3);
1773 unsigned r1 = extract32(insn, 16, 5);
1774 unsigned r2 = extract32(insn, 21, 5);
1775 unsigned sa = 31 - cpos;
1776 TCGv dest, t2;
1777
1778 if (c) {
1779 nullify_over(ctx);
1780 }
1781
1782 dest = dest_gpr(ctx, rt);
1783 t2 = load_gpr(ctx, r2);
1784 if (r1 == r2) {
1785 TCGv_i32 t32 = tcg_temp_new_i32();
1786 tcg_gen_trunc_tl_i32(t32, t2);
1787 tcg_gen_rotri_i32(t32, t32, sa);
1788 tcg_gen_extu_i32_tl(dest, t32);
1789 tcg_temp_free_i32(t32);
1790 } else if (r1 == 0) {
1791 tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
1792 } else {
1793 TCGv t0 = tcg_temp_new();
1794 tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
1795 tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
1796 tcg_temp_free(t0);
1797 }
1798 save_gpr(ctx, rt, dest);
1799
1800 /* Install the new nullification. */
1801 cond_free(&ctx->null_cond);
1802 if (c) {
1803 ctx->null_cond = do_sed_cond(c, dest);
1804 }
1805 return nullify_end(ctx, NO_EXIT);
1806}
1807
1808static ExitStatus trans_extrw_sar(DisasContext *ctx, uint32_t insn,
1809 const DisasInsn *di)
1810{
1811 unsigned clen = extract32(insn, 0, 5);
1812 unsigned is_se = extract32(insn, 10, 1);
1813 unsigned c = extract32(insn, 13, 3);
1814 unsigned rt = extract32(insn, 16, 5);
1815 unsigned rr = extract32(insn, 21, 5);
1816 unsigned len = 32 - clen;
1817 TCGv dest, src, tmp;
1818
1819 if (c) {
1820 nullify_over(ctx);
1821 }
1822
1823 dest = dest_gpr(ctx, rt);
1824 src = load_gpr(ctx, rr);
1825 tmp = tcg_temp_new();
1826
1827 /* Recall that SAR is using big-endian bit numbering. */
1828 tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
1829 if (is_se) {
1830 tcg_gen_sar_tl(dest, src, tmp);
1831 tcg_gen_sextract_tl(dest, dest, 0, len);
1832 } else {
1833 tcg_gen_shr_tl(dest, src, tmp);
1834 tcg_gen_extract_tl(dest, dest, 0, len);
1835 }
1836 tcg_temp_free(tmp);
1837 save_gpr(ctx, rt, dest);
1838
1839 /* Install the new nullification. */
1840 cond_free(&ctx->null_cond);
1841 if (c) {
1842 ctx->null_cond = do_sed_cond(c, dest);
1843 }
1844 return nullify_end(ctx, NO_EXIT);
1845}
1846
1847static ExitStatus trans_extrw_imm(DisasContext *ctx, uint32_t insn,
1848 const DisasInsn *di)
1849{
1850 unsigned clen = extract32(insn, 0, 5);
1851 unsigned pos = extract32(insn, 5, 5);
1852 unsigned is_se = extract32(insn, 10, 1);
1853 unsigned c = extract32(insn, 13, 3);
1854 unsigned rt = extract32(insn, 16, 5);
1855 unsigned rr = extract32(insn, 21, 5);
1856 unsigned len = 32 - clen;
1857 unsigned cpos = 31 - pos;
1858 TCGv dest, src;
1859
1860 if (c) {
1861 nullify_over(ctx);
1862 }
1863
1864 dest = dest_gpr(ctx, rt);
1865 src = load_gpr(ctx, rr);
1866 if (is_se) {
1867 tcg_gen_sextract_tl(dest, src, cpos, len);
1868 } else {
1869 tcg_gen_extract_tl(dest, src, cpos, len);
1870 }
1871 save_gpr(ctx, rt, dest);
1872
1873 /* Install the new nullification. */
1874 cond_free(&ctx->null_cond);
1875 if (c) {
1876 ctx->null_cond = do_sed_cond(c, dest);
1877 }
1878 return nullify_end(ctx, NO_EXIT);
1879}
1880
1881static const DisasInsn table_sh_ex[] = {
1882 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
1883 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
1884 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
1885 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
1886};
1887
1888static ExitStatus trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
1889 const DisasInsn *di)
1890{
1891 unsigned clen = extract32(insn, 0, 5);
1892 unsigned cpos = extract32(insn, 5, 5);
1893 unsigned nz = extract32(insn, 10, 1);
1894 unsigned c = extract32(insn, 13, 3);
1895 target_long val = low_sextract(insn, 16, 5);
1896 unsigned rt = extract32(insn, 21, 5);
1897 unsigned len = 32 - clen;
1898 target_long mask0, mask1;
1899 TCGv dest;
1900
1901 if (c) {
1902 nullify_over(ctx);
1903 }
1904 if (cpos + len > 32) {
1905 len = 32 - cpos;
1906 }
1907
1908 dest = dest_gpr(ctx, rt);
1909 mask0 = deposit64(0, cpos, len, val);
1910 mask1 = deposit64(-1, cpos, len, val);
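    /* mask0 is VAL within the field and zeros elsewhere; mask1 is VAL
       within the field and ones elsewhere, so (src & mask1) | mask0
       replaces just the deposited field. */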
1911
1912 if (nz) {
1913 TCGv src = load_gpr(ctx, rt);
1914 if (mask1 != -1) {
1915 tcg_gen_andi_tl(dest, src, mask1);
1916 src = dest;
1917 }
1918 tcg_gen_ori_tl(dest, src, mask0);
1919 } else {
1920 tcg_gen_movi_tl(dest, mask0);
1921 }
1922 save_gpr(ctx, rt, dest);
1923
1924 /* Install the new nullification. */
1925 cond_free(&ctx->null_cond);
1926 if (c) {
1927 ctx->null_cond = do_sed_cond(c, dest);
1928 }
1929 return nullify_end(ctx, NO_EXIT);
1930}
1931
1932static ExitStatus trans_depw_imm(DisasContext *ctx, uint32_t insn,
1933 const DisasInsn *di)
1934{
1935 unsigned clen = extract32(insn, 0, 5);
1936 unsigned cpos = extract32(insn, 5, 5);
1937 unsigned nz = extract32(insn, 10, 1);
1938 unsigned c = extract32(insn, 13, 3);
1939 unsigned rr = extract32(insn, 16, 5);
1940 unsigned rt = extract32(insn, 21, 5);
1941 unsigned rs = nz ? rt : 0;
1942 unsigned len = 32 - clen;
1943 TCGv dest, val;
1944
1945 if (c) {
1946 nullify_over(ctx);
1947 }
1948 if (cpos + len > 32) {
1949 len = 32 - cpos;
1950 }
1951
1952 dest = dest_gpr(ctx, rt);
1953 val = load_gpr(ctx, rr);
1954 if (rs == 0) {
1955 tcg_gen_deposit_z_tl(dest, val, cpos, len);
1956 } else {
1957 tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
1958 }
1959 save_gpr(ctx, rt, dest);
1960
1961 /* Install the new nullification. */
1962 cond_free(&ctx->null_cond);
1963 if (c) {
1964 ctx->null_cond = do_sed_cond(c, dest);
1965 }
1966 return nullify_end(ctx, NO_EXIT);
1967}
1968
1969static ExitStatus trans_depw_sar(DisasContext *ctx, uint32_t insn,
1970 const DisasInsn *di)
1971{
1972 unsigned clen = extract32(insn, 0, 5);
1973 unsigned nz = extract32(insn, 10, 1);
1974 unsigned i = extract32(insn, 12, 1);
1975 unsigned c = extract32(insn, 13, 3);
1976 unsigned rt = extract32(insn, 21, 5);
1977 unsigned rs = nz ? rt : 0;
1978 unsigned len = 32 - clen;
1979 TCGv val, mask, tmp, shift, dest;
1980 unsigned msb = 1U << (len - 1);
1981
1982 if (c) {
1983 nullify_over(ctx);
1984 }
1985
1986 if (i) {
1987 val = load_const(ctx, low_sextract(insn, 16, 5));
1988 } else {
1989 val = load_gpr(ctx, extract32(insn, 16, 5));
1990 }
1991 dest = dest_gpr(ctx, rt);
1992 shift = tcg_temp_new();
1993 tmp = tcg_temp_new();
1994
1995 /* Convert big-endian bit numbering in SAR to left-shift. */
1996 tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
1997
1998 mask = tcg_const_tl(msb + (msb - 1));
1999 tcg_gen_and_tl(tmp, val, mask);
2000 if (rs) {
2001 tcg_gen_shl_tl(mask, mask, shift);
2002 tcg_gen_shl_tl(tmp, tmp, shift);
2003 tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2004 tcg_gen_or_tl(dest, dest, tmp);
2005 } else {
2006 tcg_gen_shl_tl(dest, tmp, shift);
2007 }
2008 tcg_temp_free(shift);
2009 tcg_temp_free(mask);
2010 tcg_temp_free(tmp);
2011 save_gpr(ctx, rt, dest);
2012
2013 /* Install the new nullification. */
2014 cond_free(&ctx->null_cond);
2015 if (c) {
2016 ctx->null_cond = do_sed_cond(c, dest);
2017 }
2018 return nullify_end(ctx, NO_EXIT);
2019}
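/* HPPA numbers bits big-endian, with bit 0 the most significant, and SAR
   holds such a bit position.  For SAR values in 0..31, the xori with
   TARGET_LONG_BITS - 1 in trans_depw_sar above is the same as computing
   31 - SAR, i.e. the left-shift that places the field's least significant
   bit at that position: SAR = 31 gives shift 0 (field at the LSB),
   SAR = 0 gives shift 31.  */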
2020
2021static const DisasInsn table_depw[] = {
2022 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2023 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2024 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2025};
2026
98cd9ca7
RH
2027static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2028{
2029 unsigned n = extract32(insn, 1, 1);
2030 unsigned b = extract32(insn, 21, 5);
2031 target_long disp = assemble_17(insn);
2032
2033 /* unsigned s = low_uextract(insn, 13, 3); */
2034 /* ??? It seems like there should be a good way of using
2035 "be disp(sr2, r0)", the canonical gateway entry mechanism
2036 to our advantage. But that appears to be inconvenient to
2037 manage alongside branch delay slots. Therefore we handle
2038 entry into the gateway page via absolute address. */
2039
2040 /* Since we don't implement spaces, just branch. Do notice the special
2041 case of "be disp(*,r0)" using a direct branch to disp, so that we can
2042 goto_tb to the TB containing the syscall. */
2043 if (b == 0) {
2044 return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2045 } else {
2046 TCGv tmp = get_temp(ctx);
2047 tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2048 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2049 }
2050}
2051
2052static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn,
2053 const DisasInsn *di)
2054{
2055 unsigned n = extract32(insn, 1, 1);
2056 unsigned link = extract32(insn, 21, 5);
2057 target_long disp = assemble_17(insn);
2058
2059 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2060}
2061
2062static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn,
2063 const DisasInsn *di)
2064{
2065 unsigned n = extract32(insn, 1, 1);
2066 target_long disp = assemble_22(insn);
2067
2068 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2069}
2070
2071static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn,
2072 const DisasInsn *di)
2073{
2074 unsigned n = extract32(insn, 1, 1);
2075 unsigned rx = extract32(insn, 16, 5);
2076 unsigned link = extract32(insn, 21, 5);
2077 TCGv tmp = get_temp(ctx);
2078
2079 tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
2080 tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
2081 return do_ibranch(ctx, tmp, link, n);
2082}
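/* trans_blr above computes a vectored branch target: IAOQ_Front + 8 plus
   GR[rx] scaled by 8, i.e. each slot of the branch table is two 4-byte
   instructions (presumably a branch together with its delay slot).  */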
2083
2084static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn,
2085 const DisasInsn *di)
2086{
2087 unsigned n = extract32(insn, 1, 1);
2088 unsigned rx = extract32(insn, 16, 5);
2089 unsigned rb = extract32(insn, 21, 5);
2090 TCGv dest;
2091
2092 if (rx == 0) {
2093 dest = load_gpr(ctx, rb);
2094 } else {
2095 dest = get_temp(ctx);
2096 tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
2097 tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
2098 }
2099 return do_ibranch(ctx, dest, 0, n);
2100}
2101
2102static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn,
2103 const DisasInsn *di)
2104{
2105 unsigned n = extract32(insn, 1, 1);
2106 unsigned rb = extract32(insn, 21, 5);
2107 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
2108
2109 return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
2110}
2111
2112static const DisasInsn table_branch[] = {
2113 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
2114 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
2115 { 0xe8004000u, 0xfc00fffdu, trans_blr },
2116 { 0xe800c000u, 0xfc00fffdu, trans_bv },
2117 { 0xe800d000u, 0xfc00dffcu, trans_bve },
2118};
2119
61766fe9
RH
2120static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn,
2121 const DisasInsn table[], size_t n)
2122{
2123 size_t i;
2124 for (i = 0; i < n; ++i) {
2125 if ((insn & table[i].mask) == table[i].insn) {
2126 return table[i].trans(ctx, insn, &table[i]);
2127 }
2128 }
2129 return gen_illegal(ctx);
2130}
2131
2132#define translate_table(ctx, insn, table) \
2133 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
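/* Each DisasInsn entry is a match/mask pair: translate_table_int dispatches
   an insn to the first entry for which (insn & table[i].mask) == table[i].insn.
   For example, the table_depw entry { 0xd4000000u, 0xfc000be0u, trans_depw_sar }
   matches any word whose bits under 0xfc000be0 equal 0xd4000000; the zero bits
   of the mask are the operand fields that the handler extracts itself.  */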
2134
2135static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
2136{
2137 uint32_t opc = extract32(insn, 26, 6);
2138
2139 switch (opc) {
b2167459
RH
2140 case 0x02:
2141 return translate_table(ctx, insn, table_arith_log);
2142 case 0x08:
2143 return trans_ldil(ctx, insn);
2144 case 0x0A:
2145 return trans_addil(ctx, insn);
2146 case 0x0D:
2147 return trans_ldo(ctx, insn);
98cd9ca7
RH
2148 case 0x20:
2149 return trans_cmpb(ctx, insn, true, false, false);
2150 case 0x21:
2151 return trans_cmpb(ctx, insn, true, true, false);
2152 case 0x22:
2153 return trans_cmpb(ctx, insn, false, false, false);
2154 case 0x23:
2155 return trans_cmpb(ctx, insn, false, true, false);
b2167459
RH
2156 case 0x24:
2157 return trans_cmpiclr(ctx, insn);
2158 case 0x25:
2159 return trans_subi(ctx, insn);
98cd9ca7
RH
2160 case 0x27:
2161 return trans_cmpb(ctx, insn, true, false, true);
2162 case 0x28:
2163 return trans_addb(ctx, insn, true, false);
2164 case 0x29:
2165 return trans_addb(ctx, insn, true, true);
2166 case 0x2A:
2167 return trans_addb(ctx, insn, false, false);
2168 case 0x2B:
2169 return trans_addb(ctx, insn, false, true);
b2167459
RH
2170 case 0x2C:
2171 case 0x2D:
2172 return trans_addi(ctx, insn);
98cd9ca7
RH
2173 case 0x2F:
2174 return trans_cmpb(ctx, insn, false, false, true);
2175 case 0x30:
2176 case 0x31:
2177 return trans_bb(ctx, insn);
2178 case 0x32:
2179 return trans_movb(ctx, insn, false);
2180 case 0x33:
2181 return trans_movb(ctx, insn, true);
0b1347d2
RH
2182 case 0x34:
2183 return translate_table(ctx, insn, table_sh_ex);
2184 case 0x35:
2185 return translate_table(ctx, insn, table_depw);
98cd9ca7
RH
2186 case 0x38:
2187 return trans_be(ctx, insn, false);
2188 case 0x39:
2189 return trans_be(ctx, insn, true);
2190 case 0x3A:
2191 return translate_table(ctx, insn, table_branch);
61766fe9
RH
2192 default:
2193 break;
2194 }
2195 return gen_illegal(ctx);
2196}
2197
2198void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
2199{
2200 HPPACPU *cpu = hppa_env_get_cpu(env);
2201 CPUState *cs = CPU(cpu);
2202 DisasContext ctx;
2203 ExitStatus ret;
2204 int num_insns, max_insns, i;
2205
2206 ctx.tb = tb;
2207 ctx.cs = cs;
2208 ctx.iaoq_f = tb->pc;
2209 ctx.iaoq_b = tb->cs_base;
2210 ctx.singlestep_enabled = cs->singlestep_enabled;
2211
2212 ctx.ntemps = 0;
2213 for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
2214 TCGV_UNUSED(ctx.temps[i]);
2215 }
2216
2217 /* Compute the maximum number of insns to execute, as bounded by
2218 (1) icount, (2) single-stepping, (3) branch delay slots, or
2219 (4) the number of insns remaining on the current page. */
2220 max_insns = tb->cflags & CF_COUNT_MASK;
2221 if (max_insns == 0) {
2222 max_insns = CF_COUNT_MASK;
2223 }
2224 if (ctx.singlestep_enabled || singlestep) {
2225 max_insns = 1;
2226 } else if (max_insns > TCG_MAX_INSNS) {
2227 max_insns = TCG_MAX_INSNS;
2228 }
2229
2230 num_insns = 0;
2231 gen_tb_start(tb);
2232
129e9cc3
RH
2233 /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */
2234 ctx.null_cond = cond_make_f();
2235 ctx.psw_n_nonzero = false;
2236 if (tb->flags & 1) {
2237 ctx.null_cond.c = TCG_COND_ALWAYS;
2238 ctx.psw_n_nonzero = true;
2239 }
2240 ctx.null_lab = NULL;
2241
61766fe9
RH
2242 do {
2243 tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
2244 num_insns++;
2245
2246 if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
2247 ret = gen_excp(&ctx, EXCP_DEBUG);
2248 break;
2249 }
2250 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2251 gen_io_start();
2252 }
2253
7ad439df
RH
2254 if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
2255 ret = do_page_zero(&ctx);
2256 assert(ret != NO_EXIT);
2257 } else {
61766fe9
RH
2258 /* Always fetch the insn, even if nullified, so that we check
2259 the page permissions for execute. */
2260 uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);
2261
2262 /* Set up the IA queue for the next insn.
2263 This will be overwritten by a branch. */
2264 if (ctx.iaoq_b == -1) {
2265 ctx.iaoq_n = -1;
2266 ctx.iaoq_n_var = get_temp(&ctx);
2267 tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
2268 } else {
2269 ctx.iaoq_n = ctx.iaoq_b + 4;
2270 TCGV_UNUSED(ctx.iaoq_n_var);
2271 }
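/* iaoq_f and iaoq_b mirror the two entries of the HPPA instruction address
   offset queue: the insn being translated and its successor (the delay
   slot of a taken branch).  iaoq_n is what the back of the queue becomes
   once this insn completes; -1 means the address is only known at run
   time, in which case it is carried in the temporary iaoq_n_var instead.  */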
2272
129e9cc3
RH
2273 if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
2274 ctx.null_cond.c = TCG_COND_NEVER;
2275 ret = NO_EXIT;
2276 } else {
2277 ret = translate_one(&ctx, insn);
2278 assert(ctx.null_lab == NULL);
2279 }
61766fe9
RH
2280 }
2281
2282 for (i = 0; i < ctx.ntemps; ++i) {
2283 tcg_temp_free(ctx.temps[i]);
2284 TCGV_UNUSED(ctx.temps[i]);
2285 }
2286 ctx.ntemps = 0;
2287
2288 /* If we see a non-linear instruction, exhaust the instruction count,
2289 or run out of buffer space, stop generation. */
2290 /* ??? The non-linear instruction restriction is purely due to
2291 the debugging dump. Otherwise we *could* follow unconditional
2292 branches within the same page. */
2293 if (ret == NO_EXIT
2294 && (ctx.iaoq_b != ctx.iaoq_f + 4
2295 || num_insns >= max_insns
2296 || tcg_op_buf_full())) {
129e9cc3
RH
2297 if (ctx.null_cond.c == TCG_COND_NEVER
2298 || ctx.null_cond.c == TCG_COND_ALWAYS) {
2299 nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
2300 gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
2301 ret = EXIT_GOTO_TB;
2302 } else {
2303 ret = EXIT_IAQ_N_STALE;
2304 }
61766fe9
RH
2305 }
2306
2307 ctx.iaoq_f = ctx.iaoq_b;
2308 ctx.iaoq_b = ctx.iaoq_n;
2309 if (ret == EXIT_NORETURN
2310 || ret == EXIT_GOTO_TB
2311 || ret == EXIT_IAQ_N_UPDATED) {
2312 break;
2313 }
2314 if (ctx.iaoq_f == -1) {
2315 tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
2316 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
129e9cc3 2317 nullify_save(&ctx);
61766fe9
RH
2318 ret = EXIT_IAQ_N_UPDATED;
2319 break;
2320 }
2321 if (ctx.iaoq_b == -1) {
2322 tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
2323 }
2324 } while (ret == NO_EXIT);
2325
2326 if (tb->cflags & CF_LAST_IO) {
2327 gen_io_end();
2328 }
2329
2330 switch (ret) {
2331 case EXIT_GOTO_TB:
2332 case EXIT_NORETURN:
2333 break;
2334 case EXIT_IAQ_N_STALE:
2335 copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
2336 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
129e9cc3 2337 nullify_save(&ctx);
61766fe9
RH
2338 /* FALLTHRU */
2339 case EXIT_IAQ_N_UPDATED:
2340 if (ctx.singlestep_enabled) {
2341 gen_excp_1(EXCP_DEBUG);
2342 } else {
2343 tcg_gen_exit_tb(0);
2344 }
2345 break;
2346 default:
2347 abort();
2348 }
2349
2350 gen_tb_end(tb, num_insns);
2351
2352 tb->size = num_insns * 4;
2353 tb->icount = num_insns;
2354
2355#ifdef DEBUG_DISAS
2356 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2357 && qemu_log_in_addr_range(tb->pc)) {
2358 qemu_log_lock();
7ad439df
RH
2359 switch (tb->pc) {
2360 case 0x00:
2361 qemu_log("IN:\n0x00000000: (null)\n\n");
2362 break;
2363 case 0xb0:
2364 qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
2365 break;
2366 case 0xe0:
2367 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
2368 break;
2369 case 0x100:
2370 qemu_log("IN:\n0x00000100: syscall\n\n");
2371 break;
2372 default:
2373 qemu_log("IN: %s\n", lookup_symbol(tb->pc));
2374 log_target_disas(cs, tb->pc, tb->size, 1);
2375 qemu_log("\n");
2376 break;
2377 }
61766fe9
RH
2378 qemu_log_unlock();
2379 }
2380#endif
2381}
2382
2383void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
2384 target_ulong *data)
2385{
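/* data[] holds the values recorded by tcg_gen_insn_start in
   gen_intermediate_code: data[0] is the front of the IA queue and
   data[1] the back; -1 in data[1] means the back was not statically
   known, so whatever is already in env->iaoq_b is kept.  */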
2386 env->iaoq_f = data[0];
2387 if (data[1] != -1) {
2388 env->iaoq_b = data[1];
2389 }
2390 /* Since we were executing the instruction at IAOQ_F, and took some
2391 sort of action that provoked the cpu_restore_state, we can infer
2392 that the instruction was not nullified. */
2393 env->psw_n = 0;
2394}