[qemu.git] / target-alpha / translate.c
/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
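/* Note (editorial): when ra == 31, gen_load_mem above emits no code at all.
   Alpha load encodings that target R31/F31 either serve as prefetch hints or
   are no-ops by definition (LDQ_U with ra == R31 is UNOP), so it is safe for
   the translator to discard them.  */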
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}

static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}
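/* Note (editorial): by the time a branch reaches gen_bcond_pcload the main
   translation loop has already advanced ctx->pc past the branch instruction,
   so the taken path above stores the architected target, i.e. the updated PC
   plus 4 * displacement, into cpu_pc.  */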
static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}

/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false = -1;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}

static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}

static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(inv_cond, va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
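/* Note (editorial): fn11 carries the instruction's qualifier bits directly.
   For example, fn11 == 0x580 decodes as QUAL_S | QUAL_U | QUAL_RM_N, i.e.
   software completion plus underflow checking with round-to-nearest-even,
   which is what an ADDS/SU encoding would carry.  */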
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtql_v)
FARITH2(cvtql_sv)

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}
/* ??? Ought to expand these inline; simple masking operations.  */
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
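/* For example, zapnot_mask(0x0f) is 0x00000000ffffffffull (keep the low four
   bytes) and zapnot_mask(0x01) is 0xff (keep byte 0 only), which is why the
   common cases in gen_zapnoti below collapse to plain zero extensions.  */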
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}


/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
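/* Example: EXTQH with a literal byte offset of 2 uses the shift
   (64 - 2 * 8) & 0x3f == 48 above, moving the low bytes of ra into the high
   half of the result before the byte_mask zapnot is applied.  */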
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
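/* Worked example of the shift trick above: for (B & 7) == 3 the required
   right shift is 64 - 24 = 40; the generated code computes ~(3 * 8) & 63 = 39
   and then shifts by 39 and by 1.  For (B & 7) == 0 it shifts by 63 and then
   by 1, portably producing the zero that a single 64-bit shift could not.  */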
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
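/* Example of the literal path in gen_msk_l: MSKBL (byte_mask 0x01) with an
   address whose low three bits are 2 calls gen_zapnoti with ~(0x01 << 2),
   clearing byte 2 of ra and keeping the other seven bytes.  */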
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}
636aa200 1199static inline int translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1200{
1201 uint32_t palcode;
1202 int32_t disp21, disp16, disp12;
1203 uint16_t fn11, fn16;
13e4df99 1204 uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
adf3c8b6 1205 uint8_t lit;
4c9649a9
JM
1206 int ret;
1207
1208 /* Decode all instruction fields */
1209 opc = insn >> 26;
1210 ra = (insn >> 21) & 0x1F;
1211 rb = (insn >> 16) & 0x1F;
1212 rc = insn & 0x1F;
1213 sbz = (insn >> 13) & 0x07;
13e4df99 1214 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1215 if (rb == 31 && !islit) {
1216 islit = 1;
1217 lit = 0;
1218 } else
1219 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1220 palcode = insn & 0x03FFFFFF;
1221 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1222 disp16 = (int16_t)(insn & 0x0000FFFF);
1223 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1224 fn16 = insn & 0x0000FFFF;
1225 fn11 = (insn >> 5) & 0x000007FF;
1226 fpfn = fn11 & 0x3F;
1227 fn7 = (insn >> 5) & 0x0000007F;
1228 fn2 = (insn >> 5) & 0x00000003;
1229 ret = 0;
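/* Decoding example (editorial): insn 0x201f0010 has opc 0x08 (LDA), ra 0,
   rb 31 and disp16 16, i.e. "lda r0, 16(r31)"; the LDA case below turns it
   into a plain move of the constant 16 into cpu_ir[0].  */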
806991da 1230 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1231 opc, ra, rb, rc, disp16);
806991da 1232
4c9649a9
JM
1233 switch (opc) {
1234 case 0x00:
1235 /* CALL_PAL */
ab471ade
RH
1236#ifdef CONFIG_USER_ONLY
1237 if (palcode == 0x9E) {
1238 /* RDUNIQUE */
1239 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1240 break;
1241 } else if (palcode == 0x9F) {
1242 /* WRUNIQUE */
1243 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1244 break;
1245 }
1246#endif
4c9649a9
JM
1247 if (palcode >= 0x80 && palcode < 0xC0) {
1248 /* Unprivileged PAL call */
31a877f2 1249 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
ab471ade
RH
1250 ret = 3;
1251 break;
1252 }
1253#ifndef CONFIG_USER_ONLY
1254 if (palcode < 0x40) {
4c9649a9
JM
1255 /* Privileged PAL code */
1256 if (ctx->mem_idx & 1)
1257 goto invalid_opc;
ab471ade
RH
1258 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
1259 ret = 3;
4c9649a9 1260 }
ab471ade
RH
1261#endif
1262 /* Invalid PAL call */
1263 goto invalid_opc;
4c9649a9
JM
1264 case 0x01:
1265 /* OPC01 */
1266 goto invalid_opc;
1267 case 0x02:
1268 /* OPC02 */
1269 goto invalid_opc;
1270 case 0x03:
1271 /* OPC03 */
1272 goto invalid_opc;
1273 case 0x04:
1274 /* OPC04 */
1275 goto invalid_opc;
1276 case 0x05:
1277 /* OPC05 */
1278 goto invalid_opc;
1279 case 0x06:
1280 /* OPC06 */
1281 goto invalid_opc;
1282 case 0x07:
1283 /* OPC07 */
1284 goto invalid_opc;
1285 case 0x08:
1286 /* LDA */
1ef4ef4e 1287 if (likely(ra != 31)) {
496cb5b9 1288 if (rb != 31)
3761035f
AJ
1289 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1290 else
1291 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1292 }
4c9649a9
JM
1293 break;
1294 case 0x09:
1295 /* LDAH */
1ef4ef4e 1296 if (likely(ra != 31)) {
496cb5b9 1297 if (rb != 31)
3761035f
AJ
1298 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1299 else
1300 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1301 }
4c9649a9
JM
1302 break;
1303 case 0x0A:
1304 /* LDBU */
1305 if (!(ctx->amask & AMASK_BWX))
1306 goto invalid_opc;
f18cd223 1307 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1308 break;
1309 case 0x0B:
1310 /* LDQ_U */
f18cd223 1311 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1312 break;
1313 case 0x0C:
1314 /* LDWU */
1315 if (!(ctx->amask & AMASK_BWX))
1316 goto invalid_opc;
577d5e7f 1317 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1318 break;
1319 case 0x0D:
1320 /* STW */
57a92c8e 1321 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1322 break;
1323 case 0x0E:
1324 /* STB */
57a92c8e 1325 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1326 break;
1327 case 0x0F:
1328 /* STQ_U */
57a92c8e 1329 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
4c9649a9
JM
1330 break;
1331 case 0x10:
1332 switch (fn7) {
1333 case 0x00:
1334 /* ADDL */
30c7183b
AJ
1335 if (likely(rc != 31)) {
1336 if (ra != 31) {
1337 if (islit) {
1338 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1339 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1340 } else {
30c7183b
AJ
1341 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1342 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1343 }
30c7183b
AJ
1344 } else {
1345 if (islit)
dfaa8583 1346 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1347 else
dfaa8583 1348 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1349 }
1350 }
4c9649a9
JM
1351 break;
1352 case 0x02:
1353 /* S4ADDL */
30c7183b
AJ
1354 if (likely(rc != 31)) {
1355 if (ra != 31) {
a7812ae4 1356 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1357 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1358 if (islit)
1359 tcg_gen_addi_i64(tmp, tmp, lit);
1360 else
1361 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1362 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1363 tcg_temp_free(tmp);
30c7183b
AJ
1364 } else {
1365 if (islit)
1366 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1367 else
dfaa8583 1368 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1369 }
1370 }
4c9649a9
JM
1371 break;
1372 case 0x09:
1373 /* SUBL */
30c7183b
AJ
1374 if (likely(rc != 31)) {
1375 if (ra != 31) {
dfaa8583 1376 if (islit)
30c7183b 1377 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1378 else
30c7183b 1379 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1380 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1381 } else {
1382 if (islit)
1383 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1384 else {
30c7183b
AJ
1385 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1386 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1387 }
1388 }
4c9649a9
JM
1389 break;
1390 case 0x0B:
1391 /* S4SUBL */
30c7183b
AJ
1392 if (likely(rc != 31)) {
1393 if (ra != 31) {
a7812ae4 1394 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1395 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1396 if (islit)
1397 tcg_gen_subi_i64(tmp, tmp, lit);
1398 else
1399 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1400 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1401 tcg_temp_free(tmp);
30c7183b
AJ
1402 } else {
1403 if (islit)
1404 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1405 else {
30c7183b
AJ
1406 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1407 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1408 }
30c7183b
AJ
1409 }
1410 }
4c9649a9
JM
1411 break;
1412 case 0x0F:
1413 /* CMPBGE */
a7812ae4 1414 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1415 break;
1416 case 0x12:
1417 /* S8ADDL */
30c7183b
AJ
1418 if (likely(rc != 31)) {
1419 if (ra != 31) {
a7812ae4 1420 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1421 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1422 if (islit)
1423 tcg_gen_addi_i64(tmp, tmp, lit);
1424 else
1425 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1426 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1427 tcg_temp_free(tmp);
30c7183b
AJ
1428 } else {
1429 if (islit)
1430 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1431 else
dfaa8583 1432 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1433 }
1434 }
4c9649a9
JM
1435 break;
1436 case 0x1B:
1437 /* S8SUBL */
30c7183b
AJ
1438 if (likely(rc != 31)) {
1439 if (ra != 31) {
a7812ae4 1440 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1441 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1442 if (islit)
1443 tcg_gen_subi_i64(tmp, tmp, lit);
1444 else
1445 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1446 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1447 tcg_temp_free(tmp);
30c7183b
AJ
1448 } else {
1449 if (islit)
1450 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1451 else
30c7183b
AJ
1452 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1453 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1454 }
30c7183b
AJ
1455 }
1456 }
4c9649a9
JM
1457 break;
1458 case 0x1D:
1459 /* CMPULT */
01ff9cc8 1460 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1461 break;
1462 case 0x20:
1463 /* ADDQ */
30c7183b
AJ
1464 if (likely(rc != 31)) {
1465 if (ra != 31) {
1466 if (islit)
1467 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1468 else
dfaa8583 1469 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1470 } else {
1471 if (islit)
1472 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1473 else
dfaa8583 1474 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1475 }
1476 }
4c9649a9
JM
1477 break;
1478 case 0x22:
1479 /* S4ADDQ */
30c7183b
AJ
1480 if (likely(rc != 31)) {
1481 if (ra != 31) {
a7812ae4 1482 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1483 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1484 if (islit)
1485 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1486 else
1487 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1488 tcg_temp_free(tmp);
30c7183b
AJ
1489 } else {
1490 if (islit)
1491 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1492 else
dfaa8583 1493 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1494 }
1495 }
4c9649a9
JM
1496 break;
1497 case 0x29:
1498 /* SUBQ */
30c7183b
AJ
1499 if (likely(rc != 31)) {
1500 if (ra != 31) {
1501 if (islit)
1502 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1503 else
dfaa8583 1504 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1505 } else {
1506 if (islit)
1507 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1508 else
dfaa8583 1509 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1510 }
1511 }
4c9649a9
JM
1512 break;
1513 case 0x2B:
1514 /* S4SUBQ */
30c7183b
AJ
1515 if (likely(rc != 31)) {
1516 if (ra != 31) {
a7812ae4 1517 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1518 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1519 if (islit)
1520 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1521 else
1522 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1523 tcg_temp_free(tmp);
30c7183b
AJ
1524 } else {
1525 if (islit)
1526 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1527 else
dfaa8583 1528 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1529 }
1530 }
4c9649a9
JM
1531 break;
1532 case 0x2D:
1533 /* CMPEQ */
01ff9cc8 1534 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1535 break;
1536 case 0x32:
1537 /* S8ADDQ */
30c7183b
AJ
1538 if (likely(rc != 31)) {
1539 if (ra != 31) {
a7812ae4 1540 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1541 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1542 if (islit)
1543 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1544 else
1545 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1546 tcg_temp_free(tmp);
30c7183b
AJ
1547 } else {
1548 if (islit)
1549 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1550 else
dfaa8583 1551 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1552 }
1553 }
4c9649a9
JM
1554 break;
1555 case 0x3B:
1556 /* S8SUBQ */
30c7183b
AJ
1557 if (likely(rc != 31)) {
1558 if (ra != 31) {
a7812ae4 1559 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1560 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1561 if (islit)
1562 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1563 else
1564 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1565 tcg_temp_free(tmp);
30c7183b
AJ
1566 } else {
1567 if (islit)
1568 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1569 else
dfaa8583 1570 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1571 }
1572 }
4c9649a9
JM
1573 break;
1574 case 0x3D:
1575 /* CMPULE */
01ff9cc8 1576 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
1577 break;
1578 case 0x40:
1579 /* ADDL/V */
a7812ae4 1580 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
1581 break;
1582 case 0x49:
1583 /* SUBL/V */
a7812ae4 1584 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
1585 break;
1586 case 0x4D:
1587 /* CMPLT */
01ff9cc8 1588 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
1589 break;
1590 case 0x60:
1591 /* ADDQ/V */
a7812ae4 1592 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1593 break;
1594 case 0x69:
1595 /* SUBQ/V */
a7812ae4 1596 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1597 break;
1598 case 0x6D:
1599 /* CMPLE */
01ff9cc8 1600 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
1601 break;
1602 default:
1603 goto invalid_opc;
1604 }
1605 break;
1606 case 0x11:
1607 switch (fn7) {
1608 case 0x00:
1609 /* AND */
30c7183b 1610 if (likely(rc != 31)) {
dfaa8583 1611 if (ra == 31)
30c7183b
AJ
1612 tcg_gen_movi_i64(cpu_ir[rc], 0);
1613 else if (islit)
1614 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1615 else
1616 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1617 }
4c9649a9
JM
1618 break;
1619 case 0x08:
1620 /* BIC */
30c7183b
AJ
1621 if (likely(rc != 31)) {
1622 if (ra != 31) {
1623 if (islit)
1624 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1625 else
1626 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1627 } else
1628 tcg_gen_movi_i64(cpu_ir[rc], 0);
1629 }
4c9649a9
JM
1630 break;
1631 case 0x14:
1632 /* CMOVLBS */
fe2b269a 1633 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1634 break;
1635 case 0x16:
1636 /* CMOVLBC */
fe2b269a 1637 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1638 break;
1639 case 0x20:
1640 /* BIS */
30c7183b
AJ
1641 if (likely(rc != 31)) {
1642 if (ra != 31) {
1643 if (islit)
1644 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 1645 else
30c7183b 1646 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 1647 } else {
30c7183b
AJ
1648 if (islit)
1649 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1650 else
dfaa8583 1651 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 1652 }
4c9649a9
JM
1653 }
1654 break;
1655 case 0x24:
1656 /* CMOVEQ */
fe2b269a 1657 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1658 break;
1659 case 0x26:
1660 /* CMOVNE */
fe2b269a 1661 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1662 break;
1663 case 0x28:
1664 /* ORNOT */
30c7183b 1665 if (likely(rc != 31)) {
dfaa8583 1666 if (ra != 31) {
30c7183b
AJ
1667 if (islit)
1668 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1669 else
1670 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1671 } else {
1672 if (islit)
1673 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1674 else
1675 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1676 }
1677 }
4c9649a9
JM
1678 break;
1679 case 0x40:
1680 /* XOR */
30c7183b
AJ
1681 if (likely(rc != 31)) {
1682 if (ra != 31) {
1683 if (islit)
1684 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1685 else
dfaa8583 1686 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1687 } else {
1688 if (islit)
1689 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1690 else
dfaa8583 1691 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1692 }
1693 }
4c9649a9
JM
1694 break;
1695 case 0x44:
1696 /* CMOVLT */
fe2b269a 1697 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1698 break;
1699 case 0x46:
1700 /* CMOVGE */
fe2b269a 1701 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1702 break;
1703 case 0x48:
1704 /* EQV */
30c7183b
AJ
1705 if (likely(rc != 31)) {
1706 if (ra != 31) {
1707 if (islit)
1708 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1709 else
1710 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1711 } else {
1712 if (islit)
1713 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 1714 else
dfaa8583 1715 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1716 }
1717 }
4c9649a9
JM
1718 break;
1719 case 0x61:
1720 /* AMASK */
ae8ecd42
AJ
1721 if (likely(rc != 31)) {
1722 if (islit)
1a1f7dbc 1723 tcg_gen_movi_i64(cpu_ir[rc], lit);
ae8ecd42 1724 else
1a1f7dbc
AJ
1725 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1726 switch (ctx->env->implver) {
1727 case IMPLVER_2106x:
1728 /* EV4, EV45, LCA, LCA45 & EV5 */
1729 break;
1730 case IMPLVER_21164:
1731 case IMPLVER_21264:
1732 case IMPLVER_21364:
1733 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1734 ~(uint64_t)ctx->amask);
1735 break;
1736 }
ae8ecd42 1737 }
4c9649a9
JM
1738 break;
1739 case 0x64:
1740 /* CMOVLE */
fe2b269a 1741 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1742 break;
1743 case 0x66:
1744 /* CMOVGT */
fe2b269a 1745 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1746 break;
1747 case 0x6C:
1748 /* IMPLVER */
3761035f 1749 if (rc != 31)
8579095b 1750 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
1751 break;
1752 default:
1753 goto invalid_opc;
1754 }
1755 break;
1756 case 0x12:
1757 switch (fn7) {
1758 case 0x02:
1759 /* MSKBL */
14ab1634 1760 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1761 break;
1762 case 0x06:
1763 /* EXTBL */
377a43b6 1764 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1765 break;
1766 case 0x0B:
1767 /* INSBL */
248c42f3 1768 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1769 break;
1770 case 0x12:
1771 /* MSKWL */
14ab1634 1772 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1773 break;
1774 case 0x16:
1775 /* EXTWL */
377a43b6 1776 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1777 break;
1778 case 0x1B:
1779 /* INSWL */
248c42f3 1780 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1781 break;
1782 case 0x22:
1783 /* MSKLL */
14ab1634 1784 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1785 break;
1786 case 0x26:
1787 /* EXTLL */
377a43b6 1788 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1789 break;
1790 case 0x2B:
1791 /* INSLL */
248c42f3 1792 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1793 break;
1794 case 0x30:
1795 /* ZAP */
a7812ae4 1796 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
1797 break;
1798 case 0x31:
1799 /* ZAPNOT */
a7812ae4 1800 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
1801 break;
1802 case 0x32:
1803 /* MSKQL */
14ab1634 1804 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1805 break;
1806 case 0x34:
1807 /* SRL */
30c7183b
AJ
1808 if (likely(rc != 31)) {
1809 if (ra != 31) {
1810 if (islit)
1811 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1812 else {
a7812ae4 1813 TCGv shift = tcg_temp_new();
30c7183b
AJ
1814 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1815 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1816 tcg_temp_free(shift);
dfaa8583 1817 }
30c7183b
AJ
1818 } else
1819 tcg_gen_movi_i64(cpu_ir[rc], 0);
1820 }
4c9649a9
JM
1821 break;
1822 case 0x36:
1823 /* EXTQL */
377a43b6 1824 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1825 break;
1826 case 0x39:
1827 /* SLL */
30c7183b
AJ
1828 if (likely(rc != 31)) {
1829 if (ra != 31) {
1830 if (islit)
1831 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1832 else {
a7812ae4 1833 TCGv shift = tcg_temp_new();
30c7183b
AJ
1834 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1835 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1836 tcg_temp_free(shift);
dfaa8583 1837 }
30c7183b
AJ
1838 } else
1839 tcg_gen_movi_i64(cpu_ir[rc], 0);
1840 }
4c9649a9
JM
1841 break;
1842 case 0x3B:
1843 /* INSQL */
248c42f3 1844 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1845 break;
1846 case 0x3C:
1847 /* SRA */
30c7183b
AJ
1848 if (likely(rc != 31)) {
1849 if (ra != 31) {
1850 if (islit)
1851 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1852 else {
a7812ae4 1853 TCGv shift = tcg_temp_new();
30c7183b
AJ
1854 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1855 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1856 tcg_temp_free(shift);
dfaa8583 1857 }
30c7183b
AJ
1858 } else
1859 tcg_gen_movi_i64(cpu_ir[rc], 0);
1860 }
4c9649a9
JM
1861 break;
1862 case 0x52:
1863 /* MSKWH */
ffec44f1 1864 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1865 break;
1866 case 0x57:
1867 /* INSWH */
50eb6e5c 1868 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1869 break;
1870 case 0x5A:
1871 /* EXTWH */
377a43b6 1872 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1873 break;
1874 case 0x62:
1875 /* MSKLH */
ffec44f1 1876 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1877 break;
1878 case 0x67:
1879 /* INSLH */
50eb6e5c 1880 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1881 break;
1882 case 0x6A:
1883 /* EXTLH */
377a43b6 1884 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1885 break;
1886 case 0x72:
1887 /* MSKQH */
ffec44f1 1888 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1889 break;
1890 case 0x77:
1891 /* INSQH */
50eb6e5c 1892 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1893 break;
1894 case 0x7A:
1895 /* EXTQH */
377a43b6 1896 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1897 break;
1898 default:
1899 goto invalid_opc;
1900 }
1901 break;
1902 case 0x13:
1903 switch (fn7) {
1904 case 0x00:
1905 /* MULL */
30c7183b 1906 if (likely(rc != 31)) {
dfaa8583 1907 if (ra == 31)
30c7183b
AJ
1908 tcg_gen_movi_i64(cpu_ir[rc], 0);
1909 else {
1910 if (islit)
1911 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1912 else
1913 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1914 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1915 }
1916 }
4c9649a9
JM
1917 break;
1918 case 0x20:
1919 /* MULQ */
30c7183b 1920 if (likely(rc != 31)) {
dfaa8583 1921 if (ra == 31)
30c7183b
AJ
1922 tcg_gen_movi_i64(cpu_ir[rc], 0);
1923 else if (islit)
1924 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1925 else
1926 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1927 }
4c9649a9
JM
1928 break;
1929 case 0x30:
1930 /* UMULH */
a7812ae4 1931 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
1932 break;
1933 case 0x40:
1934 /* MULL/V */
a7812ae4 1935 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
1936 break;
1937 case 0x60:
1938 /* MULQ/V */
a7812ae4 1939 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1940 break;
1941 default:
1942 goto invalid_opc;
1943 }
1944 break;
1945 case 0x14:
f24518b5 1946 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
1947 case 0x04:
1948 /* ITOFS */
1949 if (!(ctx->amask & AMASK_FIX))
1950 goto invalid_opc;
f18cd223
AJ
1951 if (likely(rc != 31)) {
1952 if (ra != 31) {
a7812ae4 1953 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 1954 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
1955 gen_helper_memory_to_s(cpu_fir[rc], tmp);
1956 tcg_temp_free_i32(tmp);
f18cd223
AJ
1957 } else
1958 tcg_gen_movi_i64(cpu_fir[rc], 0);
1959 }
4c9649a9
JM
1960 break;
1961 case 0x0A:
1962 /* SQRTF */
1963 if (!(ctx->amask & AMASK_FIX))
1964 goto invalid_opc;
a7812ae4 1965 gen_fsqrtf(rb, rc);
4c9649a9
JM
1966 break;
1967 case 0x0B:
1968 /* SQRTS */
1969 if (!(ctx->amask & AMASK_FIX))
1970 goto invalid_opc;
f24518b5 1971 gen_fsqrts(ctx, rb, rc, fn11);
4c9649a9
JM
1972 break;
1973 case 0x14:
1974 /* ITOFF */
1975 if (!(ctx->amask & AMASK_FIX))
1976 goto invalid_opc;
f18cd223
AJ
1977 if (likely(rc != 31)) {
1978 if (ra != 31) {
a7812ae4 1979 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 1980 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
1981 gen_helper_memory_to_f(cpu_fir[rc], tmp);
1982 tcg_temp_free_i32(tmp);
f18cd223
AJ
1983 } else
1984 tcg_gen_movi_i64(cpu_fir[rc], 0);
1985 }
4c9649a9
JM
1986 break;
1987 case 0x24:
1988 /* ITOFT */
1989 if (!(ctx->amask & AMASK_FIX))
1990 goto invalid_opc;
f18cd223
AJ
1991 if (likely(rc != 31)) {
1992 if (ra != 31)
1993 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
1994 else
1995 tcg_gen_movi_i64(cpu_fir[rc], 0);
1996 }
4c9649a9
JM
1997 break;
1998 case 0x2A:
1999 /* SQRTG */
2000 if (!(ctx->amask & AMASK_FIX))
2001 goto invalid_opc;
a7812ae4 2002 gen_fsqrtg(rb, rc);
4c9649a9
JM
2003 break;
2004         case 0x2B:
2005 /* SQRTT */
2006 if (!(ctx->amask & AMASK_FIX))
2007 goto invalid_opc;
f24518b5 2008 gen_fsqrtt(ctx, rb, rc, fn11);
4c9649a9
JM
2009 break;
2010 default:
2011 goto invalid_opc;
2012 }
2013 break;
2014 case 0x15:
2015 /* VAX floating point */
2016 /* XXX: rounding mode and trap are ignored (!) */
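        /* The VAX F/G expanders below take neither ctx nor fn11, so the
           instruction's qualifier bits never reach them.  */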
f24518b5 2017 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2018 case 0x00:
2019 /* ADDF */
a7812ae4 2020 gen_faddf(ra, rb, rc);
4c9649a9
JM
2021 break;
2022 case 0x01:
2023 /* SUBF */
a7812ae4 2024 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2025 break;
2026 case 0x02:
2027 /* MULF */
a7812ae4 2028 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2029 break;
2030 case 0x03:
2031 /* DIVF */
a7812ae4 2032 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2033 break;
2034 case 0x1E:
2035 /* CVTDG */
2036#if 0 // TODO
a7812ae4 2037 gen_fcvtdg(rb, rc);
4c9649a9
JM
2038#else
2039 goto invalid_opc;
2040#endif
2041 break;
2042 case 0x20:
2043 /* ADDG */
a7812ae4 2044 gen_faddg(ra, rb, rc);
4c9649a9
JM
2045 break;
2046 case 0x21:
2047 /* SUBG */
a7812ae4 2048 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2049 break;
2050 case 0x22:
2051 /* MULG */
a7812ae4 2052 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2053 break;
2054 case 0x23:
2055 /* DIVG */
a7812ae4 2056 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2057 break;
2058 case 0x25:
2059 /* CMPGEQ */
a7812ae4 2060 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2061 break;
2062 case 0x26:
2063 /* CMPGLT */
a7812ae4 2064 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2065 break;
2066 case 0x27:
2067 /* CMPGLE */
a7812ae4 2068 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2069 break;
2070 case 0x2C:
2071 /* CVTGF */
a7812ae4 2072 gen_fcvtgf(rb, rc);
4c9649a9
JM
2073 break;
2074 case 0x2D:
2075 /* CVTGD */
2076#if 0 // TODO
a7812ae4 2077 gen_fcvtgd(rb, rc);
4c9649a9
JM
2078#else
2079 goto invalid_opc;
2080#endif
2081 break;
2082 case 0x2F:
2083 /* CVTGQ */
a7812ae4 2084 gen_fcvtgq(rb, rc);
4c9649a9
JM
2085 break;
2086 case 0x3C:
2087 /* CVTQF */
a7812ae4 2088 gen_fcvtqf(rb, rc);
4c9649a9
JM
2089 break;
2090 case 0x3E:
2091 /* CVTQG */
a7812ae4 2092 gen_fcvtqg(rb, rc);
4c9649a9
JM
2093 break;
2094 default:
2095 goto invalid_opc;
2096 }
2097 break;
2098 case 0x16:
2099 /* IEEE floating-point */
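        /* Unlike the VAX cases above, these expanders receive ctx and fn11
           so that the rounding-mode and trapping qualifiers encoded in the
           function field can be honored per instruction.  */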
f24518b5 2100 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2101 case 0x00:
2102 /* ADDS */
f24518b5 2103 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2104 break;
2105 case 0x01:
2106 /* SUBS */
f24518b5 2107 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2108 break;
2109 case 0x02:
2110 /* MULS */
f24518b5 2111 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2112 break;
2113 case 0x03:
2114 /* DIVS */
f24518b5 2115 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2116 break;
2117 case 0x20:
2118 /* ADDT */
f24518b5 2119 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2120 break;
2121 case 0x21:
2122 /* SUBT */
f24518b5 2123 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2124 break;
2125 case 0x22:
2126 /* MULT */
f24518b5 2127 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2128 break;
2129 case 0x23:
2130 /* DIVT */
f24518b5 2131 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2132 break;
2133 case 0x24:
2134 /* CMPTUN */
f24518b5 2135 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2136 break;
2137 case 0x25:
2138 /* CMPTEQ */
f24518b5 2139 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2140 break;
2141 case 0x26:
2142 /* CMPTLT */
f24518b5 2143 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2144 break;
2145 case 0x27:
2146 /* CMPTLE */
f24518b5 2147 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2148 break;
2149 case 0x2C:
a74b4d2c 2150 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2151 /* CVTST */
f24518b5 2152 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2153 } else {
2154 /* CVTTS */
f24518b5 2155 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2156 }
2157 break;
2158 case 0x2F:
2159 /* CVTTQ */
f24518b5 2160 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2161 break;
2162 case 0x3C:
2163 /* CVTQS */
f24518b5 2164 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2165 break;
2166 case 0x3E:
2167 /* CVTQT */
f24518b5 2168 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2169 break;
2170 default:
2171 goto invalid_opc;
2172 }
2173 break;
2174 case 0x17:
2175 switch (fn11) {
2176 case 0x010:
2177 /* CVTLQ */
a7812ae4 2178 gen_fcvtlq(rb, rc);
4c9649a9
JM
2179 break;
2180 case 0x020:
f18cd223 2181 if (likely(rc != 31)) {
a06d48d9 2182 if (ra == rb) {
4c9649a9 2183 /* FMOV */
a06d48d9
RH
2184 if (ra == 31)
2185 tcg_gen_movi_i64(cpu_fir[rc], 0);
2186 else
2187 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2188 } else {
f18cd223 2189 /* CPYS */
a7812ae4 2190 gen_fcpys(ra, rb, rc);
a06d48d9 2191 }
4c9649a9
JM
2192 }
2193 break;
2194 case 0x021:
2195 /* CPYSN */
a7812ae4 2196 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2197 break;
2198 case 0x022:
2199 /* CPYSE */
a7812ae4 2200 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2201 break;
2202 case 0x024:
2203 /* MT_FPCR */
f18cd223 2204 if (likely(ra != 31))
a7812ae4 2205 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2206 else {
2207 TCGv tmp = tcg_const_i64(0);
a7812ae4 2208 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2209 tcg_temp_free(tmp);
2210 }
4c9649a9
JM
2211 break;
2212 case 0x025:
2213 /* MF_FPCR */
f18cd223 2214 if (likely(ra != 31))
a7812ae4 2215 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2216 break;
2217 case 0x02A:
2218 /* FCMOVEQ */
dbb30fe6 2219 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2220 break;
2221 case 0x02B:
2222 /* FCMOVNE */
dbb30fe6 2223 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2224 break;
2225 case 0x02C:
2226 /* FCMOVLT */
dbb30fe6 2227 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2228 break;
2229 case 0x02D:
2230 /* FCMOVGE */
dbb30fe6 2231 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2232 break;
2233 case 0x02E:
2234 /* FCMOVLE */
dbb30fe6 2235 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2236 break;
2237 case 0x02F:
2238 /* FCMOVGT */
dbb30fe6 2239 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2240 break;
2241 case 0x030:
2242 /* CVTQL */
a7812ae4 2243 gen_fcvtql(rb, rc);
4c9649a9
JM
2244 break;
2245 case 0x130:
2246 /* CVTQL/V */
f24518b5 2247 gen_fcvtql_v(rb, rc);
4c9649a9
JM
2248 break;
2249 case 0x530:
2250 /* CVTQL/SV */
f24518b5 2251 gen_fcvtql_sv(rb, rc);
4c9649a9
JM
2252 break;
2253 default:
2254 goto invalid_opc;
2255 }
2256 break;
2257 case 0x18:
2258 switch ((uint16_t)disp16) {
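        /* The miscellaneous instructions of opcode 0x18 are encoded in the
           displacement field; most are memory barriers or cache hints that
           this translator treats as no-ops.  */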
2259 case 0x0000:
2260 /* TRAPB */
2261 /* No-op. Just exit from the current tb */
2262 ret = 2;
2263 break;
2264 case 0x0400:
2265 /* EXCB */
2266 /* No-op. Just exit from the current tb */
2267 ret = 2;
2268 break;
2269 case 0x4000:
2270 /* MB */
2271 /* No-op */
2272 break;
2273 case 0x4400:
2274 /* WMB */
2275 /* No-op */
2276 break;
2277 case 0x8000:
2278 /* FETCH */
2279 /* No-op */
2280 break;
2281 case 0xA000:
2282 /* FETCH_M */
2283 /* No-op */
2284 break;
2285 case 0xC000:
2286 /* RPCC */
3761035f 2287 if (ra != 31)
a7812ae4 2288 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2289 break;
2290 case 0xE000:
2291 /* RC */
3761035f 2292 if (ra != 31)
a7812ae4 2293 gen_helper_rc(cpu_ir[ra]);
4c9649a9
JM
2294 break;
2295 case 0xE800:
2296 /* ECB */
4c9649a9
JM
2297 break;
2298 case 0xF000:
2299 /* RS */
3761035f 2300 if (ra != 31)
a7812ae4 2301 gen_helper_rs(cpu_ir[ra]);
4c9649a9
JM
2302 break;
2303 case 0xF800:
2304 /* WH64 */
2305 /* No-op */
2306 break;
2307 default:
2308 goto invalid_opc;
2309 }
2310 break;
2311 case 0x19:
2312 /* HW_MFPR (PALcode) */
2313#if defined (CONFIG_USER_ONLY)
2314 goto invalid_opc;
2315#else
2316 if (!ctx->pal_mode)
2317 goto invalid_opc;
8bb6e981
AJ
2318 if (ra != 31) {
2319 TCGv tmp = tcg_const_i32(insn & 0xFF);
a7812ae4 2320 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
8bb6e981
AJ
2321 tcg_temp_free(tmp);
2322 }
4c9649a9
JM
2323 break;
2324#endif
2325 case 0x1A:
3761035f
AJ
2326 if (rb != 31)
2327 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2328 else
2329 tcg_gen_movi_i64(cpu_pc, 0);
1304ca87
AJ
2330 if (ra != 31)
2331 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
4c9649a9
JM
2332         /* These four jumps differ only in the branch prediction hint, which the translator ignores */
2333 switch (fn2) {
2334 case 0x0:
2335 /* JMP */
2336 break;
2337 case 0x1:
2338 /* JSR */
2339 break;
2340 case 0x2:
2341 /* RET */
2342 break;
2343 case 0x3:
2344 /* JSR_COROUTINE */
2345 break;
2346 }
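        /* ret == 1 tells the loop in gen_intermediate_code_internal() that
           cpu_pc already holds the dynamic jump target, so it must not be
           overwritten with the fall-through PC, and the TB ends here.  */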
2347 ret = 1;
2348 break;
2349 case 0x1B:
2350 /* HW_LD (PALcode) */
2351#if defined (CONFIG_USER_ONLY)
2352 goto invalid_opc;
2353#else
2354 if (!ctx->pal_mode)
2355 goto invalid_opc;
8bb6e981 2356 if (ra != 31) {
a7812ae4 2357 TCGv addr = tcg_temp_new();
8bb6e981
AJ
2358 if (rb != 31)
2359 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2360 else
2361 tcg_gen_movi_i64(addr, disp12);
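            /* Bits <15:12> of the instruction select the access variant:
               physical or virtual, with or without lock, protection check,
               or alternate access mode.  */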
2362 switch ((insn >> 12) & 0xF) {
2363 case 0x0:
b5d51029 2364 /* Longword physical access (hw_ldl/p) */
a7812ae4 2365 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2366 break;
2367 case 0x1:
b5d51029 2368 /* Quadword physical access (hw_ldq/p) */
a7812ae4 2369 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2370 break;
2371 case 0x2:
b5d51029 2372 /* Longword physical access with lock (hw_ldl_l/p) */
a7812ae4 2373 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2374 break;
2375 case 0x3:
b5d51029 2376 /* Quadword physical access with lock (hw_ldq_l/p) */
a7812ae4 2377 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2378 break;
2379 case 0x4:
b5d51029
AJ
2380 /* Longword virtual PTE fetch (hw_ldl/v) */
2381 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2382 break;
2383 case 0x5:
b5d51029
AJ
2384 /* Quadword virtual PTE fetch (hw_ldq/v) */
2385 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2386 break;
2387 case 0x6:
2388                 /* Invalid */
b5d51029 2389 goto invalid_opc;
8bb6e981
AJ
2390 case 0x7:
2391                 /* Invalid */
b5d51029 2392 goto invalid_opc;
8bb6e981 2393 case 0x8:
b5d51029 2394 /* Longword virtual access (hw_ldl) */
a7812ae4
PB
2395 gen_helper_st_virt_to_phys(addr, addr);
2396 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2397 break;
2398 case 0x9:
b5d51029 2399 /* Quadword virtual access (hw_ldq) */
a7812ae4
PB
2400 gen_helper_st_virt_to_phys(addr, addr);
2401 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2402 break;
2403 case 0xA:
b5d51029
AJ
2404 /* Longword virtual access with protection check (hw_ldl/w) */
2405 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2406 break;
2407 case 0xB:
b5d51029
AJ
2408 /* Quadword virtual access with protection check (hw_ldq/w) */
2409 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2410 break;
2411 case 0xC:
b5d51029 2412 /* Longword virtual access with alt access mode (hw_ldl/a)*/
a7812ae4
PB
2413 gen_helper_set_alt_mode();
2414 gen_helper_st_virt_to_phys(addr, addr);
2415 gen_helper_ldl_raw(cpu_ir[ra], addr);
2416 gen_helper_restore_mode();
8bb6e981
AJ
2417 break;
2418 case 0xD:
b5d51029 2419 /* Quadword virtual access with alt access mode (hw_ldq/a) */
a7812ae4
PB
2420 gen_helper_set_alt_mode();
2421 gen_helper_st_virt_to_phys(addr, addr);
2422 gen_helper_ldq_raw(cpu_ir[ra], addr);
2423 gen_helper_restore_mode();
8bb6e981
AJ
2424 break;
2425 case 0xE:
2426 /* Longword virtual access with alternate access mode and
b5d51029 2427 * protection checks (hw_ldl/wa)
8bb6e981 2428 */
a7812ae4
PB
2429 gen_helper_set_alt_mode();
2430 gen_helper_ldl_data(cpu_ir[ra], addr);
2431 gen_helper_restore_mode();
8bb6e981
AJ
2432 break;
2433 case 0xF:
2434 /* Quadword virtual access with alternate access mode and
b5d51029 2435 * protection checks (hw_ldq/wa)
8bb6e981 2436 */
a7812ae4
PB
2437 gen_helper_set_alt_mode();
2438 gen_helper_ldq_data(cpu_ir[ra], addr);
2439 gen_helper_restore_mode();
8bb6e981
AJ
2440 break;
2441 }
2442 tcg_temp_free(addr);
4c9649a9 2443 }
4c9649a9
JM
2444 break;
2445#endif
2446 case 0x1C:
2447 switch (fn7) {
2448 case 0x00:
2449 /* SEXTB */
2450 if (!(ctx->amask & AMASK_BWX))
2451 goto invalid_opc;
ae8ecd42
AJ
2452 if (likely(rc != 31)) {
2453 if (islit)
2454 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2455 else
dfaa8583 2456 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2457 }
4c9649a9
JM
2458 break;
2459 case 0x01:
2460 /* SEXTW */
2461 if (!(ctx->amask & AMASK_BWX))
2462 goto invalid_opc;
ae8ecd42
AJ
2463 if (likely(rc != 31)) {
2464 if (islit)
2465 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
ae8ecd42 2466 else
dfaa8583 2467 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2468 }
4c9649a9
JM
2469 break;
2470 case 0x30:
2471 /* CTPOP */
2472 if (!(ctx->amask & AMASK_CIX))
2473 goto invalid_opc;
ae8ecd42
AJ
2474 if (likely(rc != 31)) {
2475 if (islit)
2476 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
ae8ecd42 2477 else
a7812ae4 2478 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2479 }
4c9649a9
JM
2480 break;
2481 case 0x31:
2482 /* PERR */
2483 if (!(ctx->amask & AMASK_MVI))
2484 goto invalid_opc;
13e4df99 2485 gen_perr(ra, rb, rc, islit, lit);
4c9649a9
JM
2486 break;
2487 case 0x32:
2488 /* CTLZ */
2489 if (!(ctx->amask & AMASK_CIX))
2490 goto invalid_opc;
ae8ecd42
AJ
2491 if (likely(rc != 31)) {
2492 if (islit)
2493 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
ae8ecd42 2494 else
a7812ae4 2495 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2496 }
4c9649a9
JM
2497 break;
2498 case 0x33:
2499 /* CTTZ */
2500 if (!(ctx->amask & AMASK_CIX))
2501 goto invalid_opc;
ae8ecd42
AJ
2502 if (likely(rc != 31)) {
2503 if (islit)
2504 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
ae8ecd42 2505 else
a7812ae4 2506 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2507 }
4c9649a9
JM
2508 break;
2509 case 0x34:
2510 /* UNPKBW */
2511 if (!(ctx->amask & AMASK_MVI))
2512 goto invalid_opc;
13e4df99
RH
2513 if (real_islit || ra != 31)
2514 goto invalid_opc;
2515 gen_unpkbw (rb, rc);
4c9649a9
JM
2516 break;
2517 case 0x35:
13e4df99 2518 /* UNPKBL */
4c9649a9
JM
2519 if (!(ctx->amask & AMASK_MVI))
2520 goto invalid_opc;
13e4df99
RH
2521 if (real_islit || ra != 31)
2522 goto invalid_opc;
2523 gen_unpkbl (rb, rc);
4c9649a9
JM
2524 break;
2525 case 0x36:
2526 /* PKWB */
2527 if (!(ctx->amask & AMASK_MVI))
2528 goto invalid_opc;
13e4df99
RH
2529 if (real_islit || ra != 31)
2530 goto invalid_opc;
2531 gen_pkwb (rb, rc);
4c9649a9
JM
2532 break;
2533 case 0x37:
2534 /* PKLB */
2535 if (!(ctx->amask & AMASK_MVI))
2536 goto invalid_opc;
13e4df99
RH
2537 if (real_islit || ra != 31)
2538 goto invalid_opc;
2539 gen_pklb (rb, rc);
4c9649a9
JM
2540 break;
2541 case 0x38:
2542 /* MINSB8 */
2543 if (!(ctx->amask & AMASK_MVI))
2544 goto invalid_opc;
13e4df99 2545 gen_minsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2546 break;
2547 case 0x39:
2548 /* MINSW4 */
2549 if (!(ctx->amask & AMASK_MVI))
2550 goto invalid_opc;
13e4df99 2551 gen_minsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2552 break;
2553 case 0x3A:
2554 /* MINUB8 */
2555 if (!(ctx->amask & AMASK_MVI))
2556 goto invalid_opc;
13e4df99 2557 gen_minub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2558 break;
2559 case 0x3B:
2560 /* MINUW4 */
2561 if (!(ctx->amask & AMASK_MVI))
2562 goto invalid_opc;
13e4df99 2563 gen_minuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2564 break;
2565 case 0x3C:
2566 /* MAXUB8 */
2567 if (!(ctx->amask & AMASK_MVI))
2568 goto invalid_opc;
13e4df99 2569 gen_maxub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2570 break;
2571 case 0x3D:
2572 /* MAXUW4 */
2573 if (!(ctx->amask & AMASK_MVI))
2574 goto invalid_opc;
13e4df99 2575 gen_maxuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2576 break;
2577 case 0x3E:
2578 /* MAXSB8 */
2579 if (!(ctx->amask & AMASK_MVI))
2580 goto invalid_opc;
13e4df99 2581 gen_maxsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2582 break;
2583 case 0x3F:
2584 /* MAXSW4 */
2585 if (!(ctx->amask & AMASK_MVI))
2586 goto invalid_opc;
13e4df99 2587 gen_maxsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2588 break;
2589 case 0x70:
2590 /* FTOIT */
2591 if (!(ctx->amask & AMASK_FIX))
2592 goto invalid_opc;
f18cd223
AJ
2593 if (likely(rc != 31)) {
2594 if (ra != 31)
2595 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2596 else
2597 tcg_gen_movi_i64(cpu_ir[rc], 0);
2598 }
4c9649a9
JM
2599 break;
2600 case 0x78:
2601 /* FTOIS */
2602 if (!(ctx->amask & AMASK_FIX))
2603 goto invalid_opc;
f18cd223 2604 if (rc != 31) {
a7812ae4 2605 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 2606 if (ra != 31)
a7812ae4 2607 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
2608 else {
2609 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2610 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
2611 tcg_temp_free(tmp2);
2612 }
2613 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 2614 tcg_temp_free_i32(tmp1);
f18cd223 2615 }
4c9649a9
JM
2616 break;
2617 default:
2618 goto invalid_opc;
2619 }
2620 break;
2621 case 0x1D:
2622 /* HW_MTPR (PALcode) */
2623#if defined (CONFIG_USER_ONLY)
2624 goto invalid_opc;
2625#else
2626 if (!ctx->pal_mode)
2627 goto invalid_opc;
8bb6e981
AJ
2628 else {
2629 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2630 if (ra != 31)
a7812ae4 2631 gen_helper_mtpr(tmp1, cpu_ir[ra]);
8bb6e981
AJ
2632 else {
2633 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2634 gen_helper_mtpr(tmp1, tmp2);
8bb6e981
AJ
2635 tcg_temp_free(tmp2);
2636 }
2637 tcg_temp_free(tmp1);
2638 ret = 2;
2639 }
4c9649a9
JM
2640 break;
2641#endif
2642 case 0x1E:
2643 /* HW_REI (PALcode) */
2644#if defined (CONFIG_USER_ONLY)
2645 goto invalid_opc;
2646#else
2647 if (!ctx->pal_mode)
2648 goto invalid_opc;
2649 if (rb == 31) {
2650 /* "Old" alpha */
a7812ae4 2651 gen_helper_hw_rei();
4c9649a9 2652 } else {
8bb6e981
AJ
2653 TCGv tmp;
2654
2655 if (ra != 31) {
a7812ae4 2656 tmp = tcg_temp_new();
8bb6e981
AJ
2657 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2658 } else
2659 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
a7812ae4 2660 gen_helper_hw_ret(tmp);
8bb6e981 2661 tcg_temp_free(tmp);
4c9649a9
JM
2662 }
2663 ret = 2;
2664 break;
2665#endif
2666 case 0x1F:
2667 /* HW_ST (PALcode) */
2668#if defined (CONFIG_USER_ONLY)
2669 goto invalid_opc;
2670#else
2671 if (!ctx->pal_mode)
2672 goto invalid_opc;
8bb6e981
AJ
2673 else {
2674 TCGv addr, val;
a7812ae4 2675 addr = tcg_temp_new();
8bb6e981
AJ
2676 if (rb != 31)
2677 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2678 else
2679 tcg_gen_movi_i64(addr, disp12);
2680 if (ra != 31)
2681 val = cpu_ir[ra];
2682 else {
a7812ae4 2683 val = tcg_temp_new();
8bb6e981
AJ
2684 tcg_gen_movi_i64(val, 0);
2685 }
2686 switch ((insn >> 12) & 0xF) {
2687 case 0x0:
2688 /* Longword physical access */
a7812ae4 2689 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2690 break;
2691 case 0x1:
2692 /* Quadword physical access */
a7812ae4 2693 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2694 break;
2695 case 0x2:
2696 /* Longword physical access with lock */
a7812ae4 2697 gen_helper_stl_c_raw(val, val, addr);
8bb6e981
AJ
2698 break;
2699 case 0x3:
2700 /* Quadword physical access with lock */
a7812ae4 2701 gen_helper_stq_c_raw(val, val, addr);
8bb6e981
AJ
2702 break;
2703 case 0x4:
2704 /* Longword virtual access */
a7812ae4
PB
2705 gen_helper_st_virt_to_phys(addr, addr);
2706 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2707 break;
2708 case 0x5:
2709 /* Quadword virtual access */
a7812ae4
PB
2710 gen_helper_st_virt_to_phys(addr, addr);
2711 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2712 break;
2713 case 0x6:
2714 /* Invalid */
2715 goto invalid_opc;
2716 case 0x7:
2717 /* Invalid */
2718 goto invalid_opc;
2719 case 0x8:
2720 /* Invalid */
2721 goto invalid_opc;
2722 case 0x9:
2723 /* Invalid */
2724 goto invalid_opc;
2725 case 0xA:
2726 /* Invalid */
2727 goto invalid_opc;
2728 case 0xB:
2729 /* Invalid */
2730 goto invalid_opc;
2731 case 0xC:
2732 /* Longword virtual access with alternate access mode */
a7812ae4
PB
2733 gen_helper_set_alt_mode();
2734 gen_helper_st_virt_to_phys(addr, addr);
2735 gen_helper_stl_raw(val, addr);
2736 gen_helper_restore_mode();
8bb6e981
AJ
2737 break;
2738 case 0xD:
2739 /* Quadword virtual access with alternate access mode */
a7812ae4
PB
2740 gen_helper_set_alt_mode();
2741 gen_helper_st_virt_to_phys(addr, addr);
2742                 gen_helper_stq_raw(val, addr);
2743 gen_helper_restore_mode();
8bb6e981
AJ
2744 break;
2745 case 0xE:
2746 /* Invalid */
2747 goto invalid_opc;
2748 case 0xF:
2749 /* Invalid */
2750 goto invalid_opc;
2751 }
45d46ce8 2752 if (ra == 31)
8bb6e981
AJ
2753 tcg_temp_free(val);
2754 tcg_temp_free(addr);
4c9649a9 2755 }
4c9649a9
JM
2756 break;
2757#endif
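    /* Memory-format loads and stores.  Judging from the call sites below,
       the first trailing flag to gen_load_mem/gen_store_mem selects the FP
       register file and the last store flag marks a store-conditional
       (STx_C); the middle flag is presumably the low-address-bit clearing
       used by LDQ_U/STQ_U elsewhere in this file.  */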
2758 case 0x20:
2759 /* LDF */
f18cd223 2760 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2761 break;
2762 case 0x21:
2763 /* LDG */
f18cd223 2764 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2765 break;
2766 case 0x22:
2767 /* LDS */
f18cd223 2768 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2769 break;
2770 case 0x23:
2771 /* LDT */
f18cd223 2772 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2773 break;
2774 case 0x24:
2775 /* STF */
57a92c8e 2776 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2777 break;
2778 case 0x25:
2779 /* STG */
57a92c8e 2780 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2781 break;
2782 case 0x26:
2783 /* STS */
57a92c8e 2784 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2785 break;
2786 case 0x27:
2787 /* STT */
57a92c8e 2788 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2789 break;
2790 case 0x28:
2791 /* LDL */
f18cd223 2792 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2793 break;
2794 case 0x29:
2795 /* LDQ */
f18cd223 2796 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2797 break;
2798 case 0x2A:
2799 /* LDL_L */
f4ed8679 2800 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2801 break;
2802 case 0x2B:
2803 /* LDQ_L */
f4ed8679 2804 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2805 break;
2806 case 0x2C:
2807 /* STL */
57a92c8e 2808 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
2809 break;
2810 case 0x2D:
2811 /* STQ */
57a92c8e 2812 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
2813 break;
2814 case 0x2E:
2815 /* STL_C */
57a92c8e 2816 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
2817 break;
2818 case 0x2F:
2819 /* STQ_C */
57a92c8e 2820 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
2821 break;
2822 case 0x30:
2823 /* BR */
3761035f
AJ
2824 if (ra != 31)
2825 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2826 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
4c9649a9
JM
2827 ret = 1;
2828 break;
a7812ae4 2829 case 0x31: /* FBEQ */
dbb30fe6
RH
2830 gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2831 ret = 1;
2832 break;
a7812ae4 2833 case 0x32: /* FBLT */
dbb30fe6
RH
2834 gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2835 ret = 1;
2836 break;
a7812ae4 2837 case 0x33: /* FBLE */
dbb30fe6 2838 gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
2839 ret = 1;
2840 break;
2841 case 0x34:
2842 /* BSR */
3761035f
AJ
2843 if (ra != 31)
2844 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2845 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
4c9649a9
JM
2846 ret = 1;
2847 break;
a7812ae4 2848 case 0x35: /* FBNE */
dbb30fe6
RH
2849 gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2850 ret = 1;
2851 break;
a7812ae4 2852 case 0x36: /* FBGE */
dbb30fe6
RH
2853 gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2854 ret = 1;
2855 break;
a7812ae4 2856 case 0x37: /* FBGT */
dbb30fe6 2857 gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
2858 ret = 1;
2859 break;
2860 case 0x38:
2861 /* BLBC */
a1516744 2862 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
2863 ret = 1;
2864 break;
2865 case 0x39:
2866 /* BEQ */
a1516744 2867 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
2868 ret = 1;
2869 break;
2870 case 0x3A:
2871 /* BLT */
a1516744 2872 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
2873 ret = 1;
2874 break;
2875 case 0x3B:
2876 /* BLE */
a1516744 2877 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
2878 ret = 1;
2879 break;
2880 case 0x3C:
2881 /* BLBS */
a1516744 2882 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
2883 ret = 1;
2884 break;
2885 case 0x3D:
2886 /* BNE */
a1516744 2887 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
2888 ret = 1;
2889 break;
2890 case 0x3E:
2891 /* BGE */
a1516744 2892 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
2893 ret = 1;
2894 break;
2895 case 0x3F:
2896 /* BGT */
a1516744 2897 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
2898 ret = 1;
2899 break;
2900 invalid_opc:
2901 gen_invalid(ctx);
2902 ret = 3;
2903 break;
2904 }
2905
2906 return ret;
2907}
2908
636aa200
BS
2909static inline void gen_intermediate_code_internal(CPUState *env,
2910 TranslationBlock *tb,
2911 int search_pc)
4c9649a9 2912{
4c9649a9
JM
2913 DisasContext ctx, *ctxp = &ctx;
2914 target_ulong pc_start;
2915 uint32_t insn;
2916 uint16_t *gen_opc_end;
a1d1bb31 2917 CPUBreakpoint *bp;
4c9649a9
JM
2918 int j, lj = -1;
2919 int ret;
2e70f6ef
PB
2920 int num_insns;
2921 int max_insns;
4c9649a9
JM
2922
2923 pc_start = tb->pc;
4c9649a9 2924 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4c9649a9
JM
2925 ctx.pc = pc_start;
2926 ctx.amask = env->amask;
8579095b 2927 ctx.env = env;
4c9649a9
JM
2928#if defined (CONFIG_USER_ONLY)
2929 ctx.mem_idx = 0;
2930#else
2931 ctx.mem_idx = ((env->ps >> 3) & 3);
2932 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
2933#endif
f24518b5
RH
2934
2935 /* ??? Every TB begins with unset rounding mode, to be initialized on
2936       the first fp insn of the TB. Alternatively we could define a proper
2937 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2938 to reset the FP_STATUS to that default at the end of any TB that
2939       changes the default. We could even (gasp) dynamically figure out
2940 what default would be most efficient given the running program. */
2941 ctx.tb_rm = -1;
2942 /* Similarly for flush-to-zero. */
2943 ctx.tb_ftz = -1;
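    /* A minimal sketch of the lazy scheme this enables (assuming a
       qualifier expander along these lines exists elsewhere in this file):
           if (ctx->tb_rm != desired_rm) {
               ctx->tb_rm = desired_rm;
               ... emit a helper call that updates FP_STATUS rounding ...
           }
       so the FP rounding mode is only rewritten when a TB actually
       changes it.  */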
2944
2e70f6ef
PB
2945 num_insns = 0;
2946 max_insns = tb->cflags & CF_COUNT_MASK;
2947 if (max_insns == 0)
2948 max_insns = CF_COUNT_MASK;
2949
2950 gen_icount_start();
4c9649a9 2951 for (ret = 0; ret == 0;) {
72cf2d4f
BS
2952 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2953 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 2954 if (bp->pc == ctx.pc) {
4c9649a9
JM
2955 gen_excp(&ctx, EXCP_DEBUG, 0);
2956 break;
2957 }
2958 }
2959 }
2960 if (search_pc) {
2961 j = gen_opc_ptr - gen_opc_buf;
2962 if (lj < j) {
2963 lj++;
2964 while (lj < j)
2965 gen_opc_instr_start[lj++] = 0;
4c9649a9 2966 }
ed1dda53
AJ
2967 gen_opc_pc[lj] = ctx.pc;
2968 gen_opc_instr_start[lj] = 1;
2969 gen_opc_icount[lj] = num_insns;
4c9649a9 2970 }
2e70f6ef
PB
2971 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
2972 gen_io_start();
4c9649a9 2973 insn = ldl_code(ctx.pc);
2e70f6ef 2974 num_insns++;
c4b3be39
RH
2975
2976 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2977 tcg_gen_debug_insn_start(ctx.pc);
2978 }
2979
4c9649a9
JM
2980 ctx.pc += 4;
2981 ret = translate_one(ctxp, insn);
2982 if (ret != 0)
2983 break;
2984 /* if we reach a page boundary or are single stepping, stop
2985 * generation
2986 */
19bf517b
AJ
2987 if (env->singlestep_enabled) {
2988 gen_excp(&ctx, EXCP_DEBUG, 0);
2989 break;
1b530a6d 2990 }
19bf517b 2991
8fcc55f9
AJ
2992 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
2993 break;
2994
2995 if (gen_opc_ptr >= gen_opc_end)
2996 break;
2997
2998 if (num_insns >= max_insns)
2999 break;
3000
1b530a6d
AJ
3001 if (singlestep) {
3002 break;
3003 }
4c9649a9
JM
3004 }
3005 if (ret != 1 && ret != 3) {
496cb5b9 3006 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4c9649a9 3007 }
2e70f6ef
PB
3008 if (tb->cflags & CF_LAST_IO)
3009 gen_io_end();
4c9649a9 3010 /* Generate the return instruction */
57fec1fe 3011 tcg_gen_exit_tb(0);
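    /* tcg_gen_exit_tb(0) returns to the main execution loop without direct
       TB chaining; no goto_tb linking is used here.  */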
2e70f6ef 3012 gen_icount_end(tb, num_insns);
4c9649a9
JM
3013 *gen_opc_ptr = INDEX_op_end;
3014 if (search_pc) {
3015 j = gen_opc_ptr - gen_opc_buf;
3016 lj++;
3017 while (lj <= j)
3018 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3019 } else {
3020 tb->size = ctx.pc - pc_start;
2e70f6ef 3021 tb->icount = num_insns;
4c9649a9 3022 }
806991da 3023#ifdef DEBUG_DISAS
8fec2b8c 3024 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3025 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3026 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3027 qemu_log("\n");
4c9649a9 3028 }
4c9649a9 3029#endif
4c9649a9
JM
3030}
3031
2cfc5f17 3032void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3033{
2cfc5f17 3034 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3035}
3036
2cfc5f17 3037void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3038{
2cfc5f17 3039 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3040}
3041
a964acc6
RH
3042struct cpu_def_t {
3043 const char *name;
3044 int implver, amask;
3045};
3046
3047static const struct cpu_def_t cpu_defs[] = {
3048 { "ev4", IMPLVER_2106x, 0 },
3049 { "ev5", IMPLVER_21164, 0 },
3050 { "ev56", IMPLVER_21164, AMASK_BWX },
3051 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3052 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3053 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3054 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3055 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3056 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3057 { "21064", IMPLVER_2106x, 0 },
3058 { "21164", IMPLVER_21164, 0 },
3059 { "21164a", IMPLVER_21164, AMASK_BWX },
3060 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3061 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3062 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3063 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3064};
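/* Model names not found in this table silently fall back to the ev67-class
   defaults set at the top of cpu_alpha_init() below.  */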
3065
aaed909a 3066CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3067{
3068 CPUAlphaState *env;
a964acc6 3069 int implver, amask, i, max;
4c9649a9
JM
3070
3071 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3072 cpu_exec_init(env);
2e70f6ef 3073 alpha_translate_init();
4c9649a9 3074 tlb_flush(env, 1);
a964acc6
RH
3075
3076 /* Default to ev67; no reason not to emulate insns by default. */
3077 implver = IMPLVER_21264;
3078 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3079 | AMASK_TRAP | AMASK_PREFETCH);
3080
3081 max = ARRAY_SIZE(cpu_defs);
3082 for (i = 0; i < max; i++) {
3083 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3084 implver = cpu_defs[i].implver;
3085 amask = cpu_defs[i].amask;
3086 break;
3087 }
3088 }
3089 env->implver = implver;
3090 env->amask = amask;
3091
4c9649a9
JM
3092 env->ps = 0x1F00;
3093#if defined (CONFIG_USER_ONLY)
3094 env->ps |= 1 << 3;
2edd07ef
RH
3095 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3096 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3097#else
4c9649a9 3098 pal_init(env);
6049f4f8 3099#endif
dad081ee 3100
4c9649a9 3101 /* Initialize IPR */
dad081ee
RH
3102#if defined (CONFIG_USER_ONLY)
3103 env->ipr[IPR_EXC_ADDR] = 0;
3104 env->ipr[IPR_EXC_SUM] = 0;
3105 env->ipr[IPR_EXC_MASK] = 0;
3106#else
3107 {
3108 uint64_t hwpcb;
3109 hwpcb = env->ipr[IPR_PCBB];
3110 env->ipr[IPR_ASN] = 0;
3111 env->ipr[IPR_ASTEN] = 0;
3112 env->ipr[IPR_ASTSR] = 0;
3113 env->ipr[IPR_DATFX] = 0;
3114 /* XXX: fix this */
3115 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3116 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3117 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3118 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3119 env->ipr[IPR_FEN] = 0;
3120 env->ipr[IPR_IPL] = 31;
3121 env->ipr[IPR_MCES] = 0;
3122 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3123 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3124 env->ipr[IPR_SISR] = 0;
3125 env->ipr[IPR_VIRBND] = -1ULL;
3126 }
3127#endif
4c9649a9 3128
0bf46a40 3129 qemu_init_vcpu(env);
4c9649a9
JM
3130 return env;
3131}
aaed909a 3132
d2856f1a
AJ
3133void gen_pc_load(CPUState *env, TranslationBlock *tb,
3134 unsigned long searched_pc, int pc_pos, void *puc)
3135{
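    /* Translation with search_pc set recorded the guest PC of every
       generated op in gen_opc_pc[]; pc_pos indexes the op being restored,
       so recover its PC here.  */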
3136 env->pc = gen_opc_pc[pc_pos];
3137}