target-alpha: Implement cvtql inline.
[qemu.git] / target-alpha / translate.c
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
20#include <stdint.h>
21#include <stdlib.h>
22#include <stdio.h>
23
24#include "cpu.h"
25#include "exec-all.h"
26#include "disas.h"
ae8ecd42 27#include "host-utils.h"
57fec1fe 28#include "tcg-op.h"
ca10f867 29#include "qemu-common.h"
4c9649a9 30
a7812ae4
PB
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
19188121 35#undef ALPHA_DEBUG_DISAS
f24518b5 36#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
37
38#ifdef ALPHA_DEBUG_DISAS
806991da 39# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
40#else
41# define LOG_DISAS(...) do { } while (0)
42#endif
43
4c9649a9
JM
44typedef struct DisasContext DisasContext;
45struct DisasContext {
46 uint64_t pc;
47 int mem_idx;
48#if !defined (CONFIG_USER_ONLY)
49 int pal_mode;
50#endif
8579095b 51 CPUAlphaState *env;
4c9649a9 52 uint32_t amask;
f24518b5
RH
53
54 /* Current rounding mode for this TB. */
55 int tb_rm;
56 /* Current flush-to-zero setting for this TB. */
57 int tb_ftz;
4c9649a9
JM
58};
59
3761035f 60/* global register indexes */
a7812ae4 61static TCGv_ptr cpu_env;
496cb5b9 62static TCGv cpu_ir[31];
f18cd223 63static TCGv cpu_fir[31];
496cb5b9 64static TCGv cpu_pc;
f4ed8679 65static TCGv cpu_lock;
ab471ade
RH
66#ifdef CONFIG_USER_ONLY
67static TCGv cpu_uniq;
68#endif
496cb5b9 69
3761035f 70/* register names */
f18cd223 71static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef
PB
72
73#include "gen-icount.h"
74
a5f1b965 75static void alpha_translate_init(void)
2e70f6ef 76{
496cb5b9
AJ
77 int i;
78 char *p;
2e70f6ef 79 static int done_init = 0;
496cb5b9 80
2e70f6ef
PB
81 if (done_init)
82 return;
496cb5b9 83
a7812ae4 84 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
496cb5b9
AJ
85
86 p = cpu_reg_names;
87 for (i = 0; i < 31; i++) {
88 sprintf(p, "ir%d", i);
a7812ae4
PB
89 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
90 offsetof(CPUState, ir[i]), p);
6ba8dcd7 91 p += (i < 10) ? 4 : 5;
f18cd223
AJ
92
93 sprintf(p, "fir%d", i);
a7812ae4
PB
94 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
95 offsetof(CPUState, fir[i]), p);
f18cd223 96 p += (i < 10) ? 5 : 6;
496cb5b9
AJ
97 }
98
a7812ae4
PB
99 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
100 offsetof(CPUState, pc), "pc");
496cb5b9 101
a7812ae4
PB
102 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
103 offsetof(CPUState, lock), "lock");
f4ed8679 104
ab471ade
RH
105#ifdef CONFIG_USER_ONLY
106 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
107 offsetof(CPUState, unique), "uniq");
108#endif
109
496cb5b9 110 /* register helpers */
a7812ae4 111#define GEN_HELPER 2
496cb5b9
AJ
112#include "helper.h"
113
2e70f6ef
PB
114 done_init = 1;
115}
116
636aa200 117static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
4c9649a9 118{
a7812ae4 119 TCGv_i32 tmp1, tmp2;
6ad02592 120
496cb5b9 121 tcg_gen_movi_i64(cpu_pc, ctx->pc);
6ad02592
AJ
122 tmp1 = tcg_const_i32(exception);
123 tmp2 = tcg_const_i32(error_code);
a7812ae4
PB
124 gen_helper_excp(tmp1, tmp2);
125 tcg_temp_free_i32(tmp2);
126 tcg_temp_free_i32(tmp1);
4c9649a9
JM
127}
128
636aa200 129static inline void gen_invalid(DisasContext *ctx)
4c9649a9
JM
130{
131 gen_excp(ctx, EXCP_OPCDEC, 0);
132}
133
636aa200 134static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 135{
a7812ae4
PB
136 TCGv tmp = tcg_temp_new();
137 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 138 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
139 tcg_gen_trunc_i64_i32(tmp32, tmp);
140 gen_helper_memory_to_f(t0, tmp32);
141 tcg_temp_free_i32(tmp32);
f18cd223
AJ
142 tcg_temp_free(tmp);
143}
144
636aa200 145static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 146{
a7812ae4 147 TCGv tmp = tcg_temp_new();
f18cd223 148 tcg_gen_qemu_ld64(tmp, t1, flags);
a7812ae4 149 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
150 tcg_temp_free(tmp);
151}
152
636aa200 153static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 154{
a7812ae4
PB
155 TCGv tmp = tcg_temp_new();
156 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 157 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
158 tcg_gen_trunc_i64_i32(tmp32, tmp);
159 gen_helper_memory_to_s(t0, tmp32);
160 tcg_temp_free_i32(tmp32);
f18cd223
AJ
161 tcg_temp_free(tmp);
162}
163
636aa200 164static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
165{
166 tcg_gen_mov_i64(cpu_lock, t1);
167 tcg_gen_qemu_ld32s(t0, t1, flags);
168}
169
636aa200 170static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
171{
172 tcg_gen_mov_i64(cpu_lock, t1);
173 tcg_gen_qemu_ld64(t0, t1, flags);
174}
175
636aa200
BS
176static inline void gen_load_mem(DisasContext *ctx,
177 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
178 int flags),
179 int ra, int rb, int32_t disp16, int fp,
180 int clear)
023d8ca2
AJ
181{
182 TCGv addr;
183
184 if (unlikely(ra == 31))
185 return;
186
a7812ae4 187 addr = tcg_temp_new();
023d8ca2
AJ
188 if (rb != 31) {
189 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
190 if (clear)
191 tcg_gen_andi_i64(addr, addr, ~0x7);
192 } else {
193 if (clear)
194 disp16 &= ~0x7;
195 tcg_gen_movi_i64(addr, disp16);
196 }
f18cd223
AJ
197 if (fp)
198 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
199 else
200 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
023d8ca2
AJ
201 tcg_temp_free(addr);
202}
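The address arithmetic above is shared by every load and store: the base is cpu_ir[rb] (or 0 when rb is register $31), the sign-extended 16-bit displacement is added, and the unaligned LDQ_U/STQ_U forms additionally clear the low three bits. A minimal host-side sketch of that effective-address computation follows; the function name ea() is illustrative and not part of QEMU.

#include <stdint.h>
#include <assert.h>

/* Effective address as formed by gen_load_mem()/gen_store_mem() above:
   base (0 for register $31) plus sign-extended disp16, quadword-aligned
   when the "clear" flag is set (LDQ_U/STQ_U).  Sketch only. */
static uint64_t ea(uint64_t base, int rb_is_r31, int32_t disp16, int clear)
{
    uint64_t addr = (rb_is_r31 ? 0 : base) + (int64_t)disp16;
    if (clear) {
        addr &= ~(uint64_t)0x7;
    }
    return addr;
}

int main(void)
{
    assert(ea(0x1005, 0, 2, 0) == 0x1007);        /* plain load/store */
    assert(ea(0x1005, 0, 2, 1) == 0x1000);        /* LDQ_U-style alignment */
    assert(ea(0xdead, 1, -8, 0) == (uint64_t)-8); /* rb == $31: base is 0 */
    return 0;
}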
203
636aa200 204static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 205{
a7812ae4
PB
206 TCGv_i32 tmp32 = tcg_temp_new_i32();
207 TCGv tmp = tcg_temp_new();
208 gen_helper_f_to_memory(tmp32, t0);
209 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
210 tcg_gen_qemu_st32(tmp, t1, flags);
211 tcg_temp_free(tmp);
a7812ae4 212 tcg_temp_free_i32(tmp32);
f18cd223
AJ
213}
214
636aa200 215static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 216{
a7812ae4
PB
217 TCGv tmp = tcg_temp_new();
218 gen_helper_g_to_memory(tmp, t0);
f18cd223
AJ
219 tcg_gen_qemu_st64(tmp, t1, flags);
220 tcg_temp_free(tmp);
221}
222
636aa200 223static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 224{
a7812ae4
PB
225 TCGv_i32 tmp32 = tcg_temp_new_i32();
226 TCGv tmp = tcg_temp_new();
227 gen_helper_s_to_memory(tmp32, t0);
228 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
229 tcg_gen_qemu_st32(tmp, t1, flags);
230 tcg_temp_free(tmp);
a7812ae4 231 tcg_temp_free_i32(tmp32);
f18cd223
AJ
232}
233
636aa200 234static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
235{
236 int l1, l2;
237
238 l1 = gen_new_label();
239 l2 = gen_new_label();
240 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
241 tcg_gen_qemu_st32(t0, t1, flags);
6223246a 242 tcg_gen_movi_i64(t0, 1);
f4ed8679
AJ
243 tcg_gen_br(l2);
244 gen_set_label(l1);
6223246a 245 tcg_gen_movi_i64(t0, 0);
f4ed8679
AJ
246 gen_set_label(l2);
247 tcg_gen_movi_i64(cpu_lock, -1);
248}
249
636aa200 250static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
251{
252 int l1, l2;
253
254 l1 = gen_new_label();
255 l2 = gen_new_label();
256 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
257 tcg_gen_qemu_st64(t0, t1, flags);
6223246a 258 tcg_gen_movi_i64(t0, 1);
f4ed8679
AJ
259 tcg_gen_br(l2);
260 gen_set_label(l1);
6223246a 261 tcg_gen_movi_i64(t0, 0);
f4ed8679
AJ
262 gen_set_label(l2);
263 tcg_gen_movi_i64(cpu_lock, -1);
264}
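Taken together, gen_qemu_ldl_l/ldq_l and gen_qemu_stl_c/stq_c model Alpha's load-locked/store-conditional pair with the single cpu_lock global: the load records the locked address, the conditional store succeeds only while cpu_lock still holds that address, and it always invalidates the lock by writing -1. Below is a host-side sketch of the same protocol; emu_ldl_l(), emu_stl_c() and lock_addr are illustrative names, and like the generated code this simplified model does not detect interfering stores to the locked line.

#include <stdint.h>
#include <string.h>
#include <assert.h>

static int64_t lock_addr = -1;              /* -1: no reservation held */

static int32_t emu_ldl_l(const uint8_t *mem, uint64_t addr)
{
    int32_t val;
    memcpy(&val, mem + addr, sizeof(val));
    lock_addr = (int64_t)addr;              /* LDx_L: remember the address */
    return val;
}

static int emu_stl_c(uint8_t *mem, uint64_t addr, int32_t val)
{
    int success = (lock_addr == (int64_t)addr);
    if (success) {
        memcpy(mem + addr, &val, sizeof(val));
    }
    lock_addr = -1;                         /* STx_C always clears the lock */
    return success;                         /* 1/0 is written back into Ra */
}

int main(void)
{
    uint8_t mem[16] = { 0 };
    int32_t v = emu_ldl_l(mem, 4);
    assert(emu_stl_c(mem, 4, v + 1) == 1);  /* reservation intact: succeeds */
    assert(emu_stl_c(mem, 4, v + 2) == 0);  /* lock already consumed: fails */
    return 0;
}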
265
636aa200
BS
266static inline void gen_store_mem(DisasContext *ctx,
267 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
268 int flags),
269 int ra, int rb, int32_t disp16, int fp,
270 int clear, int local)
023d8ca2 271{
9cd38c23 272 TCGv addr;
57a92c8e 273 if (local)
a7812ae4 274 addr = tcg_temp_local_new();
57a92c8e 275 else
a7812ae4 276 addr = tcg_temp_new();
023d8ca2
AJ
277 if (rb != 31) {
278 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
279 if (clear)
280 tcg_gen_andi_i64(addr, addr, ~0x7);
281 } else {
282 if (clear)
283 disp16 &= ~0x7;
284 tcg_gen_movi_i64(addr, disp16);
285 }
f18cd223
AJ
286 if (ra != 31) {
287 if (fp)
288 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
289 else
290 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
291 } else {
57a92c8e
AJ
292 TCGv zero;
293 if (local)
294 zero = tcg_const_local_i64(0);
295 else
296 zero = tcg_const_i64(0);
023d8ca2
AJ
297 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
298 tcg_temp_free(zero);
299 }
300 tcg_temp_free(addr);
301}
302
dbb30fe6 303static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
4c9649a9 304{
dbb30fe6
RH
305 int lab_over = gen_new_label();
306
307 tcg_gen_movi_i64(cpu_pc, ctx->pc);
308 tcg_gen_br(lab_over);
309 gen_set_label(lab_true);
310 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
311 gen_set_label(lab_over);
312}
313
314static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
315 int32_t disp, int mask)
316{
317 int lab_true = gen_new_label();
9c29504e 318
9c29504e
AJ
319 if (likely(ra != 31)) {
320 if (mask) {
a7812ae4 321 TCGv tmp = tcg_temp_new();
9c29504e 322 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
dbb30fe6 323 tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
9c29504e 324 tcg_temp_free(tmp);
dbb30fe6
RH
325 } else {
326 tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
327 }
9c29504e
AJ
328 } else {
329 /* Very uncommon case - Do not bother to optimize. */
330 TCGv tmp = tcg_const_i64(0);
dbb30fe6 331 tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
9c29504e
AJ
332 tcg_temp_free(tmp);
333 }
dbb30fe6 334 gen_bcond_pcload(ctx, disp, lab_true);
4c9649a9
JM
335}
336
dbb30fe6
RH
337/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
338 This is complicated by the fact that -0.0 compares the same as +0.0. */
339
340static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
4c9649a9 341{
dbb30fe6
RH
342 int lab_false = -1;
343 uint64_t mzero = 1ull << 63;
f18cd223
AJ
344 TCGv tmp;
345
dbb30fe6
RH
346 switch (cond) {
347 case TCG_COND_LE:
348 case TCG_COND_GT:
349 /* For <= or >, the -0.0 value directly compares the way we want. */
350 tcg_gen_brcondi_i64(cond, src, 0, lab_true);
a7812ae4 351 break;
dbb30fe6
RH
352
353 case TCG_COND_EQ:
354 case TCG_COND_NE:
355 /* For == or !=, we can simply mask off the sign bit and compare. */
356 /* ??? Assume that the temporary is reclaimed at the branch. */
357 tmp = tcg_temp_new();
358 tcg_gen_andi_i64(tmp, src, mzero - 1);
359 tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
a7812ae4 360 break;
dbb30fe6
RH
361
362 case TCG_COND_GE:
363 /* For >=, emit two branches to the destination. */
364 tcg_gen_brcondi_i64(cond, src, 0, lab_true);
365 tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
a7812ae4 366 break;
dbb30fe6
RH
367
368 case TCG_COND_LT:
369 /* For <, first filter out -0.0 to what will be the fallthru. */
370 lab_false = gen_new_label();
371 tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
372 tcg_gen_brcondi_i64(cond, src, 0, lab_true);
373 gen_set_label(lab_false);
a7812ae4 374 break;
dbb30fe6 375
a7812ae4
PB
376 default:
377 abort();
f18cd223 378 }
dbb30fe6
RH
379}
380
381static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
382{
383 int lab_true;
384
385 if (unlikely(ra == 31)) {
386 /* Very uncommon case, but easier to optimize it to an integer
387 comparison than continuing with the floating point comparison. */
388 gen_bcond(ctx, cond, ra, disp, 0);
389 return;
390 }
391
392 lab_true = gen_new_label();
393 gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
394 gen_bcond_pcload(ctx, disp, lab_true);
4c9649a9
JM
395}
396
636aa200
BS
397static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
398 int islit, uint8_t lit, int mask)
4c9649a9 399{
9c29504e
AJ
400 int l1;
401
402 if (unlikely(rc == 31))
403 return;
404
405 l1 = gen_new_label();
406
407 if (ra != 31) {
408 if (mask) {
a7812ae4 409 TCGv tmp = tcg_temp_new();
9c29504e
AJ
410 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
411 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
412 tcg_temp_free(tmp);
413 } else
414 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
415 } else {
416 /* Very uncommon case - Do not bother to optimize. */
417 TCGv tmp = tcg_const_i64(0);
418 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
419 tcg_temp_free(tmp);
420 }
421
4c9649a9 422 if (islit)
9c29504e 423 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 424 else
dfaa8583 425 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 426 gen_set_label(l1);
4c9649a9
JM
427}
428
dbb30fe6
RH
429static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
430{
431 TCGv va = cpu_fir[ra];
432 int l1;
433
434 if (unlikely(rc == 31))
435 return;
436 if (unlikely(ra == 31)) {
437 /* ??? Assume that the temporary is reclaimed at the branch. */
438 va = tcg_const_i64(0);
439 }
440
441 l1 = gen_new_label();
442 gen_fbcond_internal(inv_cond, va, l1);
443
444 if (rb != 31)
445 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
446 else
447 tcg_gen_movi_i64(cpu_fir[rc], 0);
448 gen_set_label(l1);
449}
450
f24518b5
RH
451#define QUAL_RM_N 0x080 /* Round mode nearest even */
452#define QUAL_RM_C 0x000 /* Round mode chopped */
453#define QUAL_RM_M 0x040 /* Round mode minus infinity */
454#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
455#define QUAL_RM_MASK 0x0c0
456
457#define QUAL_U 0x100 /* Underflow enable (fp output) */
458#define QUAL_V 0x100 /* Overflow enable (int output) */
459#define QUAL_S 0x400 /* Software completion enable */
460#define QUAL_I 0x200 /* Inexact detection enable */
461
462static void gen_qual_roundmode(DisasContext *ctx, int fn11)
463{
464 TCGv_i32 tmp;
465
466 fn11 &= QUAL_RM_MASK;
467 if (fn11 == ctx->tb_rm) {
468 return;
469 }
470 ctx->tb_rm = fn11;
471
472 tmp = tcg_temp_new_i32();
473 switch (fn11) {
474 case QUAL_RM_N:
475 tcg_gen_movi_i32(tmp, float_round_nearest_even);
476 break;
477 case QUAL_RM_C:
478 tcg_gen_movi_i32(tmp, float_round_to_zero);
479 break;
480 case QUAL_RM_M:
481 tcg_gen_movi_i32(tmp, float_round_down);
482 break;
483 case QUAL_RM_D:
484 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
485 break;
486 }
487
488#if defined(CONFIG_SOFTFLOAT_INLINE)
489 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
490 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
491 sets the one field. */
492 tcg_gen_st8_i32(tmp, cpu_env,
493 offsetof(CPUState, fp_status.float_rounding_mode));
494#else
495 gen_helper_setroundmode(tmp);
496#endif
497
498 tcg_temp_free_i32(tmp);
499}
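gen_qual_roundmode() turns the two rounding-mode bits of the fn11 function field into a softfloat rounding mode, caching the result in ctx->tb_rm so that a run of FP instructions with the same qualifier only stores it once per translation block. A small self-contained decode of the same bits is shown below; rm_name() is an illustrative helper, not QEMU code, and the strings merely name the softfloat constants used above.

#include <stdio.h>

enum { QUAL_RM_N = 0x080, QUAL_RM_C = 0x000,
       QUAL_RM_M = 0x040, QUAL_RM_D = 0x0c0, QUAL_RM_MASK = 0x0c0 };

static const char *rm_name(int fn11)
{
    switch (fn11 & QUAL_RM_MASK) {
    case QUAL_RM_N: return "round to nearest even (float_round_nearest_even)";
    case QUAL_RM_C: return "chopped / toward zero (float_round_to_zero)";
    case QUAL_RM_M: return "toward minus infinity (float_round_down)";
    default:        return "dynamic, read from the FPCR at run time";
    }
}

int main(void)
{
    printf("%s\n", rm_name(QUAL_RM_M));     /* e.g. an .../M qualified insn */
    return 0;
}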
500
501static void gen_qual_flushzero(DisasContext *ctx, int fn11)
502{
503 TCGv_i32 tmp;
504
505 fn11 &= QUAL_U;
506 if (fn11 == ctx->tb_ftz) {
507 return;
508 }
509 ctx->tb_ftz = fn11;
510
511 tmp = tcg_temp_new_i32();
512 if (fn11) {
513 /* Underflow is enabled, use the FPCR setting. */
514 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
515 } else {
516 /* Underflow is disabled, force flush-to-zero. */
517 tcg_gen_movi_i32(tmp, 1);
518 }
519
520#if defined(CONFIG_SOFTFLOAT_INLINE)
521 tcg_gen_st8_i32(tmp, cpu_env,
522 offsetof(CPUState, fp_status.flush_to_zero));
523#else
524 gen_helper_setflushzero(tmp);
525#endif
526
527 tcg_temp_free_i32(tmp);
528}
529
530static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
531{
532 TCGv val = tcg_temp_new();
533 if (reg == 31) {
534 tcg_gen_movi_i64(val, 0);
535 } else if (fn11 & QUAL_S) {
536 gen_helper_ieee_input_s(val, cpu_fir[reg]);
537 } else if (is_cmp) {
538 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
539 } else {
540 gen_helper_ieee_input(val, cpu_fir[reg]);
541 }
542 return val;
543}
544
545static void gen_fp_exc_clear(void)
546{
547#if defined(CONFIG_SOFTFLOAT_INLINE)
548 TCGv_i32 zero = tcg_const_i32(0);
549 tcg_gen_st8_i32(zero, cpu_env,
550 offsetof(CPUState, fp_status.float_exception_flags));
551 tcg_temp_free_i32(zero);
552#else
553 gen_helper_fp_exc_clear();
554#endif
555}
556
557static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
558{
559 /* ??? We ought to be able to do something with imprecise exceptions.
560 E.g. notice we're still in the trap shadow of something within the
561 TB and do not generate the code to signal the exception; end the TB
562 when an exception is forced to arrive, either by consumption of a
563 register value or TRAPB or EXCB. */
564 TCGv_i32 exc = tcg_temp_new_i32();
565 TCGv_i32 reg;
566
567#if defined(CONFIG_SOFTFLOAT_INLINE)
568 tcg_gen_ld8u_i32(exc, cpu_env,
569 offsetof(CPUState, fp_status.float_exception_flags));
570#else
571 gen_helper_fp_exc_get(exc);
572#endif
573
574 if (ignore) {
575 tcg_gen_andi_i32(exc, exc, ~ignore);
576 }
577
578 /* ??? Pass in the regno of the destination so that the helper can
579 set EXC_MASK, which contains a bitmask of destination registers
580 that have caused arithmetic traps. A simple userspace emulation
581 does not require this. We do need it for a guest kernel's entArith,
582 or if we were to do something clever with imprecise exceptions. */
583 reg = tcg_const_i32(rc + 32);
584
585 if (fn11 & QUAL_S) {
586 gen_helper_fp_exc_raise_s(exc, reg);
587 } else {
588 gen_helper_fp_exc_raise(exc, reg);
589 }
590
591 tcg_temp_free_i32(reg);
592 tcg_temp_free_i32(exc);
593}
594
595static inline void gen_fp_exc_raise(int rc, int fn11)
596{
597 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 598}
f24518b5 599
735cf45f
RH
600static void gen_fcvtql(int rb, int rc)
601{
602 if (unlikely(rc == 31)) {
603 return;
604 }
605 if (unlikely(rb == 31)) {
606 tcg_gen_movi_i64(cpu_fir[rc], 0);
607 } else {
608 TCGv tmp = tcg_temp_new();
609
610 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
611 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
612 tcg_gen_shli_i64(tmp, tmp, 32);
613 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
614 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
615
616 tcg_temp_free(tmp);
617 }
618}
619
620static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
621{
622 if (rb != 31) {
623 int lab = gen_new_label();
624 TCGv tmp = tcg_temp_new();
625
626 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
627 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
628 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
629
630 gen_set_label(lab);
631 }
632 gen_fcvtql(rb, rc);
633}
634
f24518b5
RH
635#define FARITH2(name) \
636static inline void glue(gen_f, name)(int rb, int rc) \
637{ \
638 if (unlikely(rc == 31)) { \
639 return; \
640 } \
641 if (rb != 31) { \
642 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
643 } else { \
644 TCGv tmp = tcg_const_i64(0); \
645 gen_helper_ ## name (cpu_fir[rc], tmp); \
646 tcg_temp_free(tmp); \
647 } \
648}
649FARITH2(cvtlq)
f24518b5
RH
650
651/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
652FARITH2(sqrtf)
653FARITH2(sqrtg)
a7812ae4
PB
654FARITH2(cvtgf)
655FARITH2(cvtgq)
656FARITH2(cvtqf)
657FARITH2(cvtqg)
f24518b5
RH
658
659static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
660 int rb, int rc, int fn11)
661{
662 TCGv vb;
663
664 /* ??? This is wrong: the instruction is not a nop, it still may
665 raise exceptions. */
666 if (unlikely(rc == 31)) {
667 return;
668 }
669
670 gen_qual_roundmode(ctx, fn11);
671 gen_qual_flushzero(ctx, fn11);
672 gen_fp_exc_clear();
673
674 vb = gen_ieee_input(rb, fn11, 0);
675 helper(cpu_fir[rc], vb);
676 tcg_temp_free(vb);
677
678 gen_fp_exc_raise(rc, fn11);
679}
680
681#define IEEE_ARITH2(name) \
682static inline void glue(gen_f, name)(DisasContext *ctx, \
683 int rb, int rc, int fn11) \
684{ \
685 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
686}
687IEEE_ARITH2(sqrts)
688IEEE_ARITH2(sqrtt)
689IEEE_ARITH2(cvtst)
690IEEE_ARITH2(cvtts)
691
692static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
693{
694 TCGv vb;
695 int ignore = 0;
696
697 /* ??? This is wrong: the instruction is not a nop, it still may
698 raise exceptions. */
699 if (unlikely(rc == 31)) {
700 return;
701 }
702
703 /* No need to set flushzero, since we have an integer output. */
704 gen_fp_exc_clear();
705 vb = gen_ieee_input(rb, fn11, 0);
706
707 /* Almost all integer conversions use cropped rounding, and most
708 also do not have integer overflow enabled. Special case that. */
709 switch (fn11) {
710 case QUAL_RM_C:
711 gen_helper_cvttq_c(cpu_fir[rc], vb);
712 break;
713 case QUAL_V | QUAL_RM_C:
714 case QUAL_S | QUAL_V | QUAL_RM_C:
715 ignore = float_flag_inexact;
716 /* FALLTHRU */
717 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
718 gen_helper_cvttq_svic(cpu_fir[rc], vb);
719 break;
720 default:
721 gen_qual_roundmode(ctx, fn11);
722 gen_helper_cvttq(cpu_fir[rc], vb);
723 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
724 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
725 break;
726 }
727 tcg_temp_free(vb);
728
729 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
730}
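gen_fcvttq() dispatches on the whole qualifier pattern: a plain chopped conversion goes to the cvttq_c helper, the /VC and /SVC forms share the cvttq_svic helper with the inexact flag masked off afterwards, /SVIC uses the same helper with every raised flag significant, and anything else falls back to the generic helper preceded by an explicit rounding-mode setup. A compact restatement of that dispatch follows; cvttq_variant() is an illustrative name and the strings echo the gen_helper_cvttq* calls above.

#include <stdio.h>

enum { QUAL_RM_C = 0x000, QUAL_V = 0x100, QUAL_I = 0x200, QUAL_S = 0x400 };

static const char *cvttq_variant(int fn11)
{
    switch (fn11) {
    case QUAL_RM_C:
        return "cvttq_c: chopped, overflow and inexact not reported";
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        return "cvttq_svic, with float_flag_inexact masked off afterwards";
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        return "cvttq_svic, all raised flags significant";
    default:
        return "generic cvttq, preceded by gen_qual_roundmode()";
    }
}

int main(void)
{
    printf("%s\n", cvttq_variant(QUAL_S | QUAL_V | QUAL_RM_C));
    return 0;
}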
731
f24518b5
RH
732static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
733 int rb, int rc, int fn11)
734{
735 TCGv vb;
736
737 /* ??? This is wrong: the instruction is not a nop, it still may
738 raise exceptions. */
739 if (unlikely(rc == 31)) {
740 return;
741 }
742
743 gen_qual_roundmode(ctx, fn11);
744
745 if (rb == 31) {
746 vb = tcg_const_i64(0);
747 } else {
748 vb = cpu_fir[rb];
749 }
750
751 /* The only exception that can be raised by integer conversion
752 is inexact. Thus we only need to worry about exceptions when
753 inexact handling is requested. */
754 if (fn11 & QUAL_I) {
755 gen_fp_exc_clear();
756 helper(cpu_fir[rc], vb);
757 gen_fp_exc_raise(rc, fn11);
758 } else {
759 helper(cpu_fir[rc], vb);
760 }
761
762 if (rb == 31) {
763 tcg_temp_free(vb);
764 }
765}
766
767#define IEEE_INTCVT(name) \
768static inline void glue(gen_f, name)(DisasContext *ctx, \
769 int rb, int rc, int fn11) \
770{ \
771 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
772}
773IEEE_INTCVT(cvtqs)
774IEEE_INTCVT(cvtqt)
775
776#define FARITH3(name) \
777static inline void glue(gen_f, name)(int ra, int rb, int rc) \
778{ \
779 TCGv va, vb; \
780 \
781 if (unlikely(rc == 31)) { \
782 return; \
783 } \
784 if (ra == 31) { \
785 va = tcg_const_i64(0); \
786 } else { \
787 va = cpu_fir[ra]; \
788 } \
789 if (rb == 31) { \
790 vb = tcg_const_i64(0); \
791 } else { \
792 vb = cpu_fir[rb]; \
793 } \
794 \
795 gen_helper_ ## name (cpu_fir[rc], va, vb); \
796 \
797 if (ra == 31) { \
798 tcg_temp_free(va); \
799 } \
800 if (rb == 31) { \
801 tcg_temp_free(vb); \
802 } \
803}
804/* ??? Ought to expand these inline; simple masking operations. */
805FARITH3(cpys)
806FARITH3(cpysn)
807FARITH3(cpyse)
808
809/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
810FARITH3(addf)
811FARITH3(subf)
812FARITH3(mulf)
813FARITH3(divf)
814FARITH3(addg)
815FARITH3(subg)
816FARITH3(mulg)
817FARITH3(divg)
818FARITH3(cmpgeq)
819FARITH3(cmpglt)
820FARITH3(cmpgle)
f24518b5
RH
821
822static void gen_ieee_arith3(DisasContext *ctx,
823 void (*helper)(TCGv, TCGv, TCGv),
824 int ra, int rb, int rc, int fn11)
825{
826 TCGv va, vb;
827
828 /* ??? This is wrong: the instruction is not a nop, it still may
829 raise exceptions. */
830 if (unlikely(rc == 31)) {
831 return;
832 }
833
834 gen_qual_roundmode(ctx, fn11);
835 gen_qual_flushzero(ctx, fn11);
836 gen_fp_exc_clear();
837
838 va = gen_ieee_input(ra, fn11, 0);
839 vb = gen_ieee_input(rb, fn11, 0);
840 helper(cpu_fir[rc], va, vb);
841 tcg_temp_free(va);
842 tcg_temp_free(vb);
843
844 gen_fp_exc_raise(rc, fn11);
845}
846
847#define IEEE_ARITH3(name) \
848static inline void glue(gen_f, name)(DisasContext *ctx, \
849 int ra, int rb, int rc, int fn11) \
850{ \
851 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
852}
853IEEE_ARITH3(adds)
854IEEE_ARITH3(subs)
855IEEE_ARITH3(muls)
856IEEE_ARITH3(divs)
857IEEE_ARITH3(addt)
858IEEE_ARITH3(subt)
859IEEE_ARITH3(mult)
860IEEE_ARITH3(divt)
861
862static void gen_ieee_compare(DisasContext *ctx,
863 void (*helper)(TCGv, TCGv, TCGv),
864 int ra, int rb, int rc, int fn11)
865{
866 TCGv va, vb;
867
868 /* ??? This is wrong: the instruction is not a nop, it still may
869 raise exceptions. */
870 if (unlikely(rc == 31)) {
871 return;
872 }
873
874 gen_fp_exc_clear();
875
876 va = gen_ieee_input(ra, fn11, 1);
877 vb = gen_ieee_input(rb, fn11, 1);
878 helper(cpu_fir[rc], va, vb);
879 tcg_temp_free(va);
880 tcg_temp_free(vb);
881
882 gen_fp_exc_raise(rc, fn11);
883}
884
885#define IEEE_CMP3(name) \
886static inline void glue(gen_f, name)(DisasContext *ctx, \
887 int ra, int rb, int rc, int fn11) \
888{ \
889 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
890}
891IEEE_CMP3(cmptun)
892IEEE_CMP3(cmpteq)
893IEEE_CMP3(cmptlt)
894IEEE_CMP3(cmptle)
a7812ae4 895
248c42f3
RH
896static inline uint64_t zapnot_mask(uint8_t lit)
897{
898 uint64_t mask = 0;
899 int i;
900
901 for (i = 0; i < 8; ++i) {
902 if ((lit >> i) & 1)
903 mask |= 0xffull << (i * 8);
904 }
905 return mask;
906}
907
87d98f95
RH
908/* Implement zapnot with an immediate operand, which expands to some
909 form of immediate AND. This is a basic building block in the
910 definition of many of the other byte manipulation instructions. */
248c42f3 911static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 912{
87d98f95
RH
913 switch (lit) {
914 case 0x00:
248c42f3 915 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
916 break;
917 case 0x01:
248c42f3 918 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
919 break;
920 case 0x03:
248c42f3 921 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
922 break;
923 case 0x0f:
248c42f3 924 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
925 break;
926 case 0xff:
248c42f3 927 tcg_gen_mov_i64(dest, src);
87d98f95
RH
928 break;
929 default:
248c42f3 930 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
931 break;
932 }
933}
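zapnot_mask() expands the 8-bit byte-select literal into a 64-bit AND mask, one 0xff lane per set bit, and gen_zapnoti() special-cases the literals that reduce to ordinary 8/16/32-bit zero-extensions or a plain move. A small host-side self-test of that expansion is shown below; zapnot_mask_ref() is an illustrative copy for checking, not the TCG code itself.

#include <stdint.h>
#include <assert.h>

/* Same expansion as zapnot_mask() above: bit i of the literal selects
   byte lane i of the 64-bit value. */
static uint64_t zapnot_mask_ref(uint8_t lit)
{
    uint64_t mask = 0;
    for (int i = 0; i < 8; i++) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

int main(void)
{
    assert(zapnot_mask_ref(0x01) == 0x00000000000000ffull); /* ext8u  */
    assert(zapnot_mask_ref(0x03) == 0x000000000000ffffull); /* ext16u */
    assert(zapnot_mask_ref(0x0f) == 0x00000000ffffffffull); /* ext32u */
    assert(zapnot_mask_ref(0xff) == ~0ull);                 /* plain move */
    assert(zapnot_mask_ref(0x81) == 0xff000000000000ffull);
    return 0;
}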
934
935static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
936{
937 if (unlikely(rc == 31))
938 return;
939 else if (unlikely(ra == 31))
940 tcg_gen_movi_i64(cpu_ir[rc], 0);
941 else if (islit)
248c42f3 942 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
943 else
944 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
945}
946
947static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
948{
949 if (unlikely(rc == 31))
950 return;
951 else if (unlikely(ra == 31))
952 tcg_gen_movi_i64(cpu_ir[rc], 0);
953 else if (islit)
248c42f3 954 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
955 else
956 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
957}
958
959
248c42f3 960/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
961static void gen_ext_h(int ra, int rb, int rc, int islit,
962 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
963{
964 if (unlikely(rc == 31))
965 return;
377a43b6
RH
966 else if (unlikely(ra == 31))
967 tcg_gen_movi_i64(cpu_ir[rc], 0);
968 else {
dfaa8583 969 if (islit) {
377a43b6
RH
970 lit = (64 - (lit & 7) * 8) & 0x3f;
971 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 972 } else {
377a43b6 973 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
974 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
975 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
976 tcg_gen_neg_i64(tmp1, tmp1);
977 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 978 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 979 tcg_temp_free(tmp1);
dfaa8583 980 }
248c42f3 981 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 982 }
b3249f63
AJ
983}
984
248c42f3 985/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
986static void gen_ext_l(int ra, int rb, int rc, int islit,
987 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
988{
989 if (unlikely(rc == 31))
990 return;
377a43b6
RH
991 else if (unlikely(ra == 31))
992 tcg_gen_movi_i64(cpu_ir[rc], 0);
993 else {
dfaa8583 994 if (islit) {
377a43b6 995 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 996 } else {
a7812ae4 997 TCGv tmp = tcg_temp_new();
b3249f63
AJ
998 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
999 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1000 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1001 tcg_temp_free(tmp);
fe2b269a 1002 }
248c42f3
RH
1003 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1004 }
1005}
1006
50eb6e5c
RH
1007/* INSWH, INSLH, INSQH */
1008static void gen_ins_h(int ra, int rb, int rc, int islit,
1009 uint8_t lit, uint8_t byte_mask)
1010{
1011 if (unlikely(rc == 31))
1012 return;
1013 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1014 tcg_gen_movi_i64(cpu_ir[rc], 0);
1015 else {
1016 TCGv tmp = tcg_temp_new();
1017
1018 /* The instruction description has us left-shift the byte mask
1019 and extract bits <15:8> and apply that zap at the end. This
1020 is equivalent to simply performing the zap first and shifting
1021 afterward. */
1022 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1023
1024 if (islit) {
1025 /* Note that we have handled the lit==0 case above. */
1026 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1027 } else {
1028 TCGv shift = tcg_temp_new();
1029
1030 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1031 Do this portably by splitting the shift into two parts:
1032 shift_count-1 and 1. Arrange for the -1 by using
1033 ones-complement instead of twos-complement in the negation:
1034 ~((B & 7) * 8) & 63. */
1035
1036 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1037 tcg_gen_shli_i64(shift, shift, 3);
1038 tcg_gen_not_i64(shift, shift);
1039 tcg_gen_andi_i64(shift, shift, 0x3f);
1040
1041 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1042 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1043 tcg_temp_free(shift);
1044 }
1045 tcg_temp_free(tmp);
1046 }
1047}
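The comment above describes the portable way to express a right shift whose count can reach 64: shift by ~((B & 7) * 8) & 63, which is the desired count minus one, and then by a constant 1, so that a byte offset of 0 produces a clean zero instead of an undefined 64-bit shift. A short host-side check of that identity for all eight byte offsets:

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t x = 0x0123456789abcdefull;

    for (int b = 0; b < 8; b++) {
        /* INSxH/EXTxH want a right shift by 64 - 8*(B & 7), which is 64
           when the byte offset is 0.  The generated code gets there with
           two shifts: ~((B & 7) * 8) & 63, then a constant 1. */
        int split = ~(b * 8) & 63;

        uint64_t direct   = (b == 0) ? 0 : x >> (64 - b * 8);
        uint64_t two_step = (x >> split) >> 1;
        assert(two_step == direct);
    }
    return 0;
}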
1048
248c42f3 1049/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1050static void gen_ins_l(int ra, int rb, int rc, int islit,
1051 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1052{
1053 if (unlikely(rc == 31))
1054 return;
1055 else if (unlikely(ra == 31))
1056 tcg_gen_movi_i64(cpu_ir[rc], 0);
1057 else {
1058 TCGv tmp = tcg_temp_new();
1059
1060 /* The instruction description has us left-shift the byte mask
1061 the same number of byte slots as the data and apply the zap
1062 at the end. This is equivalent to simply performing the zap
1063 first and shifting afterward. */
1064 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1065
1066 if (islit) {
1067 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1068 } else {
1069 TCGv shift = tcg_temp_new();
1070 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1071 tcg_gen_shli_i64(shift, shift, 3);
1072 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1073 tcg_temp_free(shift);
1074 }
1075 tcg_temp_free(tmp);
377a43b6 1076 }
b3249f63
AJ
1077}
1078
ffec44f1
RH
1079/* MSKWH, MSKLH, MSKQH */
1080static void gen_msk_h(int ra, int rb, int rc, int islit,
1081 uint8_t lit, uint8_t byte_mask)
1082{
1083 if (unlikely(rc == 31))
1084 return;
1085 else if (unlikely(ra == 31))
1086 tcg_gen_movi_i64(cpu_ir[rc], 0);
1087 else if (islit) {
1088 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1089 } else {
1090 TCGv shift = tcg_temp_new();
1091 TCGv mask = tcg_temp_new();
1092
1093 /* The instruction description is as above, where the byte_mask
1094 is shifted left, and then we extract bits <15:8>. This can be
1095 emulated with a right-shift on the expanded byte mask. This
1096 requires extra care because for an input <2:0> == 0 we need a
1097 shift of 64 bits in order to generate a zero. This is done by
1098 splitting the shift into two parts, the variable shift - 1
1099 followed by a constant 1 shift. The code we expand below is
1100 equivalent to ~((B & 7) * 8) & 63. */
1101
1102 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1103 tcg_gen_shli_i64(shift, shift, 3);
1104 tcg_gen_not_i64(shift, shift);
1105 tcg_gen_andi_i64(shift, shift, 0x3f);
1106 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1107 tcg_gen_shr_i64(mask, mask, shift);
1108 tcg_gen_shri_i64(mask, mask, 1);
1109
1110 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1111
1112 tcg_temp_free(mask);
1113 tcg_temp_free(shift);
1114 }
1115}
1116
14ab1634 1117/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1118static void gen_msk_l(int ra, int rb, int rc, int islit,
1119 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1120{
1121 if (unlikely(rc == 31))
1122 return;
1123 else if (unlikely(ra == 31))
1124 tcg_gen_movi_i64(cpu_ir[rc], 0);
1125 else if (islit) {
1126 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1127 } else {
1128 TCGv shift = tcg_temp_new();
1129 TCGv mask = tcg_temp_new();
1130
1131 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1132 tcg_gen_shli_i64(shift, shift, 3);
1133 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1134 tcg_gen_shl_i64(mask, mask, shift);
1135
1136 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1137
1138 tcg_temp_free(mask);
1139 tcg_temp_free(shift);
1140 }
1141}
1142
04acd307 1143/* Code to call arith3 helpers */
a7812ae4 1144#define ARITH3(name) \
636aa200
BS
1145static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1146 uint8_t lit) \
a7812ae4
PB
1147{ \
1148 if (unlikely(rc == 31)) \
1149 return; \
1150 \
1151 if (ra != 31) { \
1152 if (islit) { \
1153 TCGv tmp = tcg_const_i64(lit); \
1154 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1155 tcg_temp_free(tmp); \
1156 } else \
1157 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1158 } else { \
1159 TCGv tmp1 = tcg_const_i64(0); \
1160 if (islit) { \
1161 TCGv tmp2 = tcg_const_i64(lit); \
1162 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1163 tcg_temp_free(tmp2); \
1164 } else \
1165 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1166 tcg_temp_free(tmp1); \
1167 } \
b3249f63 1168}
a7812ae4
PB
1169ARITH3(cmpbge)
1170ARITH3(addlv)
1171ARITH3(sublv)
1172ARITH3(addqv)
1173ARITH3(subqv)
a7812ae4
PB
1174ARITH3(umulh)
1175ARITH3(mullv)
1176ARITH3(mulqv)
13e4df99
RH
1177ARITH3(minub8)
1178ARITH3(minsb8)
1179ARITH3(minuw4)
1180ARITH3(minsw4)
1181ARITH3(maxub8)
1182ARITH3(maxsb8)
1183ARITH3(maxuw4)
1184ARITH3(maxsw4)
1185ARITH3(perr)
1186
1187#define MVIOP2(name) \
1188static inline void glue(gen_, name)(int rb, int rc) \
1189{ \
1190 if (unlikely(rc == 31)) \
1191 return; \
1192 if (unlikely(rb == 31)) \
1193 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1194 else \
1195 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1196}
1197MVIOP2(pklb)
1198MVIOP2(pkwb)
1199MVIOP2(unpkbl)
1200MVIOP2(unpkbw)
b3249f63 1201
636aa200
BS
1202static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
1203 uint8_t lit)
01ff9cc8
AJ
1204{
1205 int l1, l2;
1206 TCGv tmp;
1207
1208 if (unlikely(rc == 31))
13e4df99 1209 return;
01ff9cc8
AJ
1210
1211 l1 = gen_new_label();
1212 l2 = gen_new_label();
1213
1214 if (ra != 31) {
a7812ae4 1215 tmp = tcg_temp_new();
01ff9cc8
AJ
1216 tcg_gen_mov_i64(tmp, cpu_ir[ra]);
1217 } else
1218 tmp = tcg_const_i64(0);
1219 if (islit)
1220 tcg_gen_brcondi_i64(cond, tmp, lit, l1);
01ff9cc8 1221 else
dfaa8583 1222 tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);
01ff9cc8
AJ
1223
1224 tcg_gen_movi_i64(cpu_ir[rc], 0);
1225 tcg_gen_br(l2);
1226 gen_set_label(l1);
1227 tcg_gen_movi_i64(cpu_ir[rc], 1);
1228 gen_set_label(l2);
1229}
1230
636aa200 1231static inline int translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1232{
1233 uint32_t palcode;
1234 int32_t disp21, disp16, disp12;
1235 uint16_t fn11, fn16;
13e4df99 1236 uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
adf3c8b6 1237 uint8_t lit;
4c9649a9
JM
1238 int ret;
1239
1240 /* Decode all instruction fields */
1241 opc = insn >> 26;
1242 ra = (insn >> 21) & 0x1F;
1243 rb = (insn >> 16) & 0x1F;
1244 rc = insn & 0x1F;
1245 sbz = (insn >> 13) & 0x07;
13e4df99 1246 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1247 if (rb == 31 && !islit) {
1248 islit = 1;
1249 lit = 0;
1250 } else
1251 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1252 palcode = insn & 0x03FFFFFF;
1253 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1254 disp16 = (int16_t)(insn & 0x0000FFFF);
1255 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1256 fn16 = insn & 0x0000FFFF;
1257 fn11 = (insn >> 5) & 0x000007FF;
1258 fpfn = fn11 & 0x3F;
1259 fn7 = (insn >> 5) & 0x0000007F;
1260 fn2 = (insn >> 5) & 0x00000003;
1261 ret = 0;
806991da 1262 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1263 opc, ra, rb, rc, disp16);
806991da 1264
4c9649a9
JM
1265 switch (opc) {
1266 case 0x00:
1267 /* CALL_PAL */
ab471ade
RH
1268#ifdef CONFIG_USER_ONLY
1269 if (palcode == 0x9E) {
1270 /* RDUNIQUE */
1271 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1272 break;
1273 } else if (palcode == 0x9F) {
1274 /* WRUNIQUE */
1275 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1276 break;
1277 }
1278#endif
4c9649a9
JM
1279 if (palcode >= 0x80 && palcode < 0xC0) {
1280 /* Unprivileged PAL call */
31a877f2 1281 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
ab471ade
RH
1282 ret = 3;
1283 break;
1284 }
1285#ifndef CONFIG_USER_ONLY
1286 if (palcode < 0x40) {
4c9649a9
JM
1287 /* Privileged PAL code */
1288 if (ctx->mem_idx & 1)
1289 goto invalid_opc;
ab471ade
RH
1290 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
1291 ret = 3;
4c9649a9 1292 }
ab471ade
RH
1293#endif
1294 /* Invalid PAL call */
1295 goto invalid_opc;
4c9649a9
JM
1296 case 0x01:
1297 /* OPC01 */
1298 goto invalid_opc;
1299 case 0x02:
1300 /* OPC02 */
1301 goto invalid_opc;
1302 case 0x03:
1303 /* OPC03 */
1304 goto invalid_opc;
1305 case 0x04:
1306 /* OPC04 */
1307 goto invalid_opc;
1308 case 0x05:
1309 /* OPC05 */
1310 goto invalid_opc;
1311 case 0x06:
1312 /* OPC06 */
1313 goto invalid_opc;
1314 case 0x07:
1315 /* OPC07 */
1316 goto invalid_opc;
1317 case 0x08:
1318 /* LDA */
1ef4ef4e 1319 if (likely(ra != 31)) {
496cb5b9 1320 if (rb != 31)
3761035f
AJ
1321 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1322 else
1323 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1324 }
4c9649a9
JM
1325 break;
1326 case 0x09:
1327 /* LDAH */
1ef4ef4e 1328 if (likely(ra != 31)) {
496cb5b9 1329 if (rb != 31)
3761035f
AJ
1330 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1331 else
1332 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1333 }
4c9649a9
JM
1334 break;
1335 case 0x0A:
1336 /* LDBU */
1337 if (!(ctx->amask & AMASK_BWX))
1338 goto invalid_opc;
f18cd223 1339 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1340 break;
1341 case 0x0B:
1342 /* LDQ_U */
f18cd223 1343 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1344 break;
1345 case 0x0C:
1346 /* LDWU */
1347 if (!(ctx->amask & AMASK_BWX))
1348 goto invalid_opc;
577d5e7f 1349 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1350 break;
1351 case 0x0D:
1352 /* STW */
57a92c8e 1353 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1354 break;
1355 case 0x0E:
1356 /* STB */
57a92c8e 1357 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1358 break;
1359 case 0x0F:
1360 /* STQ_U */
57a92c8e 1361 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
4c9649a9
JM
1362 break;
1363 case 0x10:
1364 switch (fn7) {
1365 case 0x00:
1366 /* ADDL */
30c7183b
AJ
1367 if (likely(rc != 31)) {
1368 if (ra != 31) {
1369 if (islit) {
1370 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1371 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1372 } else {
30c7183b
AJ
1373 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1374 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1375 }
30c7183b
AJ
1376 } else {
1377 if (islit)
dfaa8583 1378 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1379 else
dfaa8583 1380 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1381 }
1382 }
4c9649a9
JM
1383 break;
1384 case 0x02:
1385 /* S4ADDL */
30c7183b
AJ
1386 if (likely(rc != 31)) {
1387 if (ra != 31) {
a7812ae4 1388 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1389 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1390 if (islit)
1391 tcg_gen_addi_i64(tmp, tmp, lit);
1392 else
1393 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1394 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1395 tcg_temp_free(tmp);
30c7183b
AJ
1396 } else {
1397 if (islit)
1398 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1399 else
dfaa8583 1400 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1401 }
1402 }
4c9649a9
JM
1403 break;
1404 case 0x09:
1405 /* SUBL */
30c7183b
AJ
1406 if (likely(rc != 31)) {
1407 if (ra != 31) {
dfaa8583 1408 if (islit)
30c7183b 1409 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1410 else
30c7183b 1411 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1412 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1413 } else {
1414 if (islit)
1415 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1416 else {
30c7183b
AJ
1417 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1418 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1419 }
1420 }
4c9649a9
JM
1421 break;
1422 case 0x0B:
1423 /* S4SUBL */
30c7183b
AJ
1424 if (likely(rc != 31)) {
1425 if (ra != 31) {
a7812ae4 1426 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1427 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1428 if (islit)
1429 tcg_gen_subi_i64(tmp, tmp, lit);
1430 else
1431 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1432 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1433 tcg_temp_free(tmp);
30c7183b
AJ
1434 } else {
1435 if (islit)
1436 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1437 else {
30c7183b
AJ
1438 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1439 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1440 }
30c7183b
AJ
1441 }
1442 }
4c9649a9
JM
1443 break;
1444 case 0x0F:
1445 /* CMPBGE */
a7812ae4 1446 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1447 break;
1448 case 0x12:
1449 /* S8ADDL */
30c7183b
AJ
1450 if (likely(rc != 31)) {
1451 if (ra != 31) {
a7812ae4 1452 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1453 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1454 if (islit)
1455 tcg_gen_addi_i64(tmp, tmp, lit);
1456 else
1457 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1458 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1459 tcg_temp_free(tmp);
30c7183b
AJ
1460 } else {
1461 if (islit)
1462 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1463 else
dfaa8583 1464 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1465 }
1466 }
4c9649a9
JM
1467 break;
1468 case 0x1B:
1469 /* S8SUBL */
30c7183b
AJ
1470 if (likely(rc != 31)) {
1471 if (ra != 31) {
a7812ae4 1472 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1473 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1474 if (islit)
1475 tcg_gen_subi_i64(tmp, tmp, lit);
1476 else
1477 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1478 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1479 tcg_temp_free(tmp);
30c7183b
AJ
1480 } else {
1481 if (islit)
1482 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1483 else
30c7183b
AJ
1484 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1485 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1486 }
30c7183b
AJ
1487 }
1488 }
4c9649a9
JM
1489 break;
1490 case 0x1D:
1491 /* CMPULT */
01ff9cc8 1492 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1493 break;
1494 case 0x20:
1495 /* ADDQ */
30c7183b
AJ
1496 if (likely(rc != 31)) {
1497 if (ra != 31) {
1498 if (islit)
1499 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1500 else
dfaa8583 1501 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1502 } else {
1503 if (islit)
1504 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1505 else
dfaa8583 1506 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1507 }
1508 }
4c9649a9
JM
1509 break;
1510 case 0x22:
1511 /* S4ADDQ */
30c7183b
AJ
1512 if (likely(rc != 31)) {
1513 if (ra != 31) {
a7812ae4 1514 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1515 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1516 if (islit)
1517 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1518 else
1519 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1520 tcg_temp_free(tmp);
30c7183b
AJ
1521 } else {
1522 if (islit)
1523 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1524 else
dfaa8583 1525 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1526 }
1527 }
4c9649a9
JM
1528 break;
1529 case 0x29:
1530 /* SUBQ */
30c7183b
AJ
1531 if (likely(rc != 31)) {
1532 if (ra != 31) {
1533 if (islit)
1534 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1535 else
dfaa8583 1536 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1537 } else {
1538 if (islit)
1539 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1540 else
dfaa8583 1541 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1542 }
1543 }
4c9649a9
JM
1544 break;
1545 case 0x2B:
1546 /* S4SUBQ */
30c7183b
AJ
1547 if (likely(rc != 31)) {
1548 if (ra != 31) {
a7812ae4 1549 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1550 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1551 if (islit)
1552 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1553 else
1554 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1555 tcg_temp_free(tmp);
30c7183b
AJ
1556 } else {
1557 if (islit)
1558 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1559 else
dfaa8583 1560 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1561 }
1562 }
4c9649a9
JM
1563 break;
1564 case 0x2D:
1565 /* CMPEQ */
01ff9cc8 1566 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1567 break;
1568 case 0x32:
1569 /* S8ADDQ */
30c7183b
AJ
1570 if (likely(rc != 31)) {
1571 if (ra != 31) {
a7812ae4 1572 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1573 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1574 if (islit)
1575 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1576 else
1577 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1578 tcg_temp_free(tmp);
30c7183b
AJ
1579 } else {
1580 if (islit)
1581 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1582 else
dfaa8583 1583 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1584 }
1585 }
4c9649a9
JM
1586 break;
1587 case 0x3B:
1588 /* S8SUBQ */
30c7183b
AJ
1589 if (likely(rc != 31)) {
1590 if (ra != 31) {
a7812ae4 1591 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1592 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1593 if (islit)
1594 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1595 else
1596 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1597 tcg_temp_free(tmp);
30c7183b
AJ
1598 } else {
1599 if (islit)
1600 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1601 else
dfaa8583 1602 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1603 }
1604 }
4c9649a9
JM
1605 break;
1606 case 0x3D:
1607 /* CMPULE */
01ff9cc8 1608 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
1609 break;
1610 case 0x40:
1611 /* ADDL/V */
a7812ae4 1612 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
1613 break;
1614 case 0x49:
1615 /* SUBL/V */
a7812ae4 1616 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
1617 break;
1618 case 0x4D:
1619 /* CMPLT */
01ff9cc8 1620 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
1621 break;
1622 case 0x60:
1623 /* ADDQ/V */
a7812ae4 1624 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1625 break;
1626 case 0x69:
1627 /* SUBQ/V */
a7812ae4 1628 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1629 break;
1630 case 0x6D:
1631 /* CMPLE */
01ff9cc8 1632 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
1633 break;
1634 default:
1635 goto invalid_opc;
1636 }
1637 break;
1638 case 0x11:
1639 switch (fn7) {
1640 case 0x00:
1641 /* AND */
30c7183b 1642 if (likely(rc != 31)) {
dfaa8583 1643 if (ra == 31)
30c7183b
AJ
1644 tcg_gen_movi_i64(cpu_ir[rc], 0);
1645 else if (islit)
1646 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1647 else
1648 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1649 }
4c9649a9
JM
1650 break;
1651 case 0x08:
1652 /* BIC */
30c7183b
AJ
1653 if (likely(rc != 31)) {
1654 if (ra != 31) {
1655 if (islit)
1656 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1657 else
1658 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1659 } else
1660 tcg_gen_movi_i64(cpu_ir[rc], 0);
1661 }
4c9649a9
JM
1662 break;
1663 case 0x14:
1664 /* CMOVLBS */
fe2b269a 1665 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1666 break;
1667 case 0x16:
1668 /* CMOVLBC */
fe2b269a 1669 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1670 break;
1671 case 0x20:
1672 /* BIS */
30c7183b
AJ
1673 if (likely(rc != 31)) {
1674 if (ra != 31) {
1675 if (islit)
1676 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 1677 else
30c7183b 1678 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 1679 } else {
30c7183b
AJ
1680 if (islit)
1681 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1682 else
dfaa8583 1683 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 1684 }
4c9649a9
JM
1685 }
1686 break;
1687 case 0x24:
1688 /* CMOVEQ */
fe2b269a 1689 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1690 break;
1691 case 0x26:
1692 /* CMOVNE */
fe2b269a 1693 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1694 break;
1695 case 0x28:
1696 /* ORNOT */
30c7183b 1697 if (likely(rc != 31)) {
dfaa8583 1698 if (ra != 31) {
30c7183b
AJ
1699 if (islit)
1700 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1701 else
1702 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1703 } else {
1704 if (islit)
1705 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1706 else
1707 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1708 }
1709 }
4c9649a9
JM
1710 break;
1711 case 0x40:
1712 /* XOR */
30c7183b
AJ
1713 if (likely(rc != 31)) {
1714 if (ra != 31) {
1715 if (islit)
1716 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1717 else
dfaa8583 1718 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1719 } else {
1720 if (islit)
1721 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1722 else
dfaa8583 1723 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1724 }
1725 }
4c9649a9
JM
1726 break;
1727 case 0x44:
1728 /* CMOVLT */
fe2b269a 1729 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1730 break;
1731 case 0x46:
1732 /* CMOVGE */
fe2b269a 1733 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1734 break;
1735 case 0x48:
1736 /* EQV */
30c7183b
AJ
1737 if (likely(rc != 31)) {
1738 if (ra != 31) {
1739 if (islit)
1740 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1741 else
1742 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1743 } else {
1744 if (islit)
1745 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 1746 else
dfaa8583 1747 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1748 }
1749 }
4c9649a9
JM
1750 break;
1751 case 0x61:
1752 /* AMASK */
ae8ecd42
AJ
1753 if (likely(rc != 31)) {
1754 if (islit)
1a1f7dbc 1755 tcg_gen_movi_i64(cpu_ir[rc], lit);
ae8ecd42 1756 else
1a1f7dbc
AJ
1757 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1758 switch (ctx->env->implver) {
1759 case IMPLVER_2106x:
1760 /* EV4, EV45, LCA, LCA45 & EV5 */
1761 break;
1762 case IMPLVER_21164:
1763 case IMPLVER_21264:
1764 case IMPLVER_21364:
1765 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1766 ~(uint64_t)ctx->amask);
1767 break;
1768 }
ae8ecd42 1769 }
4c9649a9
JM
1770 break;
1771 case 0x64:
1772 /* CMOVLE */
fe2b269a 1773 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1774 break;
1775 case 0x66:
1776 /* CMOVGT */
fe2b269a 1777 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1778 break;
1779 case 0x6C:
1780 /* IMPLVER */
3761035f 1781 if (rc != 31)
8579095b 1782 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
1783 break;
1784 default:
1785 goto invalid_opc;
1786 }
1787 break;
1788 case 0x12:
1789 switch (fn7) {
1790 case 0x02:
1791 /* MSKBL */
14ab1634 1792 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1793 break;
1794 case 0x06:
1795 /* EXTBL */
377a43b6 1796 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1797 break;
1798 case 0x0B:
1799 /* INSBL */
248c42f3 1800 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1801 break;
1802 case 0x12:
1803 /* MSKWL */
14ab1634 1804 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1805 break;
1806 case 0x16:
1807 /* EXTWL */
377a43b6 1808 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1809 break;
1810 case 0x1B:
1811 /* INSWL */
248c42f3 1812 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1813 break;
1814 case 0x22:
1815 /* MSKLL */
14ab1634 1816 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1817 break;
1818 case 0x26:
1819 /* EXTLL */
377a43b6 1820 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1821 break;
1822 case 0x2B:
1823 /* INSLL */
248c42f3 1824 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1825 break;
1826 case 0x30:
1827 /* ZAP */
a7812ae4 1828 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
1829 break;
1830 case 0x31:
1831 /* ZAPNOT */
a7812ae4 1832 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
1833 break;
1834 case 0x32:
1835 /* MSKQL */
14ab1634 1836 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1837 break;
1838 case 0x34:
1839 /* SRL */
30c7183b
AJ
1840 if (likely(rc != 31)) {
1841 if (ra != 31) {
1842 if (islit)
1843 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1844 else {
a7812ae4 1845 TCGv shift = tcg_temp_new();
30c7183b
AJ
1846 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1847 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1848 tcg_temp_free(shift);
dfaa8583 1849 }
30c7183b
AJ
1850 } else
1851 tcg_gen_movi_i64(cpu_ir[rc], 0);
1852 }
4c9649a9
JM
1853 break;
1854 case 0x36:
1855 /* EXTQL */
377a43b6 1856 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1857 break;
1858 case 0x39:
1859 /* SLL */
30c7183b
AJ
1860 if (likely(rc != 31)) {
1861 if (ra != 31) {
1862 if (islit)
1863 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1864 else {
a7812ae4 1865 TCGv shift = tcg_temp_new();
30c7183b
AJ
1866 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1867 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1868 tcg_temp_free(shift);
dfaa8583 1869 }
30c7183b
AJ
1870 } else
1871 tcg_gen_movi_i64(cpu_ir[rc], 0);
1872 }
4c9649a9
JM
1873 break;
1874 case 0x3B:
1875 /* INSQL */
248c42f3 1876 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1877 break;
1878 case 0x3C:
1879 /* SRA */
30c7183b
AJ
1880 if (likely(rc != 31)) {
1881 if (ra != 31) {
1882 if (islit)
1883 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1884 else {
a7812ae4 1885 TCGv shift = tcg_temp_new();
30c7183b
AJ
1886 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1887 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1888 tcg_temp_free(shift);
dfaa8583 1889 }
30c7183b
AJ
1890 } else
1891 tcg_gen_movi_i64(cpu_ir[rc], 0);
1892 }
4c9649a9
JM
1893 break;
1894 case 0x52:
1895 /* MSKWH */
ffec44f1 1896 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1897 break;
1898 case 0x57:
1899 /* INSWH */
50eb6e5c 1900 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1901 break;
1902 case 0x5A:
1903 /* EXTWH */
377a43b6 1904 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1905 break;
1906 case 0x62:
1907 /* MSKLH */
ffec44f1 1908 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1909 break;
1910 case 0x67:
1911 /* INSLH */
50eb6e5c 1912 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1913 break;
1914 case 0x6A:
1915 /* EXTLH */
377a43b6 1916 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1917 break;
1918 case 0x72:
1919 /* MSKQH */
ffec44f1 1920 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1921 break;
1922 case 0x77:
1923 /* INSQH */
50eb6e5c 1924 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1925 break;
1926 case 0x7A:
1927 /* EXTQH */
377a43b6 1928 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1929 break;
1930 default:
1931 goto invalid_opc;
1932 }
1933 break;
1934 case 0x13:
1935 switch (fn7) {
1936 case 0x00:
1937 /* MULL */
30c7183b 1938 if (likely(rc != 31)) {
dfaa8583 1939 if (ra == 31)
30c7183b
AJ
1940 tcg_gen_movi_i64(cpu_ir[rc], 0);
1941 else {
1942 if (islit)
1943 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1944 else
1945 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1946 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1947 }
1948 }
4c9649a9
JM
1949 break;
1950 case 0x20:
1951 /* MULQ */
30c7183b 1952 if (likely(rc != 31)) {
dfaa8583 1953 if (ra == 31)
30c7183b
AJ
1954 tcg_gen_movi_i64(cpu_ir[rc], 0);
1955 else if (islit)
1956 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1957 else
1958 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1959 }
4c9649a9
JM
1960 break;
1961 case 0x30:
1962 /* UMULH */
a7812ae4 1963 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
1964 break;
1965 case 0x40:
1966 /* MULL/V */
a7812ae4 1967 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
1968 break;
1969 case 0x60:
1970 /* MULQ/V */
a7812ae4 1971 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1972 break;
1973 default:
1974 goto invalid_opc;
1975 }
1976 break;
1977 case 0x14:
f24518b5 1978 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
1979 case 0x04:
1980 /* ITOFS */
1981 if (!(ctx->amask & AMASK_FIX))
1982 goto invalid_opc;
f18cd223
AJ
1983 if (likely(rc != 31)) {
1984 if (ra != 31) {
a7812ae4 1985 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 1986 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
1987 gen_helper_memory_to_s(cpu_fir[rc], tmp);
1988 tcg_temp_free_i32(tmp);
f18cd223
AJ
1989 } else
1990 tcg_gen_movi_i64(cpu_fir[rc], 0);
1991 }
4c9649a9
JM
1992 break;
1993 case 0x0A:
1994 /* SQRTF */
1995 if (!(ctx->amask & AMASK_FIX))
1996 goto invalid_opc;
a7812ae4 1997 gen_fsqrtf(rb, rc);
4c9649a9
JM
1998 break;
1999 case 0x0B:
2000 /* SQRTS */
2001 if (!(ctx->amask & AMASK_FIX))
2002 goto invalid_opc;
f24518b5 2003 gen_fsqrts(ctx, rb, rc, fn11);
4c9649a9
JM
2004 break;
2005 case 0x14:
2006 /* ITOFF */
2007 if (!(ctx->amask & AMASK_FIX))
2008 goto invalid_opc;
f18cd223
AJ
2009 if (likely(rc != 31)) {
2010 if (ra != 31) {
a7812ae4 2011 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2012 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2013 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2014 tcg_temp_free_i32(tmp);
f18cd223
AJ
2015 } else
2016 tcg_gen_movi_i64(cpu_fir[rc], 0);
2017 }
4c9649a9
JM
2018 break;
2019 case 0x24:
2020 /* ITOFT */
2021 if (!(ctx->amask & AMASK_FIX))
2022 goto invalid_opc;
f18cd223
AJ
2023 if (likely(rc != 31)) {
2024 if (ra != 31)
2025 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2026 else
2027 tcg_gen_movi_i64(cpu_fir[rc], 0);
2028 }
4c9649a9
JM
2029 break;
2030 case 0x2A:
2031 /* SQRTG */
2032 if (!(ctx->amask & AMASK_FIX))
2033 goto invalid_opc;
a7812ae4 2034 gen_fsqrtg(rb, rc);
4c9649a9
JM
2035 break;
 2036 case 0x2B:
2037 /* SQRTT */
2038 if (!(ctx->amask & AMASK_FIX))
2039 goto invalid_opc;
f24518b5 2040 gen_fsqrtt(ctx, rb, rc, fn11);
4c9649a9
JM
2041 break;
2042 default:
2043 goto invalid_opc;
2044 }
2045 break;
2046 case 0x15:
2047 /* VAX floating point */
2048 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2049 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2050 case 0x00:
2051 /* ADDF */
a7812ae4 2052 gen_faddf(ra, rb, rc);
4c9649a9
JM
2053 break;
2054 case 0x01:
2055 /* SUBF */
a7812ae4 2056 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2057 break;
2058 case 0x02:
2059 /* MULF */
a7812ae4 2060 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2061 break;
2062 case 0x03:
2063 /* DIVF */
a7812ae4 2064 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2065 break;
2066 case 0x1E:
2067 /* CVTDG */
2068#if 0 // TODO
a7812ae4 2069 gen_fcvtdg(rb, rc);
4c9649a9
JM
2070#else
2071 goto invalid_opc;
2072#endif
2073 break;
2074 case 0x20:
2075 /* ADDG */
a7812ae4 2076 gen_faddg(ra, rb, rc);
4c9649a9
JM
2077 break;
2078 case 0x21:
2079 /* SUBG */
a7812ae4 2080 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2081 break;
2082 case 0x22:
2083 /* MULG */
a7812ae4 2084 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2085 break;
2086 case 0x23:
2087 /* DIVG */
a7812ae4 2088 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2089 break;
2090 case 0x25:
2091 /* CMPGEQ */
a7812ae4 2092 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2093 break;
2094 case 0x26:
2095 /* CMPGLT */
a7812ae4 2096 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2097 break;
2098 case 0x27:
2099 /* CMPGLE */
a7812ae4 2100 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2101 break;
2102 case 0x2C:
2103 /* CVTGF */
a7812ae4 2104 gen_fcvtgf(rb, rc);
4c9649a9
JM
2105 break;
2106 case 0x2D:
2107 /* CVTGD */
2108#if 0 // TODO
a7812ae4 2109 gen_fcvtgd(rb, rc);
4c9649a9
JM
2110#else
2111 goto invalid_opc;
2112#endif
2113 break;
2114 case 0x2F:
2115 /* CVTGQ */
a7812ae4 2116 gen_fcvtgq(rb, rc);
4c9649a9
JM
2117 break;
2118 case 0x3C:
2119 /* CVTQF */
a7812ae4 2120 gen_fcvtqf(rb, rc);
4c9649a9
JM
2121 break;
2122 case 0x3E:
2123 /* CVTQG */
a7812ae4 2124 gen_fcvtqg(rb, rc);
4c9649a9
JM
2125 break;
2126 default:
2127 goto invalid_opc;
2128 }
2129 break;
2130 case 0x16:
2131 /* IEEE floating-point */
f24518b5 2132 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2133 case 0x00:
2134 /* ADDS */
f24518b5 2135 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2136 break;
2137 case 0x01:
2138 /* SUBS */
f24518b5 2139 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2140 break;
2141 case 0x02:
2142 /* MULS */
f24518b5 2143 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2144 break;
2145 case 0x03:
2146 /* DIVS */
f24518b5 2147 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2148 break;
2149 case 0x20:
2150 /* ADDT */
f24518b5 2151 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2152 break;
2153 case 0x21:
2154 /* SUBT */
f24518b5 2155 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2156 break;
2157 case 0x22:
2158 /* MULT */
f24518b5 2159 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2160 break;
2161 case 0x23:
2162 /* DIVT */
f24518b5 2163 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2164 break;
2165 case 0x24:
2166 /* CMPTUN */
f24518b5 2167 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2168 break;
2169 case 0x25:
2170 /* CMPTEQ */
f24518b5 2171 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2172 break;
2173 case 0x26:
2174 /* CMPTLT */
f24518b5 2175 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2176 break;
2177 case 0x27:
2178 /* CMPTLE */
f24518b5 2179 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2180 break;
2181 case 0x2C:
a74b4d2c 2182 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2183 /* CVTST */
f24518b5 2184 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2185 } else {
2186 /* CVTTS */
f24518b5 2187 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2188 }
2189 break;
2190 case 0x2F:
2191 /* CVTTQ */
f24518b5 2192 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2193 break;
2194 case 0x3C:
2195 /* CVTQS */
f24518b5 2196 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2197 break;
2198 case 0x3E:
2199 /* CVTQT */
f24518b5 2200 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2201 break;
2202 default:
2203 goto invalid_opc;
2204 }
2205 break;
2206 case 0x17:
2207 switch (fn11) {
2208 case 0x010:
2209 /* CVTLQ */
a7812ae4 2210 gen_fcvtlq(rb, rc);
4c9649a9
JM
2211 break;
2212 case 0x020:
f18cd223 2213 if (likely(rc != 31)) {
a06d48d9 2214 if (ra == rb) {
4c9649a9 2215 /* FMOV */
a06d48d9
RH
2216 if (ra == 31)
2217 tcg_gen_movi_i64(cpu_fir[rc], 0);
2218 else
2219 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2220 } else {
f18cd223 2221 /* CPYS */
a7812ae4 2222 gen_fcpys(ra, rb, rc);
a06d48d9 2223 }
4c9649a9
JM
2224 }
2225 break;
2226 case 0x021:
2227 /* CPYSN */
a7812ae4 2228 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2229 break;
2230 case 0x022:
2231 /* CPYSE */
a7812ae4 2232 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2233 break;
2234 case 0x024:
2235 /* MT_FPCR */
f18cd223 2236 if (likely(ra != 31))
a7812ae4 2237 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2238 else {
2239 TCGv tmp = tcg_const_i64(0);
a7812ae4 2240 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2241 tcg_temp_free(tmp);
2242 }
4c9649a9
JM
2243 break;
2244 case 0x025:
2245 /* MF_FPCR */
f18cd223 2246 if (likely(ra != 31))
a7812ae4 2247 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2248 break;
2249 case 0x02A:
2250 /* FCMOVEQ */
dbb30fe6 2251 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2252 break;
2253 case 0x02B:
2254 /* FCMOVNE */
dbb30fe6 2255 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2256 break;
2257 case 0x02C:
2258 /* FCMOVLT */
dbb30fe6 2259 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2260 break;
2261 case 0x02D:
2262 /* FCMOVGE */
dbb30fe6 2263 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2264 break;
2265 case 0x02E:
2266 /* FCMOVLE */
dbb30fe6 2267 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2268 break;
2269 case 0x02F:
2270 /* FCMOVGT */
dbb30fe6 2271 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2272 break;
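        /* FCMOVxx copies Fb into Fc when Fa satisfies the condition against
           zero.  The calls above pass the inverted TCG condition, presumably
           so that gen_fcmov can branch around the move when the condition
           fails, roughly:

               if (fa <inverted cond> 0) goto skip;
               fc = fb;
               skip: ;
        */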
2273 case 0x030:
2274 /* CVTQL */
a7812ae4 2275 gen_fcvtql(rb, rc);
4c9649a9
JM
2276 break;
2277 case 0x130:
2278 /* CVTQL/V */
4c9649a9
JM
2279 case 0x530:
2280 /* CVTQL/SV */
735cf45f
RH
 2281 /* ??? I'm pretty sure there's nothing that /sv needs to do that
 2282 /v doesn't already do. The only thing I can think of is that
 2283 /sv exists merely for completeness in the ISA. */
2284 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2285 break;
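        /* CVTQL repacks the low 32 bits of the quadword in Fb into the
           longword-in-register layout: bits <31:30> go to <63:62> and bits
           <29:0> go to <58:29>.  A sketch of that bit shuffle:

               fc = ((fb & 0xC0000000ull) << 32) | ((fb & 0x3FFFFFFFull) << 29);

           The /V and /SV forms additionally raise an integer-overflow
           arithmetic trap when the source value does not fit in 32 bits,
           which is what gen_fcvtql_v checks for.
        */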
2286 default:
2287 goto invalid_opc;
2288 }
2289 break;
2290 case 0x18:
2291 switch ((uint16_t)disp16) {
2292 case 0x0000:
2293 /* TRAPB */
2294 /* No-op. Just exit from the current tb */
2295 ret = 2;
2296 break;
2297 case 0x0400:
2298 /* EXCB */
2299 /* No-op. Just exit from the current tb */
2300 ret = 2;
2301 break;
2302 case 0x4000:
2303 /* MB */
2304 /* No-op */
2305 break;
2306 case 0x4400:
2307 /* WMB */
2308 /* No-op */
2309 break;
2310 case 0x8000:
2311 /* FETCH */
2312 /* No-op */
2313 break;
2314 case 0xA000:
2315 /* FETCH_M */
2316 /* No-op */
2317 break;
2318 case 0xC000:
2319 /* RPCC */
3761035f 2320 if (ra != 31)
a7812ae4 2321 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2322 break;
2323 case 0xE000:
2324 /* RC */
3761035f 2325 if (ra != 31)
a7812ae4 2326 gen_helper_rc(cpu_ir[ra]);
4c9649a9
JM
2327 break;
2328 case 0xE800:
2329 /* ECB */
4c9649a9
JM
2330 break;
2331 case 0xF000:
2332 /* RS */
3761035f 2333 if (ra != 31)
a7812ae4 2334 gen_helper_rs(cpu_ir[ra]);
4c9649a9
JM
2335 break;
2336 case 0xF800:
2337 /* WH64 */
2338 /* No-op */
2339 break;
2340 default:
2341 goto invalid_opc;
2342 }
2343 break;
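        /* TRAPB and EXCB are treated as no-ops that merely end the TB
           (ret = 2 above); MB, WMB, FETCH and WH64 need no generated code
           because this translator runs guest code single-threaded.  An
           SMP-aware version would presumably emit a real barrier here; newer
           TCG (not available in this version) would express that as:

               tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        */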
2344 case 0x19:
2345 /* HW_MFPR (PALcode) */
2346#if defined (CONFIG_USER_ONLY)
2347 goto invalid_opc;
2348#else
2349 if (!ctx->pal_mode)
2350 goto invalid_opc;
8bb6e981
AJ
2351 if (ra != 31) {
 2352 TCGv tmp = tcg_const_i64(insn & 0xFF);
a7812ae4 2353 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
8bb6e981
AJ
2354 tcg_temp_free(tmp);
2355 }
4c9649a9
JM
2356 break;
2357#endif
2358 case 0x1A:
3761035f
AJ
2359 if (rb != 31)
2360 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2361 else
2362 tcg_gen_movi_i64(cpu_pc, 0);
1304ca87
AJ
2363 if (ra != 31)
2364 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
4c9649a9
JM
 2365 /* These four jumps differ only in the branch-prediction hint */
2366 switch (fn2) {
2367 case 0x0:
2368 /* JMP */
2369 break;
2370 case 0x1:
2371 /* JSR */
2372 break;
2373 case 0x2:
2374 /* RET */
2375 break;
2376 case 0x3:
2377 /* JSR_COROUTINE */
2378 break;
2379 }
2380 ret = 1;
2381 break;
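        /* All four forms behave identically here (fn2 is the two-bit hint
           field of the instruction): the target comes from Rb with the low
           two bits cleared, and the return address, which is already the
           next instruction's PC in ctx->pc, goes to Ra.  Roughly:

               new_pc = rb & ~3;
               ra     = ctx->pc;
        */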
2382 case 0x1B:
2383 /* HW_LD (PALcode) */
2384#if defined (CONFIG_USER_ONLY)
2385 goto invalid_opc;
2386#else
2387 if (!ctx->pal_mode)
2388 goto invalid_opc;
8bb6e981 2389 if (ra != 31) {
a7812ae4 2390 TCGv addr = tcg_temp_new();
8bb6e981
AJ
2391 if (rb != 31)
2392 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2393 else
2394 tcg_gen_movi_i64(addr, disp12);
2395 switch ((insn >> 12) & 0xF) {
2396 case 0x0:
b5d51029 2397 /* Longword physical access (hw_ldl/p) */
a7812ae4 2398 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2399 break;
2400 case 0x1:
b5d51029 2401 /* Quadword physical access (hw_ldq/p) */
a7812ae4 2402 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2403 break;
2404 case 0x2:
b5d51029 2405 /* Longword physical access with lock (hw_ldl_l/p) */
a7812ae4 2406 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2407 break;
2408 case 0x3:
b5d51029 2409 /* Quadword physical access with lock (hw_ldq_l/p) */
a7812ae4 2410 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2411 break;
2412 case 0x4:
b5d51029
AJ
2413 /* Longword virtual PTE fetch (hw_ldl/v) */
2414 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2415 break;
2416 case 0x5:
b5d51029
AJ
2417 /* Quadword virtual PTE fetch (hw_ldq/v) */
2418 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2419 break;
2420 case 0x6:
 2421 /* Invalid */
b5d51029 2422 goto invalid_opc;
8bb6e981
AJ
2423 case 0x7:
 2424 /* Invalid */
b5d51029 2425 goto invalid_opc;
8bb6e981 2426 case 0x8:
b5d51029 2427 /* Longword virtual access (hw_ldl) */
a7812ae4
PB
2428 gen_helper_st_virt_to_phys(addr, addr);
2429 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2430 break;
2431 case 0x9:
b5d51029 2432 /* Quadword virtual access (hw_ldq) */
a7812ae4
PB
2433 gen_helper_st_virt_to_phys(addr, addr);
2434 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2435 break;
2436 case 0xA:
b5d51029
AJ
2437 /* Longword virtual access with protection check (hw_ldl/w) */
2438 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2439 break;
2440 case 0xB:
b5d51029
AJ
2441 /* Quadword virtual access with protection check (hw_ldq/w) */
2442 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2443 break;
2444 case 0xC:
b5d51029 2445 /* Longword virtual access with alt access mode (hw_ldl/a)*/
a7812ae4
PB
2446 gen_helper_set_alt_mode();
2447 gen_helper_st_virt_to_phys(addr, addr);
2448 gen_helper_ldl_raw(cpu_ir[ra], addr);
2449 gen_helper_restore_mode();
8bb6e981
AJ
2450 break;
2451 case 0xD:
b5d51029 2452 /* Quadword virtual access with alt access mode (hw_ldq/a) */
a7812ae4
PB
2453 gen_helper_set_alt_mode();
2454 gen_helper_st_virt_to_phys(addr, addr);
2455 gen_helper_ldq_raw(cpu_ir[ra], addr);
2456 gen_helper_restore_mode();
8bb6e981
AJ
2457 break;
2458 case 0xE:
2459 /* Longword virtual access with alternate access mode and
b5d51029 2460 * protection checks (hw_ldl/wa)
8bb6e981 2461 */
a7812ae4
PB
2462 gen_helper_set_alt_mode();
2463 gen_helper_ldl_data(cpu_ir[ra], addr);
2464 gen_helper_restore_mode();
8bb6e981
AJ
2465 break;
2466 case 0xF:
2467 /* Quadword virtual access with alternate access mode and
b5d51029 2468 * protection checks (hw_ldq/wa)
8bb6e981 2469 */
a7812ae4
PB
2470 gen_helper_set_alt_mode();
2471 gen_helper_ldq_data(cpu_ir[ra], addr);
2472 gen_helper_restore_mode();
8bb6e981
AJ
2473 break;
2474 }
2475 tcg_temp_free(addr);
4c9649a9 2476 }
4c9649a9
JM
2477 break;
2478#endif
2479 case 0x1C:
2480 switch (fn7) {
2481 case 0x00:
2482 /* SEXTB */
2483 if (!(ctx->amask & AMASK_BWX))
2484 goto invalid_opc;
ae8ecd42
AJ
2485 if (likely(rc != 31)) {
2486 if (islit)
2487 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2488 else
dfaa8583 2489 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2490 }
4c9649a9
JM
2491 break;
2492 case 0x01:
2493 /* SEXTW */
2494 if (!(ctx->amask & AMASK_BWX))
2495 goto invalid_opc;
ae8ecd42
AJ
2496 if (likely(rc != 31)) {
2497 if (islit)
2498 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
ae8ecd42 2499 else
dfaa8583 2500 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2501 }
4c9649a9
JM
2502 break;
2503 case 0x30:
2504 /* CTPOP */
2505 if (!(ctx->amask & AMASK_CIX))
2506 goto invalid_opc;
ae8ecd42
AJ
2507 if (likely(rc != 31)) {
2508 if (islit)
2509 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
ae8ecd42 2510 else
a7812ae4 2511 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2512 }
4c9649a9
JM
2513 break;
2514 case 0x31:
2515 /* PERR */
2516 if (!(ctx->amask & AMASK_MVI))
2517 goto invalid_opc;
13e4df99 2518 gen_perr(ra, rb, rc, islit, lit);
4c9649a9
JM
2519 break;
2520 case 0x32:
2521 /* CTLZ */
2522 if (!(ctx->amask & AMASK_CIX))
2523 goto invalid_opc;
ae8ecd42
AJ
2524 if (likely(rc != 31)) {
2525 if (islit)
2526 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
ae8ecd42 2527 else
a7812ae4 2528 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2529 }
4c9649a9
JM
2530 break;
2531 case 0x33:
2532 /* CTTZ */
2533 if (!(ctx->amask & AMASK_CIX))
2534 goto invalid_opc;
ae8ecd42
AJ
2535 if (likely(rc != 31)) {
2536 if (islit)
2537 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
ae8ecd42 2538 else
a7812ae4 2539 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2540 }
4c9649a9
JM
2541 break;
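        /* For CTPOP/CTLZ/CTTZ with a literal operand the count is simply
           folded at translation time via the host-utils helpers ctpop64,
           clz64 and ctz64.  A portable sketch of what ctpop64 computes:

               int n = 0;
               while (v) { v &= v - 1; n++; }   // n == ctpop64(v)
        */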
2542 case 0x34:
2543 /* UNPKBW */
2544 if (!(ctx->amask & AMASK_MVI))
2545 goto invalid_opc;
13e4df99
RH
2546 if (real_islit || ra != 31)
2547 goto invalid_opc;
2548 gen_unpkbw (rb, rc);
4c9649a9
JM
2549 break;
2550 case 0x35:
13e4df99 2551 /* UNPKBL */
4c9649a9
JM
2552 if (!(ctx->amask & AMASK_MVI))
2553 goto invalid_opc;
13e4df99
RH
2554 if (real_islit || ra != 31)
2555 goto invalid_opc;
2556 gen_unpkbl (rb, rc);
4c9649a9
JM
2557 break;
2558 case 0x36:
2559 /* PKWB */
2560 if (!(ctx->amask & AMASK_MVI))
2561 goto invalid_opc;
13e4df99
RH
2562 if (real_islit || ra != 31)
2563 goto invalid_opc;
2564 gen_pkwb (rb, rc);
4c9649a9
JM
2565 break;
2566 case 0x37:
2567 /* PKLB */
2568 if (!(ctx->amask & AMASK_MVI))
2569 goto invalid_opc;
13e4df99
RH
2570 if (real_islit || ra != 31)
2571 goto invalid_opc;
2572 gen_pklb (rb, rc);
4c9649a9
JM
2573 break;
2574 case 0x38:
2575 /* MINSB8 */
2576 if (!(ctx->amask & AMASK_MVI))
2577 goto invalid_opc;
13e4df99 2578 gen_minsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2579 break;
2580 case 0x39:
2581 /* MINSW4 */
2582 if (!(ctx->amask & AMASK_MVI))
2583 goto invalid_opc;
13e4df99 2584 gen_minsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2585 break;
2586 case 0x3A:
2587 /* MINUB8 */
2588 if (!(ctx->amask & AMASK_MVI))
2589 goto invalid_opc;
13e4df99 2590 gen_minub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2591 break;
2592 case 0x3B:
2593 /* MINUW4 */
2594 if (!(ctx->amask & AMASK_MVI))
2595 goto invalid_opc;
13e4df99 2596 gen_minuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2597 break;
2598 case 0x3C:
2599 /* MAXUB8 */
2600 if (!(ctx->amask & AMASK_MVI))
2601 goto invalid_opc;
13e4df99 2602 gen_maxub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2603 break;
2604 case 0x3D:
2605 /* MAXUW4 */
2606 if (!(ctx->amask & AMASK_MVI))
2607 goto invalid_opc;
13e4df99 2608 gen_maxuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2609 break;
2610 case 0x3E:
2611 /* MAXSB8 */
2612 if (!(ctx->amask & AMASK_MVI))
2613 goto invalid_opc;
13e4df99 2614 gen_maxsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2615 break;
2616 case 0x3F:
2617 /* MAXSW4 */
2618 if (!(ctx->amask & AMASK_MVI))
2619 goto invalid_opc;
13e4df99 2620 gen_maxsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2621 break;
2622 case 0x70:
2623 /* FTOIT */
2624 if (!(ctx->amask & AMASK_FIX))
2625 goto invalid_opc;
f18cd223
AJ
2626 if (likely(rc != 31)) {
2627 if (ra != 31)
2628 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2629 else
2630 tcg_gen_movi_i64(cpu_ir[rc], 0);
2631 }
4c9649a9
JM
2632 break;
2633 case 0x78:
2634 /* FTOIS */
2635 if (!(ctx->amask & AMASK_FIX))
2636 goto invalid_opc;
f18cd223 2637 if (rc != 31) {
a7812ae4 2638 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 2639 if (ra != 31)
a7812ae4 2640 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
2641 else {
2642 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2643 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
2644 tcg_temp_free(tmp2);
2645 }
2646 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 2647 tcg_temp_free_i32(tmp1);
f18cd223 2648 }
4c9649a9
JM
2649 break;
2650 default:
2651 goto invalid_opc;
2652 }
2653 break;
2654 case 0x1D:
2655 /* HW_MTPR (PALcode) */
2656#if defined (CONFIG_USER_ONLY)
2657 goto invalid_opc;
2658#else
2659 if (!ctx->pal_mode)
2660 goto invalid_opc;
8bb6e981
AJ
2661 else {
 2662 TCGv tmp1 = tcg_const_i64(insn & 0xFF);
2663 if (ra != 31)
a7812ae4 2664 gen_helper_mtpr(tmp1, cpu_ir[ra]);
8bb6e981
AJ
2665 else {
2666 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2667 gen_helper_mtpr(tmp1, tmp2);
8bb6e981
AJ
2668 tcg_temp_free(tmp2);
2669 }
2670 tcg_temp_free(tmp1);
2671 ret = 2;
2672 }
4c9649a9
JM
2673 break;
2674#endif
2675 case 0x1E:
2676 /* HW_REI (PALcode) */
2677#if defined (CONFIG_USER_ONLY)
2678 goto invalid_opc;
2679#else
2680 if (!ctx->pal_mode)
2681 goto invalid_opc;
2682 if (rb == 31) {
2683 /* "Old" alpha */
a7812ae4 2684 gen_helper_hw_rei();
4c9649a9 2685 } else {
8bb6e981
AJ
2686 TCGv tmp;
2687
2688 if (ra != 31) {
a7812ae4 2689 tmp = tcg_temp_new();
8bb6e981
AJ
2690 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2691 } else
2692 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
a7812ae4 2693 gen_helper_hw_ret(tmp);
8bb6e981 2694 tcg_temp_free(tmp);
4c9649a9
JM
2695 }
2696 ret = 2;
2697 break;
2698#endif
2699 case 0x1F:
2700 /* HW_ST (PALcode) */
2701#if defined (CONFIG_USER_ONLY)
2702 goto invalid_opc;
2703#else
2704 if (!ctx->pal_mode)
2705 goto invalid_opc;
8bb6e981
AJ
2706 else {
2707 TCGv addr, val;
a7812ae4 2708 addr = tcg_temp_new();
8bb6e981
AJ
2709 if (rb != 31)
2710 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2711 else
2712 tcg_gen_movi_i64(addr, disp12);
2713 if (ra != 31)
2714 val = cpu_ir[ra];
2715 else {
a7812ae4 2716 val = tcg_temp_new();
8bb6e981
AJ
2717 tcg_gen_movi_i64(val, 0);
2718 }
2719 switch ((insn >> 12) & 0xF) {
2720 case 0x0:
2721 /* Longword physical access */
a7812ae4 2722 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2723 break;
2724 case 0x1:
2725 /* Quadword physical access */
a7812ae4 2726 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2727 break;
2728 case 0x2:
2729 /* Longword physical access with lock */
a7812ae4 2730 gen_helper_stl_c_raw(val, val, addr);
8bb6e981
AJ
2731 break;
2732 case 0x3:
2733 /* Quadword physical access with lock */
a7812ae4 2734 gen_helper_stq_c_raw(val, val, addr);
8bb6e981
AJ
2735 break;
2736 case 0x4:
2737 /* Longword virtual access */
a7812ae4
PB
2738 gen_helper_st_virt_to_phys(addr, addr);
2739 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2740 break;
2741 case 0x5:
2742 /* Quadword virtual access */
a7812ae4
PB
2743 gen_helper_st_virt_to_phys(addr, addr);
2744 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2745 break;
2746 case 0x6:
2747 /* Invalid */
2748 goto invalid_opc;
2749 case 0x7:
2750 /* Invalid */
2751 goto invalid_opc;
2752 case 0x8:
2753 /* Invalid */
2754 goto invalid_opc;
2755 case 0x9:
2756 /* Invalid */
2757 goto invalid_opc;
2758 case 0xA:
2759 /* Invalid */
2760 goto invalid_opc;
2761 case 0xB:
2762 /* Invalid */
2763 goto invalid_opc;
2764 case 0xC:
2765 /* Longword virtual access with alternate access mode */
a7812ae4
PB
2766 gen_helper_set_alt_mode();
2767 gen_helper_st_virt_to_phys(addr, addr);
2768 gen_helper_stl_raw(val, addr);
2769 gen_helper_restore_mode();
8bb6e981
AJ
2770 break;
2771 case 0xD:
2772 /* Quadword virtual access with alternate access mode */
a7812ae4
PB
2773 gen_helper_set_alt_mode();
2774 gen_helper_st_virt_to_phys(addr, addr);
 2775 gen_helper_stq_raw(val, addr);
2776 gen_helper_restore_mode();
8bb6e981
AJ
2777 break;
2778 case 0xE:
2779 /* Invalid */
2780 goto invalid_opc;
2781 case 0xF:
2782 /* Invalid */
2783 goto invalid_opc;
2784 }
45d46ce8 2785 if (ra == 31)
8bb6e981
AJ
2786 tcg_temp_free(val);
2787 tcg_temp_free(addr);
4c9649a9 2788 }
4c9649a9
JM
2789 break;
2790#endif
2791 case 0x20:
2792 /* LDF */
f18cd223 2793 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2794 break;
2795 case 0x21:
2796 /* LDG */
f18cd223 2797 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2798 break;
2799 case 0x22:
2800 /* LDS */
f18cd223 2801 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2802 break;
2803 case 0x23:
2804 /* LDT */
f18cd223 2805 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2806 break;
2807 case 0x24:
2808 /* STF */
57a92c8e 2809 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2810 break;
2811 case 0x25:
2812 /* STG */
57a92c8e 2813 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2814 break;
2815 case 0x26:
2816 /* STS */
57a92c8e 2817 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2818 break;
2819 case 0x27:
2820 /* STT */
57a92c8e 2821 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2822 break;
2823 case 0x28:
2824 /* LDL */
f18cd223 2825 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2826 break;
2827 case 0x29:
2828 /* LDQ */
f18cd223 2829 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2830 break;
2831 case 0x2A:
2832 /* LDL_L */
f4ed8679 2833 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2834 break;
2835 case 0x2B:
2836 /* LDQ_L */
f4ed8679 2837 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2838 break;
2839 case 0x2C:
2840 /* STL */
57a92c8e 2841 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
2842 break;
2843 case 0x2D:
2844 /* STQ */
57a92c8e 2845 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
2846 break;
2847 case 0x2E:
2848 /* STL_C */
57a92c8e 2849 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
2850 break;
2851 case 0x2F:
2852 /* STQ_C */
57a92c8e 2853 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
2854 break;
2855 case 0x30:
2856 /* BR */
3761035f
AJ
2857 if (ra != 31)
2858 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2859 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
4c9649a9
JM
2860 ret = 1;
2861 break;
a7812ae4 2862 case 0x31: /* FBEQ */
dbb30fe6
RH
2863 gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2864 ret = 1;
2865 break;
a7812ae4 2866 case 0x32: /* FBLT */
dbb30fe6
RH
2867 gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2868 ret = 1;
2869 break;
a7812ae4 2870 case 0x33: /* FBLE */
dbb30fe6 2871 gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
2872 ret = 1;
2873 break;
2874 case 0x34:
2875 /* BSR */
3761035f
AJ
2876 if (ra != 31)
2877 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2878 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
4c9649a9
JM
2879 ret = 1;
2880 break;
a7812ae4 2881 case 0x35: /* FBNE */
dbb30fe6
RH
2882 gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2883 ret = 1;
2884 break;
a7812ae4 2885 case 0x36: /* FBGE */
dbb30fe6
RH
2886 gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2887 ret = 1;
2888 break;
a7812ae4 2889 case 0x37: /* FBGT */
dbb30fe6 2890 gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
2891 ret = 1;
2892 break;
2893 case 0x38:
2894 /* BLBC */
a1516744 2895 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
2896 ret = 1;
2897 break;
2898 case 0x39:
2899 /* BEQ */
a1516744 2900 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
2901 ret = 1;
2902 break;
2903 case 0x3A:
2904 /* BLT */
a1516744 2905 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
2906 ret = 1;
2907 break;
2908 case 0x3B:
2909 /* BLE */
a1516744 2910 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
2911 ret = 1;
2912 break;
2913 case 0x3C:
2914 /* BLBS */
a1516744 2915 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
2916 ret = 1;
2917 break;
2918 case 0x3D:
2919 /* BNE */
a1516744 2920 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
2921 ret = 1;
2922 break;
2923 case 0x3E:
2924 /* BGE */
a1516744 2925 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
2926 ret = 1;
2927 break;
2928 case 0x3F:
2929 /* BGT */
a1516744 2930 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
2931 ret = 1;
2932 break;
2933 invalid_opc:
2934 gen_invalid(ctx);
2935 ret = 3;
2936 break;
2937 }
2938
2939 return ret;
2940}
2941
636aa200
BS
2942static inline void gen_intermediate_code_internal(CPUState *env,
2943 TranslationBlock *tb,
2944 int search_pc)
4c9649a9 2945{
4c9649a9
JM
2946 DisasContext ctx, *ctxp = &ctx;
2947 target_ulong pc_start;
2948 uint32_t insn;
2949 uint16_t *gen_opc_end;
a1d1bb31 2950 CPUBreakpoint *bp;
4c9649a9
JM
2951 int j, lj = -1;
2952 int ret;
2e70f6ef
PB
2953 int num_insns;
2954 int max_insns;
4c9649a9
JM
2955
2956 pc_start = tb->pc;
4c9649a9 2957 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4c9649a9
JM
2958 ctx.pc = pc_start;
2959 ctx.amask = env->amask;
8579095b 2960 ctx.env = env;
4c9649a9
JM
2961#if defined (CONFIG_USER_ONLY)
2962 ctx.mem_idx = 0;
2963#else
2964 ctx.mem_idx = ((env->ps >> 3) & 3);
2965 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
2966#endif
f24518b5
RH
2967
2968 /* ??? Every TB begins with unset rounding mode, to be initialized on
 2969 the first fp insn of the TB. Alternatively we could define a proper
2970 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2971 to reset the FP_STATUS to that default at the end of any TB that
 2972 changes the default. We could even (gasp) dynamically figure out
2973 what default would be most efficient given the running program. */
2974 ctx.tb_rm = -1;
2975 /* Similarly for flush-to-zero. */
2976 ctx.tb_ftz = -1;
2977
2e70f6ef
PB
2978 num_insns = 0;
2979 max_insns = tb->cflags & CF_COUNT_MASK;
2980 if (max_insns == 0)
2981 max_insns = CF_COUNT_MASK;
2982
2983 gen_icount_start();
4c9649a9 2984 for (ret = 0; ret == 0;) {
72cf2d4f
BS
2985 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2986 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 2987 if (bp->pc == ctx.pc) {
4c9649a9
JM
2988 gen_excp(&ctx, EXCP_DEBUG, 0);
2989 break;
2990 }
2991 }
2992 }
2993 if (search_pc) {
2994 j = gen_opc_ptr - gen_opc_buf;
2995 if (lj < j) {
2996 lj++;
2997 while (lj < j)
2998 gen_opc_instr_start[lj++] = 0;
4c9649a9 2999 }
ed1dda53
AJ
3000 gen_opc_pc[lj] = ctx.pc;
3001 gen_opc_instr_start[lj] = 1;
3002 gen_opc_icount[lj] = num_insns;
4c9649a9 3003 }
2e70f6ef
PB
3004 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3005 gen_io_start();
4c9649a9 3006 insn = ldl_code(ctx.pc);
2e70f6ef 3007 num_insns++;
c4b3be39
RH
3008
3009 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3010 tcg_gen_debug_insn_start(ctx.pc);
3011 }
3012
4c9649a9
JM
3013 ctx.pc += 4;
3014 ret = translate_one(ctxp, insn);
3015 if (ret != 0)
3016 break;
3017 /* if we reach a page boundary or are single stepping, stop
3018 * generation
3019 */
19bf517b
AJ
3020 if (env->singlestep_enabled) {
3021 gen_excp(&ctx, EXCP_DEBUG, 0);
3022 break;
1b530a6d 3023 }
19bf517b 3024
8fcc55f9
AJ
3025 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
3026 break;
3027
3028 if (gen_opc_ptr >= gen_opc_end)
3029 break;
3030
3031 if (num_insns >= max_insns)
3032 break;
3033
1b530a6d
AJ
3034 if (singlestep) {
3035 break;
3036 }
4c9649a9
JM
3037 }
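    /* A reading of the ret protocol used by translate_one in the loop above:
         0 - keep translating;
         1 - branch: cpu_pc has already been written, do not overwrite it;
         2 - stop the TB (TRAPB, HW_MTPR, HW_REI, ...), cpu_pc still needs
             the store below;
         3 - invalid opcode: gen_invalid raised the exception, no PC store.  */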
3038 if (ret != 1 && ret != 3) {
496cb5b9 3039 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4c9649a9 3040 }
2e70f6ef
PB
3041 if (tb->cflags & CF_LAST_IO)
3042 gen_io_end();
4c9649a9 3043 /* Generate the return instruction */
57fec1fe 3044 tcg_gen_exit_tb(0);
2e70f6ef 3045 gen_icount_end(tb, num_insns);
4c9649a9
JM
3046 *gen_opc_ptr = INDEX_op_end;
3047 if (search_pc) {
3048 j = gen_opc_ptr - gen_opc_buf;
3049 lj++;
3050 while (lj <= j)
3051 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3052 } else {
3053 tb->size = ctx.pc - pc_start;
2e70f6ef 3054 tb->icount = num_insns;
4c9649a9 3055 }
806991da 3056#ifdef DEBUG_DISAS
8fec2b8c 3057 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3058 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3059 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3060 qemu_log("\n");
4c9649a9 3061 }
4c9649a9 3062#endif
4c9649a9
JM
3063}
3064
2cfc5f17 3065void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3066{
2cfc5f17 3067 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3068}
3069
2cfc5f17 3070void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3071{
2cfc5f17 3072 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3073}
3074
a964acc6
RH
3075struct cpu_def_t {
3076 const char *name;
3077 int implver, amask;
3078};
3079
3080static const struct cpu_def_t cpu_defs[] = {
3081 { "ev4", IMPLVER_2106x, 0 },
3082 { "ev5", IMPLVER_21164, 0 },
3083 { "ev56", IMPLVER_21164, AMASK_BWX },
3084 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3085 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3086 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3087 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3088 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3089 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3090 { "21064", IMPLVER_2106x, 0 },
3091 { "21164", IMPLVER_21164, 0 },
3092 { "21164a", IMPLVER_21164, AMASK_BWX },
3093 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3094 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3095 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3096 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3097};
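/* cpu_alpha_init below matches the -cpu model name against this table and
   falls back to ev67-class defaults (all AMASK features set) when the name
   is unknown.  A hypothetical caller, for illustration:

       CPUAlphaState *env = cpu_alpha_init("ev56");   // BWX only
*/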
3098
aaed909a 3099CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3100{
3101 CPUAlphaState *env;
a964acc6 3102 int implver, amask, i, max;
4c9649a9
JM
3103
3104 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3105 cpu_exec_init(env);
2e70f6ef 3106 alpha_translate_init();
4c9649a9 3107 tlb_flush(env, 1);
a964acc6
RH
3108
3109 /* Default to ev67; no reason not to emulate insns by default. */
3110 implver = IMPLVER_21264;
3111 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3112 | AMASK_TRAP | AMASK_PREFETCH);
3113
3114 max = ARRAY_SIZE(cpu_defs);
3115 for (i = 0; i < max; i++) {
3116 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3117 implver = cpu_defs[i].implver;
3118 amask = cpu_defs[i].amask;
3119 break;
3120 }
3121 }
3122 env->implver = implver;
3123 env->amask = amask;
3124
4c9649a9
JM
3125 env->ps = 0x1F00;
3126#if defined (CONFIG_USER_ONLY)
3127 env->ps |= 1 << 3;
2edd07ef
RH
3128 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3129 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3130#else
4c9649a9 3131 pal_init(env);
6049f4f8 3132#endif
dad081ee 3133
4c9649a9 3134 /* Initialize IPR */
dad081ee
RH
3135#if defined (CONFIG_USER_ONLY)
3136 env->ipr[IPR_EXC_ADDR] = 0;
3137 env->ipr[IPR_EXC_SUM] = 0;
3138 env->ipr[IPR_EXC_MASK] = 0;
3139#else
3140 {
3141 uint64_t hwpcb;
3142 hwpcb = env->ipr[IPR_PCBB];
3143 env->ipr[IPR_ASN] = 0;
3144 env->ipr[IPR_ASTEN] = 0;
3145 env->ipr[IPR_ASTSR] = 0;
3146 env->ipr[IPR_DATFX] = 0;
3147 /* XXX: fix this */
3148 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3149 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3150 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3151 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3152 env->ipr[IPR_FEN] = 0;
3153 env->ipr[IPR_IPL] = 31;
3154 env->ipr[IPR_MCES] = 0;
3155 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3156 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3157 env->ipr[IPR_SISR] = 0;
3158 env->ipr[IPR_VIRBND] = -1ULL;
3159 }
3160#endif
4c9649a9 3161
0bf46a40 3162 qemu_init_vcpu(env);
4c9649a9
JM
3163 return env;
3164}
aaed909a 3165
d2856f1a
AJ
3166void gen_pc_load(CPUState *env, TranslationBlock *tb,
3167 unsigned long searched_pc, int pc_pos, void *puc)
3168{
3169 env->pc = gen_opc_pc[pc_pos];
3170}