1/*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <stdint.h>
21#include <stdlib.h>
22#include <stdio.h>
23
24#include "cpu.h"
25#include "exec-all.h"
26#include "disas.h"
27#include "host-utils.h"
28#include "tcg-op.h"
29#include "qemu-common.h"
30
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
35#undef ALPHA_DEBUG_DISAS
36#define CONFIG_SOFTFLOAT_INLINE
37
38#ifdef ALPHA_DEBUG_DISAS
39# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40#else
41# define LOG_DISAS(...) do { } while (0)
42#endif
43
44typedef struct DisasContext DisasContext;
45struct DisasContext {
46 uint64_t pc;
47 int mem_idx;
48#if !defined (CONFIG_USER_ONLY)
49 int pal_mode;
50#endif
51 CPUAlphaState *env;
52 uint32_t amask;
53
54 /* Current rounding mode for this TB. */
55 int tb_rm;
56 /* Current flush-to-zero setting for this TB. */
57 int tb_ftz;
58};
59
60/* global register indexes */
61static TCGv_ptr cpu_env;
62static TCGv cpu_ir[31];
63static TCGv cpu_fir[31];
64static TCGv cpu_pc;
65static TCGv cpu_lock;
66#ifdef CONFIG_USER_ONLY
67static TCGv cpu_uniq;
68#endif
69
70/* register names */
71static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
72
73#include "gen-icount.h"
74
75static void alpha_translate_init(void)
76{
77 int i;
78 char *p;
79 static int done_init = 0;
80
81 if (done_init)
82 return;
83
84 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
85
86 p = cpu_reg_names;
87 for (i = 0; i < 31; i++) {
88 sprintf(p, "ir%d", i);
89 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
90 offsetof(CPUState, ir[i]), p);
91 p += (i < 10) ? 4 : 5;
92
93 sprintf(p, "fir%d", i);
94 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
95 offsetof(CPUState, fir[i]), p);
96 p += (i < 10) ? 5 : 6;
97 }
98
99 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
100 offsetof(CPUState, pc), "pc");
101
102 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
103 offsetof(CPUState, lock), "lock");
104
105#ifdef CONFIG_USER_ONLY
106 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
107 offsetof(CPUState, unique), "uniq");
108#endif
109
110 /* register helpers */
111#define GEN_HELPER 2
112#include "helper.h"
113
114 done_init = 1;
115}
116
117static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
118{
119 TCGv_i32 tmp1, tmp2;
120
121 tcg_gen_movi_i64(cpu_pc, ctx->pc);
122 tmp1 = tcg_const_i32(exception);
123 tmp2 = tcg_const_i32(error_code);
124 gen_helper_excp(tmp1, tmp2);
125 tcg_temp_free_i32(tmp2);
126 tcg_temp_free_i32(tmp1);
127}
128
129static inline void gen_invalid(DisasContext *ctx)
130{
131 gen_excp(ctx, EXCP_OPCDEC, 0);
132}
133
134static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
135{
136 TCGv tmp = tcg_temp_new();
137 TCGv_i32 tmp32 = tcg_temp_new_i32();
138 tcg_gen_qemu_ld32u(tmp, t1, flags);
139 tcg_gen_trunc_i64_i32(tmp32, tmp);
140 gen_helper_memory_to_f(t0, tmp32);
141 tcg_temp_free_i32(tmp32);
142 tcg_temp_free(tmp);
143}
144
145static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
146{
147 TCGv tmp = tcg_temp_new();
148 tcg_gen_qemu_ld64(tmp, t1, flags);
149 gen_helper_memory_to_g(t0, tmp);
150 tcg_temp_free(tmp);
151}
152
153static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
154{
155 TCGv tmp = tcg_temp_new();
156 TCGv_i32 tmp32 = tcg_temp_new_i32();
157 tcg_gen_qemu_ld32u(tmp, t1, flags);
158 tcg_gen_trunc_i64_i32(tmp32, tmp);
159 gen_helper_memory_to_s(t0, tmp32);
160 tcg_temp_free_i32(tmp32);
161 tcg_temp_free(tmp);
162}
163
164static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
165{
166 tcg_gen_mov_i64(cpu_lock, t1);
167 tcg_gen_qemu_ld32s(t0, t1, flags);
168}
169
170static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
171{
172 tcg_gen_mov_i64(cpu_lock, t1);
173 tcg_gen_qemu_ld64(t0, t1, flags);
174}
175
176static inline void gen_load_mem(DisasContext *ctx,
177 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
178 int flags),
179 int ra, int rb, int32_t disp16, int fp,
180 int clear)
181{
182 TCGv addr;
183
184 if (unlikely(ra == 31))
185 return;
186
187 addr = tcg_temp_new();
188 if (rb != 31) {
189 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
190 if (clear)
191 tcg_gen_andi_i64(addr, addr, ~0x7);
192 } else {
193 if (clear)
194 disp16 &= ~0x7;
195 tcg_gen_movi_i64(addr, disp16);
196 }
197 if (fp)
198 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
199 else
200 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
201 tcg_temp_free(addr);
202}
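/* Note on the helper above: the effective address is cpu_ir[rb] + disp16
   (or just disp16 when rb is R31), and the `clear` flag forces the low
   three address bits to zero.  The opcode switch below passes clear=1
   only for LDQ_U/STQ_U, which operate on the enclosing aligned quadword
   and ignore the byte offset. */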
203
636aa200 204static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 205{
a7812ae4
PB
206 TCGv_i32 tmp32 = tcg_temp_new_i32();
207 TCGv tmp = tcg_temp_new();
208 gen_helper_f_to_memory(tmp32, t0);
209 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
210 tcg_gen_qemu_st32(tmp, t1, flags);
211 tcg_temp_free(tmp);
a7812ae4 212 tcg_temp_free_i32(tmp32);
f18cd223
AJ
213}
214
636aa200 215static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 216{
a7812ae4
PB
217 TCGv tmp = tcg_temp_new();
218 gen_helper_g_to_memory(tmp, t0);
f18cd223
AJ
219 tcg_gen_qemu_st64(tmp, t1, flags);
220 tcg_temp_free(tmp);
221}
222
636aa200 223static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 224{
a7812ae4
PB
225 TCGv_i32 tmp32 = tcg_temp_new_i32();
226 TCGv tmp = tcg_temp_new();
227 gen_helper_s_to_memory(tmp32, t0);
228 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
229 tcg_gen_qemu_st32(tmp, t1, flags);
230 tcg_temp_free(tmp);
a7812ae4 231 tcg_temp_free_i32(tmp32);
f18cd223
AJ
232}
233
234static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
235{
236 int l1, l2;
237
238 l1 = gen_new_label();
239 l2 = gen_new_label();
240 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
241 tcg_gen_qemu_st32(t0, t1, flags);
6223246a 242 tcg_gen_movi_i64(t0, 1);
f4ed8679
AJ
243 tcg_gen_br(l2);
244 gen_set_label(l1);
6223246a 245 tcg_gen_movi_i64(t0, 0);
f4ed8679
AJ
246 gen_set_label(l2);
247 tcg_gen_movi_i64(cpu_lock, -1);
248}
249
250static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
251{
252 int l1, l2;
253
254 l1 = gen_new_label();
255 l2 = gen_new_label();
256 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
257 tcg_gen_qemu_st64(t0, t1, flags);
6223246a 258 tcg_gen_movi_i64(t0, 1);
f4ed8679
AJ
259 tcg_gen_br(l2);
260 gen_set_label(l1);
6223246a 261 tcg_gen_movi_i64(t0, 0);
f4ed8679
AJ
262 gen_set_label(l2);
263 tcg_gen_movi_i64(cpu_lock, -1);
264}
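/* The two store-conditional helpers above emulate LDx_L/STx_C with the
   single cpu_lock global: the load-locked helpers record the locked
   address, and the store-conditional compares it against the store
   address, writes 1 into Ra on success and 0 on failure, then invalidates
   the lock by setting it to -1.  This is an approximation of the real
   lock-flag semantics, but it is what the code above implements. */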
265
266static inline void gen_store_mem(DisasContext *ctx,
267 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
268 int flags),
269 int ra, int rb, int32_t disp16, int fp,
270 int clear, int local)
023d8ca2 271{
9cd38c23 272 TCGv addr;
57a92c8e 273 if (local)
a7812ae4 274 addr = tcg_temp_local_new();
57a92c8e 275 else
a7812ae4 276 addr = tcg_temp_new();
023d8ca2
AJ
277 if (rb != 31) {
278 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
279 if (clear)
280 tcg_gen_andi_i64(addr, addr, ~0x7);
281 } else {
282 if (clear)
283 disp16 &= ~0x7;
284 tcg_gen_movi_i64(addr, disp16);
285 }
f18cd223
AJ
286 if (ra != 31) {
287 if (fp)
288 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
289 else
290 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
291 } else {
57a92c8e
AJ
292 TCGv zero;
293 if (local)
294 zero = tcg_const_local_i64(0);
295 else
296 zero = tcg_const_i64(0);
023d8ca2
AJ
297 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
298 tcg_temp_free(zero);
299 }
300 tcg_temp_free(addr);
301}
302
dbb30fe6 303static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
4c9649a9 304{
dbb30fe6
RH
305 int lab_over = gen_new_label();
306
307 tcg_gen_movi_i64(cpu_pc, ctx->pc);
308 tcg_gen_br(lab_over);
309 gen_set_label(lab_true);
310 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
311 gen_set_label(lab_over);
312}
313
314static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
315 int32_t disp, int mask)
316{
317 int lab_true = gen_new_label();
9c29504e 318
9c29504e
AJ
319 if (likely(ra != 31)) {
320 if (mask) {
a7812ae4 321 TCGv tmp = tcg_temp_new();
9c29504e 322 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
dbb30fe6 323 tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
9c29504e 324 tcg_temp_free(tmp);
dbb30fe6
RH
325 } else {
326 tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
327 }
9c29504e
AJ
328 } else {
329 /* Very uncommon case - Do not bother to optimize. */
330 TCGv tmp = tcg_const_i64(0);
dbb30fe6 331 tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
9c29504e
AJ
332 tcg_temp_free(tmp);
333 }
dbb30fe6 334 gen_bcond_pcload(ctx, disp, lab_true);
4c9649a9
JM
335}
336
337/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
338 This is complicated by the fact that -0.0 compares the same as +0.0. */
339
340static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
4c9649a9 341{
dbb30fe6
RH
342 int lab_false = -1;
343 uint64_t mzero = 1ull << 63;
f18cd223
AJ
344 TCGv tmp;
345
dbb30fe6
RH
346 switch (cond) {
347 case TCG_COND_LE:
348 case TCG_COND_GT:
349 /* For <= or >, the -0.0 value directly compares the way we want. */
350 tcg_gen_brcondi_i64(cond, src, 0, lab_true);
a7812ae4 351 break;
dbb30fe6
RH
352
353 case TCG_COND_EQ:
354 case TCG_COND_NE:
355 /* For == or !=, we can simply mask off the sign bit and compare. */
356 /* ??? Assume that the temporary is reclaimed at the branch. */
357 tmp = tcg_temp_new();
358 tcg_gen_andi_i64(tmp, src, mzero - 1);
359 tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
a7812ae4 360 break;
dbb30fe6
RH
361
362 case TCG_COND_GE:
363 /* For >=, emit two branches to the destination. */
364 tcg_gen_brcondi_i64(cond, src, 0, lab_true);
365 tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
a7812ae4 366 break;
dbb30fe6
RH
367
368 case TCG_COND_LT:
369 /* For <, first filter out -0.0 to what will be the fallthru. */
370 lab_false = gen_new_label();
371 tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
372 tcg_gen_brcondi_i64(cond, src, 0, lab_true);
373 gen_set_label(lab_false);
a7812ae4 374 break;
dbb30fe6 375
a7812ae4
PB
376 default:
377 abort();
378 }
379}
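/* Worked example for the -0.0 handling above: mzero == 1ull << 63 ==
   0x8000000000000000, which is the IEEE bit pattern of -0.0.  For EQ/NE
   the mask (mzero - 1) == 0x7fffffffffffffff clears the sign bit, so both
   +0.0 and -0.0 compare equal to zero; for GE and LT the -0.0 pattern has
   to be special-cased explicitly, because interpreted as a signed integer
   it is the most negative value. */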
380
381static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
382{
383 int lab_true;
384
385 if (unlikely(ra == 31)) {
386 /* Very uncommon case, but easier to optimize it to an integer
387 comparison than continuing with the floating point comparison. */
388 gen_bcond(ctx, cond, ra, disp, 0);
389 return;
390 }
391
392 lab_true = gen_new_label();
393 gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
394 gen_bcond_pcload(ctx, disp, lab_true);
4c9649a9
JM
395}
396
397static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
398 int islit, uint8_t lit, int mask)
4c9649a9 399{
bbe1dab4 400 TCGCond inv_cond = tcg_invert_cond(cond);
9c29504e
AJ
401 int l1;
402
403 if (unlikely(rc == 31))
404 return;
405
406 l1 = gen_new_label();
407
408 if (ra != 31) {
409 if (mask) {
a7812ae4 410 TCGv tmp = tcg_temp_new();
9c29504e
AJ
411 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
412 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
413 tcg_temp_free(tmp);
414 } else
415 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
416 } else {
417 /* Very uncommon case - Do not bother to optimize. */
418 TCGv tmp = tcg_const_i64(0);
419 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
420 tcg_temp_free(tmp);
421 }
422
4c9649a9 423 if (islit)
9c29504e 424 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 425 else
dfaa8583 426 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 427 gen_set_label(l1);
4c9649a9
JM
428}
429
bbe1dab4 430static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6
RH
431{
432 TCGv va = cpu_fir[ra];
433 int l1;
434
435 if (unlikely(rc == 31))
436 return;
437 if (unlikely(ra == 31)) {
438 /* ??? Assume that the temporary is reclaimed at the branch. */
439 va = tcg_const_i64(0);
440 }
441
442 l1 = gen_new_label();
bbe1dab4 443 gen_fbcond_internal(tcg_invert_cond(cond), va, l1);
dbb30fe6
RH
444
445 if (rb != 31)
446 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
447 else
448 tcg_gen_movi_i64(cpu_fir[rc], 0);
449 gen_set_label(l1);
450}
451
452#define QUAL_RM_N 0x080 /* Round mode nearest even */
453#define QUAL_RM_C 0x000 /* Round mode chopped */
454#define QUAL_RM_M 0x040 /* Round mode minus infinity */
455#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
456#define QUAL_RM_MASK 0x0c0
457
458#define QUAL_U 0x100 /* Underflow enable (fp output) */
459#define QUAL_V 0x100 /* Overflow enable (int output) */
460#define QUAL_S 0x400 /* Software completion enable */
461#define QUAL_I 0x200 /* Inexact detection enable */
462
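/* Illustrative decoding of the qualifier bits above (a made-up fn11
   value, not a specific instruction): for fn11 == 0x440,
   fn11 & QUAL_RM_MASK == QUAL_RM_M selects round-toward-minus-infinity
   and fn11 & QUAL_S is set, requesting software completion, while
   QUAL_I and QUAL_U/QUAL_V remain clear. */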
463static void gen_qual_roundmode(DisasContext *ctx, int fn11)
464{
465 TCGv_i32 tmp;
466
467 fn11 &= QUAL_RM_MASK;
468 if (fn11 == ctx->tb_rm) {
469 return;
470 }
471 ctx->tb_rm = fn11;
472
473 tmp = tcg_temp_new_i32();
474 switch (fn11) {
475 case QUAL_RM_N:
476 tcg_gen_movi_i32(tmp, float_round_nearest_even);
477 break;
478 case QUAL_RM_C:
479 tcg_gen_movi_i32(tmp, float_round_to_zero);
480 break;
481 case QUAL_RM_M:
482 tcg_gen_movi_i32(tmp, float_round_down);
483 break;
484 case QUAL_RM_D:
485 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
486 break;
487 }
488
489#if defined(CONFIG_SOFTFLOAT_INLINE)
490 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
491 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
492 sets the one field. */
493 tcg_gen_st8_i32(tmp, cpu_env,
494 offsetof(CPUState, fp_status.float_rounding_mode));
495#else
496 gen_helper_setroundmode(tmp);
497#endif
498
499 tcg_temp_free_i32(tmp);
500}
501
502static void gen_qual_flushzero(DisasContext *ctx, int fn11)
503{
504 TCGv_i32 tmp;
505
506 fn11 &= QUAL_U;
507 if (fn11 == ctx->tb_ftz) {
508 return;
509 }
510 ctx->tb_ftz = fn11;
511
512 tmp = tcg_temp_new_i32();
513 if (fn11) {
514 /* Underflow is enabled, use the FPCR setting. */
515 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
516 } else {
517 /* Underflow is disabled, force flush-to-zero. */
518 tcg_gen_movi_i32(tmp, 1);
519 }
520
521#if defined(CONFIG_SOFTFLOAT_INLINE)
522 tcg_gen_st8_i32(tmp, cpu_env,
523 offsetof(CPUState, fp_status.flush_to_zero));
524#else
525 gen_helper_setflushzero(tmp);
526#endif
527
528 tcg_temp_free_i32(tmp);
529}
530
531static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
532{
533 TCGv val = tcg_temp_new();
534 if (reg == 31) {
535 tcg_gen_movi_i64(val, 0);
536 } else if (fn11 & QUAL_S) {
537 gen_helper_ieee_input_s(val, cpu_fir[reg]);
538 } else if (is_cmp) {
539 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
540 } else {
541 gen_helper_ieee_input(val, cpu_fir[reg]);
542 }
543 return val;
544}
545
546static void gen_fp_exc_clear(void)
547{
548#if defined(CONFIG_SOFTFLOAT_INLINE)
549 TCGv_i32 zero = tcg_const_i32(0);
550 tcg_gen_st8_i32(zero, cpu_env,
551 offsetof(CPUState, fp_status.float_exception_flags));
552 tcg_temp_free_i32(zero);
553#else
554 gen_helper_fp_exc_clear();
555#endif
556}
557
558static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
559{
560 /* ??? We ought to be able to do something with imprecise exceptions.
561 E.g. notice we're still in the trap shadow of something within the
562 TB and do not generate the code to signal the exception; end the TB
563 when an exception is forced to arrive, either by consumption of a
564 register value or TRAPB or EXCB. */
565 TCGv_i32 exc = tcg_temp_new_i32();
566 TCGv_i32 reg;
567
568#if defined(CONFIG_SOFTFLOAT_INLINE)
569 tcg_gen_ld8u_i32(exc, cpu_env,
570 offsetof(CPUState, fp_status.float_exception_flags));
571#else
572 gen_helper_fp_exc_get(exc);
573#endif
574
575 if (ignore) {
576 tcg_gen_andi_i32(exc, exc, ~ignore);
577 }
578
579 /* ??? Pass in the regno of the destination so that the helper can
580 set EXC_MASK, which contains a bitmask of destination registers
581 that have caused arithmetic traps. A simple userspace emulation
582 does not require this. We do need it for a guest kernel's entArith,
583 or if we were to do something clever with imprecise exceptions. */
584 reg = tcg_const_i32(rc + 32);
585
586 if (fn11 & QUAL_S) {
587 gen_helper_fp_exc_raise_s(exc, reg);
588 } else {
589 gen_helper_fp_exc_raise(exc, reg);
590 }
591
592 tcg_temp_free_i32(reg);
593 tcg_temp_free_i32(exc);
594}
595
596static inline void gen_fp_exc_raise(int rc, int fn11)
597{
598 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 599}
f24518b5 600
735cf45f
RH
601static void gen_fcvtql(int rb, int rc)
602{
603 if (unlikely(rc == 31)) {
604 return;
605 }
606 if (unlikely(rb == 31)) {
607 tcg_gen_movi_i64(cpu_fir[rc], 0);
608 } else {
609 TCGv tmp = tcg_temp_new();
610
611 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
612 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
613 tcg_gen_shli_i64(tmp, tmp, 32);
614 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
615 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
616
617 tcg_temp_free(tmp);
618 }
619}
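/* Layout implemented above: the 32-bit source is spread into the
   register-file longword format, with bits <31:30> of the input placed
   in bits <63:62> of the destination (the 0xC0000000 mask shifted left
   by 32) and bits <29:0> placed in bits <58:29> (the 0x3FFFFFFF mask
   shifted left by 29).  All other destination bits end up zero. */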
620
621static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
622{
623 if (rb != 31) {
624 int lab = gen_new_label();
625 TCGv tmp = tcg_temp_new();
626
627 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
628 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
629 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
630
631 gen_set_label(lab);
632 }
633 gen_fcvtql(rb, rc);
634}
635
f24518b5
RH
636#define FARITH2(name) \
637static inline void glue(gen_f, name)(int rb, int rc) \
638{ \
639 if (unlikely(rc == 31)) { \
640 return; \
641 } \
642 if (rb != 31) { \
643 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
644 } else { \
645 TCGv tmp = tcg_const_i64(0); \
646 gen_helper_ ## name (cpu_fir[rc], tmp); \
647 tcg_temp_free(tmp); \
648 } \
649}
650FARITH2(cvtlq)
f24518b5
RH
651
652/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
653FARITH2(sqrtf)
654FARITH2(sqrtg)
a7812ae4
PB
655FARITH2(cvtgf)
656FARITH2(cvtgq)
657FARITH2(cvtqf)
658FARITH2(cvtqg)
f24518b5
RH
659
660static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
661 int rb, int rc, int fn11)
662{
663 TCGv vb;
664
665 /* ??? This is wrong: the instruction is not a nop, it still may
666 raise exceptions. */
667 if (unlikely(rc == 31)) {
668 return;
669 }
670
671 gen_qual_roundmode(ctx, fn11);
672 gen_qual_flushzero(ctx, fn11);
673 gen_fp_exc_clear();
674
675 vb = gen_ieee_input(rb, fn11, 0);
676 helper(cpu_fir[rc], vb);
677 tcg_temp_free(vb);
678
679 gen_fp_exc_raise(rc, fn11);
680}
681
682#define IEEE_ARITH2(name) \
683static inline void glue(gen_f, name)(DisasContext *ctx, \
684 int rb, int rc, int fn11) \
685{ \
686 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
687}
688IEEE_ARITH2(sqrts)
689IEEE_ARITH2(sqrtt)
690IEEE_ARITH2(cvtst)
691IEEE_ARITH2(cvtts)
692
693static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
694{
695 TCGv vb;
696 int ignore = 0;
697
698 /* ??? This is wrong: the instruction is not a nop, it still may
699 raise exceptions. */
700 if (unlikely(rc == 31)) {
701 return;
702 }
703
704 /* No need to set flushzero, since we have an integer output. */
705 gen_fp_exc_clear();
706 vb = gen_ieee_input(rb, fn11, 0);
707
708 /* Almost all integer conversions use cropped rounding, and most
709 also do not have integer overflow enabled. Special case that. */
710 switch (fn11) {
711 case QUAL_RM_C:
712 gen_helper_cvttq_c(cpu_fir[rc], vb);
713 break;
714 case QUAL_V | QUAL_RM_C:
715 case QUAL_S | QUAL_V | QUAL_RM_C:
716 ignore = float_flag_inexact;
717 /* FALLTHRU */
718 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
719 gen_helper_cvttq_svic(cpu_fir[rc], vb);
720 break;
721 default:
722 gen_qual_roundmode(ctx, fn11);
723 gen_helper_cvttq(cpu_fir[rc], vb);
724 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
725 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
726 break;
727 }
728 tcg_temp_free(vb);
729
730 gen_fp_exc_raise_ignore(rc, fn11, ignore);
731}
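/* Summary of the special-casing above: a plain chopped conversion maps
   directly to the cvttq_c helper; the V/C, S/V/C and S/V/I/C qualifier
   combinations share the cvttq_svic helper, with inexact ignored except
   in the S/V/I/C case; every other combination takes the generic cvttq
   path and builds an `ignore` mask for whichever of the overflow and
   inexact flags the qualifiers leave disabled. */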
732
f24518b5
RH
733static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
734 int rb, int rc, int fn11)
735{
736 TCGv vb;
737
738 /* ??? This is wrong: the instruction is not a nop, it still may
739 raise exceptions. */
740 if (unlikely(rc == 31)) {
741 return;
742 }
743
744 gen_qual_roundmode(ctx, fn11);
745
746 if (rb == 31) {
747 vb = tcg_const_i64(0);
748 } else {
749 vb = cpu_fir[rb];
750 }
751
752 /* The only exception that can be raised by integer conversion
753 is inexact. Thus we only need to worry about exceptions when
754 inexact handling is requested. */
755 if (fn11 & QUAL_I) {
756 gen_fp_exc_clear();
757 helper(cpu_fir[rc], vb);
758 gen_fp_exc_raise(rc, fn11);
759 } else {
760 helper(cpu_fir[rc], vb);
761 }
762
763 if (rb == 31) {
764 tcg_temp_free(vb);
765 }
766}
767
768#define IEEE_INTCVT(name) \
769static inline void glue(gen_f, name)(DisasContext *ctx, \
770 int rb, int rc, int fn11) \
771{ \
772 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
773}
774IEEE_INTCVT(cvtqs)
775IEEE_INTCVT(cvtqt)
776
777static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
778{
779 TCGv va, vb, vmask;
780 int za = 0, zb = 0;
781
782 if (unlikely(rc == 31)) {
783 return;
784 }
785
786 vmask = tcg_const_i64(mask);
787
788 TCGV_UNUSED_I64(va);
789 if (ra == 31) {
790 if (inv_a) {
791 va = vmask;
792 } else {
793 za = 1;
794 }
795 } else {
796 va = tcg_temp_new_i64();
797 tcg_gen_mov_i64(va, cpu_fir[ra]);
798 if (inv_a) {
799 tcg_gen_andc_i64(va, vmask, va);
800 } else {
801 tcg_gen_and_i64(va, va, vmask);
802 }
803 }
804
805 TCGV_UNUSED_I64(vb);
806 if (rb == 31) {
807 zb = 1;
808 } else {
809 vb = tcg_temp_new_i64();
810 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
811 }
812
813 switch (za << 1 | zb) {
814 case 0 | 0:
815 tcg_gen_or_i64(cpu_fir[rc], va, vb);
816 break;
817 case 0 | 1:
818 tcg_gen_mov_i64(cpu_fir[rc], va);
819 break;
820 case 2 | 0:
821 tcg_gen_mov_i64(cpu_fir[rc], vb);
822 break;
823 case 2 | 1:
824 tcg_gen_movi_i64(cpu_fir[rc], 0);
825 break;
826 }
827
828 tcg_temp_free(vmask);
829 if (ra != 31) {
830 tcg_temp_free(va);
831 }
832 if (rb != 31) {
833 tcg_temp_free(vb);
834 }
835}
836
837static inline void gen_fcpys(int ra, int rb, int rc)
838{
839 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
840}
841
842static inline void gen_fcpysn(int ra, int rb, int rc)
843{
844 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
845}
846
847static inline void gen_fcpyse(int ra, int rb, int rc)
848{
849 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
850}
851
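/* The masks used above encode what each variant copies from Fa into the
   result: 0x8000000000000000 is just the sign bit (CPYS, and CPYSN with
   inv_a set to complement it), while 0xFFF0000000000000 covers the sign
   bit plus the 11-bit exponent field of an IEEE double / T-format value
   (CPYSE).  The za/zb flags track operands that are statically zero
   because the register is F31, so no TCG op is emitted for them. */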
852#define FARITH3(name) \
853static inline void glue(gen_f, name)(int ra, int rb, int rc) \
854{ \
855 TCGv va, vb; \
856 \
857 if (unlikely(rc == 31)) { \
858 return; \
859 } \
860 if (ra == 31) { \
861 va = tcg_const_i64(0); \
862 } else { \
863 va = cpu_fir[ra]; \
864 } \
865 if (rb == 31) { \
866 vb = tcg_const_i64(0); \
867 } else { \
868 vb = cpu_fir[rb]; \
869 } \
870 \
871 gen_helper_ ## name (cpu_fir[rc], va, vb); \
872 \
873 if (ra == 31) { \
874 tcg_temp_free(va); \
875 } \
876 if (rb == 31) { \
877 tcg_temp_free(vb); \
878 } \
879}
f24518b5
RH
880
881/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
882FARITH3(addf)
883FARITH3(subf)
884FARITH3(mulf)
885FARITH3(divf)
886FARITH3(addg)
887FARITH3(subg)
888FARITH3(mulg)
889FARITH3(divg)
890FARITH3(cmpgeq)
891FARITH3(cmpglt)
892FARITH3(cmpgle)
f24518b5
RH
893
894static void gen_ieee_arith3(DisasContext *ctx,
895 void (*helper)(TCGv, TCGv, TCGv),
896 int ra, int rb, int rc, int fn11)
897{
898 TCGv va, vb;
899
900 /* ??? This is wrong: the instruction is not a nop, it still may
901 raise exceptions. */
902 if (unlikely(rc == 31)) {
903 return;
904 }
905
906 gen_qual_roundmode(ctx, fn11);
907 gen_qual_flushzero(ctx, fn11);
908 gen_fp_exc_clear();
909
910 va = gen_ieee_input(ra, fn11, 0);
911 vb = gen_ieee_input(rb, fn11, 0);
912 helper(cpu_fir[rc], va, vb);
913 tcg_temp_free(va);
914 tcg_temp_free(vb);
915
916 gen_fp_exc_raise(rc, fn11);
917}
918
919#define IEEE_ARITH3(name) \
920static inline void glue(gen_f, name)(DisasContext *ctx, \
921 int ra, int rb, int rc, int fn11) \
922{ \
923 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
924}
925IEEE_ARITH3(adds)
926IEEE_ARITH3(subs)
927IEEE_ARITH3(muls)
928IEEE_ARITH3(divs)
929IEEE_ARITH3(addt)
930IEEE_ARITH3(subt)
931IEEE_ARITH3(mult)
932IEEE_ARITH3(divt)
933
934static void gen_ieee_compare(DisasContext *ctx,
935 void (*helper)(TCGv, TCGv, TCGv),
936 int ra, int rb, int rc, int fn11)
937{
938 TCGv va, vb;
939
940 /* ??? This is wrong: the instruction is not a nop, it still may
941 raise exceptions. */
942 if (unlikely(rc == 31)) {
943 return;
944 }
945
946 gen_fp_exc_clear();
947
948 va = gen_ieee_input(ra, fn11, 1);
949 vb = gen_ieee_input(rb, fn11, 1);
950 helper(cpu_fir[rc], va, vb);
951 tcg_temp_free(va);
952 tcg_temp_free(vb);
953
954 gen_fp_exc_raise(rc, fn11);
955}
956
957#define IEEE_CMP3(name) \
958static inline void glue(gen_f, name)(DisasContext *ctx, \
959 int ra, int rb, int rc, int fn11) \
960{ \
961 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
962}
963IEEE_CMP3(cmptun)
964IEEE_CMP3(cmpteq)
965IEEE_CMP3(cmptlt)
966IEEE_CMP3(cmptle)
a7812ae4 967
968static inline uint64_t zapnot_mask(uint8_t lit)
969{
970 uint64_t mask = 0;
971 int i;
972
973 for (i = 0; i < 8; ++i) {
974 if ((lit >> i) & 1)
975 mask |= 0xffull << (i * 8);
976 }
977 return mask;
978}
979
980/* Implement zapnot with an immediate operand, which expands to some
981 form of immediate AND. This is a basic building block in the
982 definition of many of the other byte manipulation instructions. */
248c42f3 983static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 984{
87d98f95
RH
985 switch (lit) {
986 case 0x00:
248c42f3 987 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
988 break;
989 case 0x01:
248c42f3 990 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
991 break;
992 case 0x03:
248c42f3 993 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
994 break;
995 case 0x0f:
248c42f3 996 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
997 break;
998 case 0xff:
248c42f3 999 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1000 break;
1001 default:
248c42f3 1002 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1003 break;
1004 }
1005}
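/* Worked example: zapnot_mask(0x0f) == 0x00000000ffffffff, so
   gen_zapnoti(dest, src, 0x0f) keeps the low four bytes, which is why the
   0x0f case above can use tcg_gen_ext32u_i64 directly.  Likewise 0x01 and
   0x03 reduce to 8- and 16-bit zero extensions, and 0xff is a plain move. */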
1006
1007static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1008{
1009 if (unlikely(rc == 31))
1010 return;
1011 else if (unlikely(ra == 31))
1012 tcg_gen_movi_i64(cpu_ir[rc], 0);
1013 else if (islit)
248c42f3 1014 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1015 else
1016 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1017}
1018
1019static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1020{
1021 if (unlikely(rc == 31))
1022 return;
1023 else if (unlikely(ra == 31))
1024 tcg_gen_movi_i64(cpu_ir[rc], 0);
1025 else if (islit)
248c42f3 1026 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1027 else
1028 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1029}
1030
1031
248c42f3 1032/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1033static void gen_ext_h(int ra, int rb, int rc, int islit,
1034 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1035{
1036 if (unlikely(rc == 31))
1037 return;
377a43b6
RH
1038 else if (unlikely(ra == 31))
1039 tcg_gen_movi_i64(cpu_ir[rc], 0);
1040 else {
dfaa8583 1041 if (islit) {
377a43b6
RH
1042 lit = (64 - (lit & 7) * 8) & 0x3f;
1043 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1044 } else {
377a43b6 1045 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1046 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1047 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1048 tcg_gen_neg_i64(tmp1, tmp1);
1049 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1050 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1051 tcg_temp_free(tmp1);
dfaa8583 1052 }
248c42f3 1053 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1054 }
b3249f63
AJ
1055}
1056
248c42f3 1057/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1058static void gen_ext_l(int ra, int rb, int rc, int islit,
1059 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1060{
1061 if (unlikely(rc == 31))
1062 return;
377a43b6
RH
1063 else if (unlikely(ra == 31))
1064 tcg_gen_movi_i64(cpu_ir[rc], 0);
1065 else {
dfaa8583 1066 if (islit) {
377a43b6 1067 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1068 } else {
a7812ae4 1069 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1070 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1071 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1072 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1073 tcg_temp_free(tmp);
fe2b269a 1074 }
248c42f3
RH
1075 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1076 }
1077}
1078
1079/* INSWH, INSLH, INSQH */
1080static void gen_ins_h(int ra, int rb, int rc, int islit,
1081 uint8_t lit, uint8_t byte_mask)
1082{
1083 if (unlikely(rc == 31))
1084 return;
1085 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1086 tcg_gen_movi_i64(cpu_ir[rc], 0);
1087 else {
1088 TCGv tmp = tcg_temp_new();
1089
1090 /* The instruction description has us left-shift the byte mask
1091 and extract bits <15:8> and apply that zap at the end. This
1092 is equivalent to simply performing the zap first and shifting
1093 afterward. */
1094 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1095
1096 if (islit) {
1097 /* Note that we have handled the lit==0 case above. */
1098 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1099 } else {
1100 TCGv shift = tcg_temp_new();
1101
1102 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1103 Do this portably by splitting the shift into two parts:
1104 shift_count-1 and 1. Arrange for the -1 by using
1105 ones-complement instead of twos-complement in the negation:
1106 ~((B & 7) * 8) & 63. */
1107
1108 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1109 tcg_gen_shli_i64(shift, shift, 3);
1110 tcg_gen_not_i64(shift, shift);
1111 tcg_gen_andi_i64(shift, shift, 0x3f);
1112
1113 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1114 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1115 tcg_temp_free(shift);
1116 }
1117 tcg_temp_free(tmp);
1118 }
1119}
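/* Worked example of the split shift above: for (B & 7) == 2 the zapped
   value is shifted right by ~(2 * 8) & 63 == 47 and then once more by 1,
   i.e. by 48 == 64 - 16 in total.  For (B & 7) == 0 the two shifts add up
   to 64, so the result is zero without relying on an undefined 64-bit
   shift count. */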
1120
248c42f3 1121/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1122static void gen_ins_l(int ra, int rb, int rc, int islit,
1123 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1124{
1125 if (unlikely(rc == 31))
1126 return;
1127 else if (unlikely(ra == 31))
1128 tcg_gen_movi_i64(cpu_ir[rc], 0);
1129 else {
1130 TCGv tmp = tcg_temp_new();
1131
1132 /* The instruction description has us left-shift the byte mask
1133 the same number of byte slots as the data and apply the zap
1134 at the end. This is equivalent to simply performing the zap
1135 first and shifting afterward. */
1136 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1137
1138 if (islit) {
1139 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1140 } else {
1141 TCGv shift = tcg_temp_new();
1142 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1143 tcg_gen_shli_i64(shift, shift, 3);
1144 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1145 tcg_temp_free(shift);
1146 }
1147 tcg_temp_free(tmp);
377a43b6 1148 }
b3249f63
AJ
1149}
1150
ffec44f1
RH
1151/* MSKWH, MSKLH, MSKQH */
1152static void gen_msk_h(int ra, int rb, int rc, int islit,
1153 uint8_t lit, uint8_t byte_mask)
1154{
1155 if (unlikely(rc == 31))
1156 return;
1157 else if (unlikely(ra == 31))
1158 tcg_gen_movi_i64(cpu_ir[rc], 0);
1159 else if (islit) {
1160 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1161 } else {
1162 TCGv shift = tcg_temp_new();
1163 TCGv mask = tcg_temp_new();
1164
1165 /* The instruction description is as above, where the byte_mask
1166 is shifted left, and then we extract bits <15:8>. This can be
1167 emulated with a right-shift on the expanded byte mask. This
1168 requires extra care because for an input <2:0> == 0 we need a
1169 shift of 64 bits in order to generate a zero. This is done by
1170 splitting the shift into two parts, the variable shift - 1
1171 followed by a constant 1 shift. The code we expand below is
1172 equivalent to ~((B & 7) * 8) & 63. */
1173
1174 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1175 tcg_gen_shli_i64(shift, shift, 3);
1176 tcg_gen_not_i64(shift, shift);
1177 tcg_gen_andi_i64(shift, shift, 0x3f);
1178 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1179 tcg_gen_shr_i64(mask, mask, shift);
1180 tcg_gen_shri_i64(mask, mask, 1);
1181
1182 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1183
1184 tcg_temp_free(mask);
1185 tcg_temp_free(shift);
1186 }
1187}
1188
14ab1634 1189/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1190static void gen_msk_l(int ra, int rb, int rc, int islit,
1191 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1192{
1193 if (unlikely(rc == 31))
1194 return;
1195 else if (unlikely(ra == 31))
1196 tcg_gen_movi_i64(cpu_ir[rc], 0);
1197 else if (islit) {
1198 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1199 } else {
1200 TCGv shift = tcg_temp_new();
1201 TCGv mask = tcg_temp_new();
1202
1203 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1204 tcg_gen_shli_i64(shift, shift, 3);
1205 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1206 tcg_gen_shl_i64(mask, mask, shift);
1207
1208 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1209
1210 tcg_temp_free(mask);
1211 tcg_temp_free(shift);
1212 }
1213}
1214
04acd307 1215/* Code to call arith3 helpers */
a7812ae4 1216#define ARITH3(name) \
1217static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1218 uint8_t lit) \
1219{ \
1220 if (unlikely(rc == 31)) \
1221 return; \
1222 \
1223 if (ra != 31) { \
1224 if (islit) { \
1225 TCGv tmp = tcg_const_i64(lit); \
1226 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1227 tcg_temp_free(tmp); \
1228 } else \
1229 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1230 } else { \
1231 TCGv tmp1 = tcg_const_i64(0); \
1232 if (islit) { \
1233 TCGv tmp2 = tcg_const_i64(lit); \
1234 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1235 tcg_temp_free(tmp2); \
1236 } else \
1237 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1238 tcg_temp_free(tmp1); \
1239 } \
b3249f63 1240}
a7812ae4
PB
1241ARITH3(cmpbge)
1242ARITH3(addlv)
1243ARITH3(sublv)
1244ARITH3(addqv)
1245ARITH3(subqv)
a7812ae4
PB
1246ARITH3(umulh)
1247ARITH3(mullv)
1248ARITH3(mulqv)
13e4df99
RH
1249ARITH3(minub8)
1250ARITH3(minsb8)
1251ARITH3(minuw4)
1252ARITH3(minsw4)
1253ARITH3(maxub8)
1254ARITH3(maxsb8)
1255ARITH3(maxuw4)
1256ARITH3(maxsw4)
1257ARITH3(perr)
1258
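/* Each ARITH3(x) expansion above defines a gen_x(ra, rb, rc, islit, lit)
   wrapper that substitutes a constant for the literal operand or for R31
   and otherwise just calls the corresponding out-of-line helper.  The
   opcode decoder below relies on these generated names (gen_cmpbge,
   gen_addlv, gen_umulh, ...). */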
1259#define MVIOP2(name) \
1260static inline void glue(gen_, name)(int rb, int rc) \
1261{ \
1262 if (unlikely(rc == 31)) \
1263 return; \
1264 if (unlikely(rb == 31)) \
1265 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1266 else \
1267 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1268}
1269MVIOP2(pklb)
1270MVIOP2(pkwb)
1271MVIOP2(unpkbl)
1272MVIOP2(unpkbw)
b3249f63 1273
9e05960f
RH
1274static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1275 int islit, uint8_t lit)
01ff9cc8 1276{
9e05960f 1277 TCGv va, vb;
01ff9cc8 1278
9e05960f 1279 if (unlikely(rc == 31)) {
13e4df99 1280 return;
9e05960f 1281 }
01ff9cc8 1282
9e05960f
RH
1283 if (ra == 31) {
1284 va = tcg_const_i64(0);
1285 } else {
1286 va = cpu_ir[ra];
1287 }
1288 if (islit) {
1289 vb = tcg_const_i64(lit);
1290 } else {
1291 vb = cpu_ir[rb];
1292 }
01ff9cc8 1293
9e05960f 1294 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1295
9e05960f
RH
1296 if (ra == 31) {
1297 tcg_temp_free(va);
1298 }
1299 if (islit) {
1300 tcg_temp_free(vb);
1301 }
01ff9cc8
AJ
1302}
1303
636aa200 1304static inline int translate_one(DisasContext *ctx, uint32_t insn)
1305{
1306 uint32_t palcode;
1307 int32_t disp21, disp16, disp12;
f88fe4e3
BS
1308 uint16_t fn11;
1309 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
adf3c8b6 1310 uint8_t lit;
4c9649a9
JM
1311 int ret;
1312
1313 /* Decode all instruction fields */
1314 opc = insn >> 26;
1315 ra = (insn >> 21) & 0x1F;
1316 rb = (insn >> 16) & 0x1F;
1317 rc = insn & 0x1F;
13e4df99 1318 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1319 if (rb == 31 && !islit) {
1320 islit = 1;
1321 lit = 0;
1322 } else
1323 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1324 palcode = insn & 0x03FFFFFF;
1325 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1326 disp16 = (int16_t)(insn & 0x0000FFFF);
1327 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
4c9649a9
JM
1328 fn11 = (insn >> 5) & 0x000007FF;
1329 fpfn = fn11 & 0x3F;
1330 fn7 = (insn >> 5) & 0x0000007F;
1331 fn2 = (insn >> 5) & 0x00000003;
1332 ret = 0;
806991da 1333 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1334 opc, ra, rb, rc, disp16);
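/* Example decode (an arbitrary illustrative word, not taken from the
   source): insn == 0x40220403 gives opc == 0x10, ra == 1, rb == 2,
   islit == 0, fn7 == 0x20 and rc == 3, i.e. an ADDQ r1, r2, r3 per the
   operate-format cases below. */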
1335
1336 switch (opc) {
1337 case 0x00:
1338 /* CALL_PAL */
1339#ifdef CONFIG_USER_ONLY
1340 if (palcode == 0x9E) {
1341 /* RDUNIQUE */
1342 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1343 break;
1344 } else if (palcode == 0x9F) {
1345 /* WRUNIQUE */
1346 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1347 break;
1348 }
1349#endif
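/* In user-mode emulation the two PALcode entry points above are handled
   inline: 0x9E (RDUNIQUE) reads the per-process unique value into v0 and
   0x9F (WRUNIQUE) writes a0 into it, using the cpu_uniq global instead of
   taking a PALcode exception. */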
1350 if (palcode >= 0x80 && palcode < 0xC0) {
1351 /* Unprivileged PAL call */
31a877f2 1352 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
ab471ade
RH
1353 ret = 3;
1354 break;
1355 }
1356#ifndef CONFIG_USER_ONLY
1357 if (palcode < 0x40) {
4c9649a9
JM
1358 /* Privileged PAL code */
1359 if (ctx->mem_idx & 1)
1360 goto invalid_opc;
ab471ade 1361 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
4c9649a9 1362 }
ab471ade
RH
1363#endif
1364 /* Invalid PAL call */
1365 goto invalid_opc;
4c9649a9
JM
1366 case 0x01:
1367 /* OPC01 */
1368 goto invalid_opc;
1369 case 0x02:
1370 /* OPC02 */
1371 goto invalid_opc;
1372 case 0x03:
1373 /* OPC03 */
1374 goto invalid_opc;
1375 case 0x04:
1376 /* OPC04 */
1377 goto invalid_opc;
1378 case 0x05:
1379 /* OPC05 */
1380 goto invalid_opc;
1381 case 0x06:
1382 /* OPC06 */
1383 goto invalid_opc;
1384 case 0x07:
1385 /* OPC07 */
1386 goto invalid_opc;
1387 case 0x08:
1388 /* LDA */
1ef4ef4e 1389 if (likely(ra != 31)) {
496cb5b9 1390 if (rb != 31)
3761035f
AJ
1391 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1392 else
1393 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1394 }
4c9649a9
JM
1395 break;
1396 case 0x09:
1397 /* LDAH */
1ef4ef4e 1398 if (likely(ra != 31)) {
496cb5b9 1399 if (rb != 31)
3761035f
AJ
1400 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1401 else
1402 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1403 }
4c9649a9
JM
1404 break;
1405 case 0x0A:
1406 /* LDBU */
1407 if (!(ctx->amask & AMASK_BWX))
1408 goto invalid_opc;
f18cd223 1409 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1410 break;
1411 case 0x0B:
1412 /* LDQ_U */
f18cd223 1413 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1414 break;
1415 case 0x0C:
1416 /* LDWU */
1417 if (!(ctx->amask & AMASK_BWX))
1418 goto invalid_opc;
577d5e7f 1419 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1420 break;
1421 case 0x0D:
1422 /* STW */
57a92c8e 1423 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1424 break;
1425 case 0x0E:
1426 /* STB */
57a92c8e 1427 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1428 break;
1429 case 0x0F:
1430 /* STQ_U */
57a92c8e 1431 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
4c9649a9
JM
1432 break;
1433 case 0x10:
1434 switch (fn7) {
1435 case 0x00:
1436 /* ADDL */
30c7183b
AJ
1437 if (likely(rc != 31)) {
1438 if (ra != 31) {
1439 if (islit) {
1440 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1441 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1442 } else {
30c7183b
AJ
1443 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1444 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1445 }
30c7183b
AJ
1446 } else {
1447 if (islit)
dfaa8583 1448 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1449 else
dfaa8583 1450 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1451 }
1452 }
4c9649a9
JM
1453 break;
1454 case 0x02:
1455 /* S4ADDL */
30c7183b
AJ
1456 if (likely(rc != 31)) {
1457 if (ra != 31) {
a7812ae4 1458 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1459 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1460 if (islit)
1461 tcg_gen_addi_i64(tmp, tmp, lit);
1462 else
1463 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1464 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1465 tcg_temp_free(tmp);
30c7183b
AJ
1466 } else {
1467 if (islit)
1468 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1469 else
dfaa8583 1470 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1471 }
1472 }
4c9649a9
JM
1473 break;
1474 case 0x09:
1475 /* SUBL */
30c7183b
AJ
1476 if (likely(rc != 31)) {
1477 if (ra != 31) {
dfaa8583 1478 if (islit)
30c7183b 1479 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1480 else
30c7183b 1481 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1482 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1483 } else {
1484 if (islit)
1485 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1486 else {
30c7183b
AJ
1487 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1488 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1489 }
1490 }
1491 }
1492 break;
1492 case 0x0B:
1493 /* S4SUBL */
30c7183b
AJ
1494 if (likely(rc != 31)) {
1495 if (ra != 31) {
a7812ae4 1496 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1497 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1498 if (islit)
1499 tcg_gen_subi_i64(tmp, tmp, lit);
1500 else
1501 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1502 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1503 tcg_temp_free(tmp);
30c7183b
AJ
1504 } else {
1505 if (islit)
1506 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1507 else {
30c7183b
AJ
1508 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1509 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1510 }
30c7183b
AJ
1511 }
1512 }
4c9649a9
JM
1513 break;
1514 case 0x0F:
1515 /* CMPBGE */
a7812ae4 1516 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1517 break;
1518 case 0x12:
1519 /* S8ADDL */
30c7183b
AJ
1520 if (likely(rc != 31)) {
1521 if (ra != 31) {
a7812ae4 1522 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1523 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1524 if (islit)
1525 tcg_gen_addi_i64(tmp, tmp, lit);
1526 else
1527 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1528 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1529 tcg_temp_free(tmp);
30c7183b
AJ
1530 } else {
1531 if (islit)
1532 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1533 else
dfaa8583 1534 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1535 }
1536 }
4c9649a9
JM
1537 break;
1538 case 0x1B:
1539 /* S8SUBL */
30c7183b
AJ
1540 if (likely(rc != 31)) {
1541 if (ra != 31) {
a7812ae4 1542 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1543 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1544 if (islit)
1545 tcg_gen_subi_i64(tmp, tmp, lit);
1546 else
1547 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1548 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1549 tcg_temp_free(tmp);
30c7183b
AJ
1550 } else {
1551 if (islit)
1552 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1553 else {
30c7183b
AJ
1554 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1555 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1556 }
30c7183b
AJ
1557 }
1558 }
4c9649a9
JM
1559 break;
1560 case 0x1D:
1561 /* CMPULT */
01ff9cc8 1562 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1563 break;
1564 case 0x20:
1565 /* ADDQ */
30c7183b
AJ
1566 if (likely(rc != 31)) {
1567 if (ra != 31) {
1568 if (islit)
1569 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1570 else
dfaa8583 1571 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1572 } else {
1573 if (islit)
1574 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1575 else
dfaa8583 1576 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1577 }
1578 }
4c9649a9
JM
1579 break;
1580 case 0x22:
1581 /* S4ADDQ */
30c7183b
AJ
1582 if (likely(rc != 31)) {
1583 if (ra != 31) {
a7812ae4 1584 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1585 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1586 if (islit)
1587 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1588 else
1589 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1590 tcg_temp_free(tmp);
30c7183b
AJ
1591 } else {
1592 if (islit)
1593 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1594 else
dfaa8583 1595 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1596 }
1597 }
4c9649a9
JM
1598 break;
1599 case 0x29:
1600 /* SUBQ */
30c7183b
AJ
1601 if (likely(rc != 31)) {
1602 if (ra != 31) {
1603 if (islit)
1604 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1605 else
dfaa8583 1606 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1607 } else {
1608 if (islit)
1609 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1610 else
dfaa8583 1611 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1612 }
1613 }
4c9649a9
JM
1614 break;
1615 case 0x2B:
1616 /* S4SUBQ */
30c7183b
AJ
1617 if (likely(rc != 31)) {
1618 if (ra != 31) {
a7812ae4 1619 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1620 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1621 if (islit)
1622 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1623 else
1624 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1625 tcg_temp_free(tmp);
30c7183b
AJ
1626 } else {
1627 if (islit)
1628 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1629 else
dfaa8583 1630 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1631 }
1632 }
4c9649a9
JM
1633 break;
1634 case 0x2D:
1635 /* CMPEQ */
01ff9cc8 1636 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1637 break;
1638 case 0x32:
1639 /* S8ADDQ */
30c7183b
AJ
1640 if (likely(rc != 31)) {
1641 if (ra != 31) {
a7812ae4 1642 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1643 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1644 if (islit)
1645 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1646 else
1647 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1648 tcg_temp_free(tmp);
30c7183b
AJ
1649 } else {
1650 if (islit)
1651 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1652 else
dfaa8583 1653 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1654 }
1655 }
4c9649a9
JM
1656 break;
1657 case 0x3B:
1658 /* S8SUBQ */
30c7183b
AJ
1659 if (likely(rc != 31)) {
1660 if (ra != 31) {
a7812ae4 1661 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1662 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1663 if (islit)
1664 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1665 else
1666 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1667 tcg_temp_free(tmp);
30c7183b
AJ
1668 } else {
1669 if (islit)
1670 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1671 else
dfaa8583 1672 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1673 }
1674 }
4c9649a9
JM
1675 break;
1676 case 0x3D:
1677 /* CMPULE */
01ff9cc8 1678 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
1679 break;
1680 case 0x40:
1681 /* ADDL/V */
a7812ae4 1682 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
1683 break;
1684 case 0x49:
1685 /* SUBL/V */
a7812ae4 1686 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
1687 break;
1688 case 0x4D:
1689 /* CMPLT */
01ff9cc8 1690 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
1691 break;
1692 case 0x60:
1693 /* ADDQ/V */
a7812ae4 1694 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1695 break;
1696 case 0x69:
1697 /* SUBQ/V */
a7812ae4 1698 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1699 break;
1700 case 0x6D:
1701 /* CMPLE */
01ff9cc8 1702 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
1703 break;
1704 default:
1705 goto invalid_opc;
1706 }
1707 break;
1708 case 0x11:
1709 switch (fn7) {
1710 case 0x00:
1711 /* AND */
30c7183b 1712 if (likely(rc != 31)) {
dfaa8583 1713 if (ra == 31)
30c7183b
AJ
1714 tcg_gen_movi_i64(cpu_ir[rc], 0);
1715 else if (islit)
1716 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1717 else
1718 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1719 }
4c9649a9
JM
1720 break;
1721 case 0x08:
1722 /* BIC */
30c7183b
AJ
1723 if (likely(rc != 31)) {
1724 if (ra != 31) {
1725 if (islit)
1726 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1727 else
1728 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1729 } else
1730 tcg_gen_movi_i64(cpu_ir[rc], 0);
1731 }
4c9649a9
JM
1732 break;
1733 case 0x14:
1734 /* CMOVLBS */
bbe1dab4 1735 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1736 break;
1737 case 0x16:
1738 /* CMOVLBC */
bbe1dab4 1739 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1740 break;
1741 case 0x20:
1742 /* BIS */
30c7183b
AJ
1743 if (likely(rc != 31)) {
1744 if (ra != 31) {
1745 if (islit)
1746 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 1747 else
30c7183b 1748 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 1749 } else {
30c7183b
AJ
1750 if (islit)
1751 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1752 else
dfaa8583 1753 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 1754 }
4c9649a9
JM
1755 }
1756 break;
1757 case 0x24:
1758 /* CMOVEQ */
bbe1dab4 1759 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1760 break;
1761 case 0x26:
1762 /* CMOVNE */
bbe1dab4 1763 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1764 break;
1765 case 0x28:
1766 /* ORNOT */
30c7183b 1767 if (likely(rc != 31)) {
dfaa8583 1768 if (ra != 31) {
30c7183b
AJ
1769 if (islit)
1770 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1771 else
1772 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1773 } else {
1774 if (islit)
1775 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1776 else
1777 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1778 }
1779 }
4c9649a9
JM
1780 break;
1781 case 0x40:
1782 /* XOR */
30c7183b
AJ
1783 if (likely(rc != 31)) {
1784 if (ra != 31) {
1785 if (islit)
1786 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1787 else
dfaa8583 1788 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1789 } else {
1790 if (islit)
1791 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1792 else
dfaa8583 1793 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1794 }
1795 }
4c9649a9
JM
1796 break;
1797 case 0x44:
1798 /* CMOVLT */
bbe1dab4 1799 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1800 break;
1801 case 0x46:
1802 /* CMOVGE */
bbe1dab4 1803 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1804 break;
1805 case 0x48:
1806 /* EQV */
30c7183b
AJ
1807 if (likely(rc != 31)) {
1808 if (ra != 31) {
1809 if (islit)
1810 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1811 else
1812 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1813 } else {
1814 if (islit)
1815 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 1816 else
dfaa8583 1817 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1818 }
1819 }
4c9649a9
JM
1820 break;
1821 case 0x61:
1822 /* AMASK */
ae8ecd42
AJ
1823 if (likely(rc != 31)) {
1824 if (islit)
1a1f7dbc 1825 tcg_gen_movi_i64(cpu_ir[rc], lit);
ae8ecd42 1826 else
1a1f7dbc
AJ
1827 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1828 switch (ctx->env->implver) {
1829 case IMPLVER_2106x:
1830 /* EV4, EV45, LCA, LCA45 & EV5 */
1831 break;
1832 case IMPLVER_21164:
1833 case IMPLVER_21264:
1834 case IMPLVER_21364:
1835 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1836 ~(uint64_t)ctx->amask);
1837 break;
1838 }
ae8ecd42 1839 }
4c9649a9
JM
1840 break;
1841 case 0x64:
1842 /* CMOVLE */
bbe1dab4 1843 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1844 break;
1845 case 0x66:
1846 /* CMOVGT */
bbe1dab4 1847 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1848 break;
1849 case 0x6C:
1850 /* IMPLVER */
3761035f 1851 if (rc != 31)
8579095b 1852 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
1853 break;
1854 default:
1855 goto invalid_opc;
1856 }
1857 break;
1858 case 0x12:
1859 switch (fn7) {
1860 case 0x02:
1861 /* MSKBL */
14ab1634 1862 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1863 break;
1864 case 0x06:
1865 /* EXTBL */
377a43b6 1866 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1867 break;
1868 case 0x0B:
1869 /* INSBL */
248c42f3 1870 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1871 break;
1872 case 0x12:
1873 /* MSKWL */
14ab1634 1874 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1875 break;
1876 case 0x16:
1877 /* EXTWL */
377a43b6 1878 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1879 break;
1880 case 0x1B:
1881 /* INSWL */
248c42f3 1882 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1883 break;
1884 case 0x22:
1885 /* MSKLL */
14ab1634 1886 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1887 break;
1888 case 0x26:
1889 /* EXTLL */
377a43b6 1890 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1891 break;
1892 case 0x2B:
1893 /* INSLL */
248c42f3 1894 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1895 break;
1896 case 0x30:
1897 /* ZAP */
a7812ae4 1898 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
1899 break;
1900 case 0x31:
1901 /* ZAPNOT */
a7812ae4 1902 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
1903 break;
1904 case 0x32:
1905 /* MSKQL */
14ab1634 1906 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1907 break;
1908 case 0x34:
1909 /* SRL */
30c7183b
AJ
1910 if (likely(rc != 31)) {
1911 if (ra != 31) {
1912 if (islit)
1913 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1914 else {
a7812ae4 1915 TCGv shift = tcg_temp_new();
30c7183b
AJ
1916 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1917 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1918 tcg_temp_free(shift);
dfaa8583 1919 }
30c7183b
AJ
1920 } else
1921 tcg_gen_movi_i64(cpu_ir[rc], 0);
1922 }
4c9649a9
JM
1923 break;
1924 case 0x36:
1925 /* EXTQL */
377a43b6 1926 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1927 break;
1928 case 0x39:
1929 /* SLL */
30c7183b
AJ
1930 if (likely(rc != 31)) {
1931 if (ra != 31) {
1932 if (islit)
1933 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1934 else {
a7812ae4 1935 TCGv shift = tcg_temp_new();
30c7183b
AJ
1936 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1937 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1938 tcg_temp_free(shift);
dfaa8583 1939 }
30c7183b
AJ
1940 } else
1941 tcg_gen_movi_i64(cpu_ir[rc], 0);
1942 }
4c9649a9
JM
1943 break;
1944 case 0x3B:
1945 /* INSQL */
248c42f3 1946 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1947 break;
1948 case 0x3C:
1949 /* SRA */
30c7183b
AJ
1950 if (likely(rc != 31)) {
1951 if (ra != 31) {
1952 if (islit)
1953 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 1954 else {
a7812ae4 1955 TCGv shift = tcg_temp_new();
30c7183b
AJ
1956 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1957 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1958 tcg_temp_free(shift);
dfaa8583 1959 }
30c7183b
AJ
1960 } else
1961 tcg_gen_movi_i64(cpu_ir[rc], 0);
1962 }
4c9649a9
JM
1963 break;
1964 case 0x52:
1965 /* MSKWH */
ffec44f1 1966 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1967 break;
1968 case 0x57:
1969 /* INSWH */
50eb6e5c 1970 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1971 break;
1972 case 0x5A:
1973 /* EXTWH */
377a43b6 1974 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1975 break;
1976 case 0x62:
1977 /* MSKLH */
ffec44f1 1978 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1979 break;
1980 case 0x67:
1981 /* INSLH */
50eb6e5c 1982 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1983 break;
1984 case 0x6A:
1985 /* EXTLH */
377a43b6 1986 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1987 break;
1988 case 0x72:
1989 /* MSKQH */
ffec44f1 1990 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1991 break;
1992 case 0x77:
1993 /* INSQH */
50eb6e5c 1994 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1995 break;
1996 case 0x7A:
1997 /* EXTQH */
377a43b6 1998 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
1999 break;
2000 default:
2001 goto invalid_opc;
2002 }
2003 break;
2004 case 0x13:
2005 switch (fn7) {
2006 case 0x00:
2007 /* MULL */
30c7183b 2008 if (likely(rc != 31)) {
dfaa8583 2009 if (ra == 31)
30c7183b
AJ
2010 tcg_gen_movi_i64(cpu_ir[rc], 0);
2011 else {
2012 if (islit)
2013 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2014 else
2015 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2016 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2017 }
2018 }
4c9649a9
JM
2019 break;
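/* MULL above is the 32-bit multiply of the 64-bit register file: the
   product is formed and then sign-extended from bit 31, which is what
   the final tcg_gen_ext32s_i64() call implements.  A small host-side
   model of the architected result (illustrative name): */
static int64_t alpha_mull_model(uint64_t va, uint64_t vb)
{
    uint32_t lo = (uint32_t)(va * vb);     /* low 32 bits of the product */
    return (int64_t)(int32_t)lo;           /* sign-extend into 64 bits */
}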
2020 case 0x20:
2021 /* MULQ */
30c7183b 2022 if (likely(rc != 31)) {
dfaa8583 2023 if (ra == 31)
30c7183b
AJ
2024 tcg_gen_movi_i64(cpu_ir[rc], 0);
2025 else if (islit)
2026 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2027 else
2028 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2029 }
4c9649a9
JM
2030 break;
2031 case 0x30:
2032 /* UMULH */
a7812ae4 2033 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2034 break;
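/* UMULH needs the high 64 bits of the full 128-bit unsigned product,
   which gen_umulh() arranges to compute.  A portable sketch of that
   computation using 32-bit halves (illustrative, independent of the
   translator's own implementation): */
static uint64_t umulh_model(uint64_t a, uint64_t b)
{
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
    uint64_t p0 = a_lo * b_lo;
    uint64_t p1 = a_lo * b_hi;
    uint64_t p2 = a_hi * b_lo;
    uint64_t mid = (p0 >> 32) + (uint32_t)p1 + (uint32_t)p2;

    return a_hi * b_hi + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}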
2035 case 0x40:
2036 /* MULL/V */
a7812ae4 2037 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2038 break;
2039 case 0x60:
2040 /* MULQ/V */
a7812ae4 2041 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2042 break;
2043 default:
2044 goto invalid_opc;
2045 }
2046 break;
2047 case 0x14:
f24518b5 2048 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2049 case 0x04:
2050 /* ITOFS */
2051 if (!(ctx->amask & AMASK_FIX))
2052 goto invalid_opc;
f18cd223
AJ
2053 if (likely(rc != 31)) {
2054 if (ra != 31) {
a7812ae4 2055 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2056 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2057 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2058 tcg_temp_free_i32(tmp);
f18cd223
AJ
2059 } else
2060 tcg_gen_movi_i64(cpu_fir[rc], 0);
2061 }
4c9649a9
JM
2062 break;
2063 case 0x0A:
2064 /* SQRTF */
2065 if (!(ctx->amask & AMASK_FIX))
2066 goto invalid_opc;
a7812ae4 2067 gen_fsqrtf(rb, rc);
4c9649a9
JM
2068 break;
2069 case 0x0B:
2070 /* SQRTS */
2071 if (!(ctx->amask & AMASK_FIX))
2072 goto invalid_opc;
f24518b5 2073 gen_fsqrts(ctx, rb, rc, fn11);
4c9649a9
JM
2074 break;
2075 case 0x14:
2076 /* ITOFF */
2077 if (!(ctx->amask & AMASK_FIX))
2078 goto invalid_opc;
f18cd223
AJ
2079 if (likely(rc != 31)) {
2080 if (ra != 31) {
a7812ae4 2081 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2082 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2083 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2084 tcg_temp_free_i32(tmp);
f18cd223
AJ
2085 } else
2086 tcg_gen_movi_i64(cpu_fir[rc], 0);
2087 }
4c9649a9
JM
2088 break;
2089 case 0x24:
2090 /* ITOFT */
2091 if (!(ctx->amask & AMASK_FIX))
2092 goto invalid_opc;
f18cd223
AJ
2093 if (likely(rc != 31)) {
2094 if (ra != 31)
2095 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2096 else
2097 tcg_gen_movi_i64(cpu_fir[rc], 0);
2098 }
4c9649a9
JM
2099 break;
2100 case 0x2A:
2101 /* SQRTG */
2102 if (!(ctx->amask & AMASK_FIX))
2103 goto invalid_opc;
a7812ae4 2104 gen_fsqrtg(rb, rc);
4c9649a9
JM
2105 break;
2106 case 0x2B:
2107 /* SQRTT */
2108 if (!(ctx->amask & AMASK_FIX))
2109 goto invalid_opc;
f24518b5 2110 gen_fsqrtt(ctx, rb, rc, fn11);
4c9649a9
JM
2111 break;
2112 default:
2113 goto invalid_opc;
2114 }
2115 break;
2116 case 0x15:
2117 /* VAX floating point */
2118 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2119 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2120 case 0x00:
2121 /* ADDF */
a7812ae4 2122 gen_faddf(ra, rb, rc);
4c9649a9
JM
2123 break;
2124 case 0x01:
2125 /* SUBF */
a7812ae4 2126 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2127 break;
2128 case 0x02:
2129 /* MULF */
a7812ae4 2130 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2131 break;
2132 case 0x03:
2133 /* DIVF */
a7812ae4 2134 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2135 break;
2136 case 0x1E:
2137 /* CVTDG */
2138#if 0 // TODO
a7812ae4 2139 gen_fcvtdg(rb, rc);
4c9649a9
JM
2140#else
2141 goto invalid_opc;
2142#endif
2143 break;
2144 case 0x20:
2145 /* ADDG */
a7812ae4 2146 gen_faddg(ra, rb, rc);
4c9649a9
JM
2147 break;
2148 case 0x21:
2149 /* SUBG */
a7812ae4 2150 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2151 break;
2152 case 0x22:
2153 /* MULG */
a7812ae4 2154 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2155 break;
2156 case 0x23:
2157 /* DIVG */
a7812ae4 2158 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2159 break;
2160 case 0x25:
2161 /* CMPGEQ */
a7812ae4 2162 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2163 break;
2164 case 0x26:
2165 /* CMPGLT */
a7812ae4 2166 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2167 break;
2168 case 0x27:
2169 /* CMPGLE */
a7812ae4 2170 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2171 break;
2172 case 0x2C:
2173 /* CVTGF */
a7812ae4 2174 gen_fcvtgf(rb, rc);
4c9649a9
JM
2175 break;
2176 case 0x2D:
2177 /* CVTGD */
2178#if 0 // TODO
a7812ae4 2179 gen_fcvtgd(rb, rc);
4c9649a9
JM
2180#else
2181 goto invalid_opc;
2182#endif
2183 break;
2184 case 0x2F:
2185 /* CVTGQ */
a7812ae4 2186 gen_fcvtgq(rb, rc);
4c9649a9
JM
2187 break;
2188 case 0x3C:
2189 /* CVTQF */
a7812ae4 2190 gen_fcvtqf(rb, rc);
4c9649a9
JM
2191 break;
2192 case 0x3E:
2193 /* CVTQG */
a7812ae4 2194 gen_fcvtqg(rb, rc);
4c9649a9
JM
2195 break;
2196 default:
2197 goto invalid_opc;
2198 }
2199 break;
2200 case 0x16:
2201 /* IEEE floating-point */
f24518b5 2202 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2203 case 0x00:
2204 /* ADDS */
f24518b5 2205 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2206 break;
2207 case 0x01:
2208 /* SUBS */
f24518b5 2209 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2210 break;
2211 case 0x02:
2212 /* MULS */
f24518b5 2213 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2214 break;
2215 case 0x03:
2216 /* DIVS */
f24518b5 2217 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2218 break;
2219 case 0x20:
2220 /* ADDT */
f24518b5 2221 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2222 break;
2223 case 0x21:
2224 /* SUBT */
f24518b5 2225 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2226 break;
2227 case 0x22:
2228 /* MULT */
f24518b5 2229 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2230 break;
2231 case 0x23:
2232 /* DIVT */
f24518b5 2233 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2234 break;
2235 case 0x24:
2236 /* CMPTUN */
f24518b5 2237 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2238 break;
2239 case 0x25:
2240 /* CMPTEQ */
f24518b5 2241 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2242 break;
2243 case 0x26:
2244 /* CMPTLT */
f24518b5 2245 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2246 break;
2247 case 0x27:
2248 /* CMPTLE */
f24518b5 2249 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2250 break;
2251 case 0x2C:
a74b4d2c 2252 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2253 /* CVTST */
f24518b5 2254 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2255 } else {
2256 /* CVTTS */
f24518b5 2257 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2258 }
2259 break;
2260 case 0x2F:
2261 /* CVTTQ */
f24518b5 2262 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2263 break;
2264 case 0x3C:
2265 /* CVTQS */
f24518b5 2266 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2267 break;
2268 case 0x3E:
2269 /* CVTQT */
f24518b5 2270 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2271 break;
2272 default:
2273 goto invalid_opc;
2274 }
2275 break;
2276 case 0x17:
2277 switch (fn11) {
2278 case 0x010:
2279 /* CVTLQ */
a7812ae4 2280 gen_fcvtlq(rb, rc);
4c9649a9
JM
2281 break;
2282 case 0x020:
f18cd223 2283 if (likely(rc != 31)) {
a06d48d9 2284 if (ra == rb) {
4c9649a9 2285 /* FMOV */
a06d48d9
RH
2286 if (ra == 31)
2287 tcg_gen_movi_i64(cpu_fir[rc], 0);
2288 else
2289 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2290 } else {
f18cd223 2291 /* CPYS */
a7812ae4 2292 gen_fcpys(ra, rb, rc);
a06d48d9 2293 }
4c9649a9
JM
2294 }
2295 break;
2296 case 0x021:
2297 /* CPYSN */
a7812ae4 2298 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2299 break;
2300 case 0x022:
2301 /* CPYSE */
a7812ae4 2302 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2303 break;
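/* CPYS, CPYSN and CPYSE above are pure bit-field copies on the 64-bit
   register image (which is why they can be generated inline): CPYS takes
   the sign bit from Fa and everything else from Fb, CPYSN does the same
   with the sign inverted, and CPYSE takes the sign and 11-bit exponent
   from Fa with the 52-bit fraction from Fb.  Host-side models with
   illustrative names (not this file's gen_fcpys* generators): */
static uint64_t cpys_model(uint64_t va, uint64_t vb)
{
    return (va & 0x8000000000000000ULL) | (vb & 0x7fffffffffffffffULL);
}

static uint64_t cpysn_model(uint64_t va, uint64_t vb)
{
    return (~va & 0x8000000000000000ULL) | (vb & 0x7fffffffffffffffULL);
}

static uint64_t cpyse_model(uint64_t va, uint64_t vb)
{
    return (va & 0xfff0000000000000ULL) | (vb & 0x000fffffffffffffULL);
}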
2304 case 0x024:
2305 /* MT_FPCR */
f18cd223 2306 if (likely(ra != 31))
a7812ae4 2307 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2308 else {
2309 TCGv tmp = tcg_const_i64(0);
a7812ae4 2310 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2311 tcg_temp_free(tmp);
2312 }
4c9649a9
JM
2313 break;
2314 case 0x025:
2315 /* MF_FPCR */
f18cd223 2316 if (likely(ra != 31))
a7812ae4 2317 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2318 break;
2319 case 0x02A:
2320 /* FCMOVEQ */
bbe1dab4 2321 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2322 break;
2323 case 0x02B:
2324 /* FCMOVNE */
bbe1dab4 2325 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2326 break;
2327 case 0x02C:
2328 /* FCMOVLT */
bbe1dab4 2329 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2330 break;
2331 case 0x02D:
2332 /* FCMOVGE */
bbe1dab4 2333 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2334 break;
2335 case 0x02E:
2336 /* FCMOVLE */
bbe1dab4 2337 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2338 break;
2339 case 0x02F:
2340 /* FCMOVGT */
bbe1dab4 2341 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2342 break;
2343 case 0x030:
2344 /* CVTQL */
a7812ae4 2345 gen_fcvtql(rb, rc);
4c9649a9
JM
2346 break;
2347 case 0x130:
2348 /* CVTQL/V */
4c9649a9
JM
2349 case 0x530:
2350 /* CVTQL/SV */
735cf45f
RH
2351 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2352 /v doesn't do. The only thing I can think of is that /sv is a
2353 valid instruction merely for completeness in the ISA. */
2354 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2355 break;
2356 default:
2357 goto invalid_opc;
2358 }
2359 break;
2360 case 0x18:
2361 switch ((uint16_t)disp16) {
2362 case 0x0000:
2363 /* TRAPB */
2364 /* No-op. Just exit from the current tb */
2365 ret = 2;
2366 break;
2367 case 0x0400:
2368 /* EXCB */
2369 /* No-op. Just exit from the current tb */
2370 ret = 2;
2371 break;
2372 case 0x4000:
2373 /* MB */
2374 /* No-op */
2375 break;
2376 case 0x4400:
2377 /* WMB */
2378 /* No-op */
2379 break;
2380 case 0x8000:
2381 /* FETCH */
2382 /* No-op */
2383 break;
2384 case 0xA000:
2385 /* FETCH_M */
2386 /* No-op */
2387 break;
2388 case 0xC000:
2389 /* RPCC */
3761035f 2390 if (ra != 31)
a7812ae4 2391 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2392 break;
2393 case 0xE000:
2394 /* RC */
3761035f 2395 if (ra != 31)
a7812ae4 2396 gen_helper_rc(cpu_ir[ra]);
4c9649a9
JM
2397 break;
2398 case 0xE800:
2399 /* ECB */
4c9649a9
JM
2400 break;
2401 case 0xF000:
2402 /* RS */
3761035f 2403 if (ra != 31)
a7812ae4 2404 gen_helper_rs(cpu_ir[ra]);
4c9649a9
JM
2405 break;
2406 case 0xF800:
2407 /* WH64 */
2408 /* No-op */
2409 break;
2410 default:
2411 goto invalid_opc;
2412 }
2413 break;
2414 case 0x19:
2415 /* HW_MFPR (PALcode) */
2416#if defined (CONFIG_USER_ONLY)
2417 goto invalid_opc;
2418#else
2419 if (!ctx->pal_mode)
2420 goto invalid_opc;
8bb6e981
AJ
2421 if (ra != 31) {
2422 TCGv tmp = tcg_const_i32(insn & 0xFF);
a7812ae4 2423 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
8bb6e981
AJ
2424 tcg_temp_free(tmp);
2425 }
4c9649a9
JM
2426 break;
2427#endif
2428 case 0x1A:
3761035f
AJ
2429 if (rb != 31)
2430 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2431 else
2432 tcg_gen_movi_i64(cpu_pc, 0);
1304ca87
AJ
2433 if (ra != 31)
2434 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
4c9649a9
JM
2435 /* These four jumps differ only in the branch prediction hint */
2436 switch (fn2) {
2437 case 0x0:
2438 /* JMP */
2439 break;
2440 case 0x1:
2441 /* JSR */
2442 break;
2443 case 0x2:
2444 /* RET */
2445 break;
2446 case 0x3:
2447 /* JSR_COROUTINE */
2448 break;
2449 }
2450 ret = 1;
2451 break;
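/* The four jump variants above all do the same work: the new PC is Rb
   with its low two bits cleared, and Ra (unless it is R31) receives the
   address of the following instruction; fn2 only carries a branch
   prediction hint, which this translator ignores.  A small host-side
   model (illustrative names): */
static uint64_t alpha_jmp_model(uint64_t *regs, int ra, int rb,
                                uint64_t next_pc)
{
    uint64_t target = (rb == 31) ? 0 : (regs[rb] & ~(uint64_t)3);

    if (ra != 31) {
        regs[ra] = next_pc;             /* return address */
    }
    return target;                      /* value for the new PC */
}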
2452 case 0x1B:
2453 /* HW_LD (PALcode) */
2454#if defined (CONFIG_USER_ONLY)
2455 goto invalid_opc;
2456#else
2457 if (!ctx->pal_mode)
2458 goto invalid_opc;
8bb6e981 2459 if (ra != 31) {
a7812ae4 2460 TCGv addr = tcg_temp_new();
8bb6e981
AJ
2461 if (rb != 31)
2462 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2463 else
2464 tcg_gen_movi_i64(addr, disp12);
2465 switch ((insn >> 12) & 0xF) {
2466 case 0x0:
b5d51029 2467 /* Longword physical access (hw_ldl/p) */
a7812ae4 2468 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2469 break;
2470 case 0x1:
b5d51029 2471 /* Quadword physical access (hw_ldq/p) */
a7812ae4 2472 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2473 break;
2474 case 0x2:
b5d51029 2475 /* Longword physical access with lock (hw_ldl_l/p) */
a7812ae4 2476 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2477 break;
2478 case 0x3:
b5d51029 2479 /* Quadword physical access with lock (hw_ldq_l/p) */
a7812ae4 2480 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2481 break;
2482 case 0x4:
b5d51029
AJ
2483 /* Longword virtual PTE fetch (hw_ldl/v) */
2484 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2485 break;
2486 case 0x5:
b5d51029
AJ
2487 /* Quadword virtual PTE fetch (hw_ldq/v) */
2488 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2489 break;
2490 case 0x6:
2491 /* Invalid */
b5d51029 2492 goto invalid_opc;
8bb6e981
AJ
2493 case 0x7:
2494 /* Invalid */
b5d51029 2495 goto invalid_opc;
8bb6e981 2496 case 0x8:
b5d51029 2497 /* Longword virtual access (hw_ldl) */
a7812ae4
PB
2498 gen_helper_st_virt_to_phys(addr, addr);
2499 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2500 break;
2501 case 0x9:
b5d51029 2502 /* Quadword virtual access (hw_ldq) */
a7812ae4
PB
2503 gen_helper_st_virt_to_phys(addr, addr);
2504 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2505 break;
2506 case 0xA:
b5d51029
AJ
2507 /* Longword virtual access with protection check (hw_ldl/w) */
2508 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2509 break;
2510 case 0xB:
b5d51029
AJ
2511 /* Quadword virtual access with protection check (hw_ldq/w) */
2512 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2513 break;
2514 case 0xC:
b5d51029 2515 /* Longword virtual access with alt access mode (hw_ldl/a) */
a7812ae4
PB
2516 gen_helper_set_alt_mode();
2517 gen_helper_st_virt_to_phys(addr, addr);
2518 gen_helper_ldl_raw(cpu_ir[ra], addr);
2519 gen_helper_restore_mode();
8bb6e981
AJ
2520 break;
2521 case 0xD:
b5d51029 2522 /* Quadword virtual access with alt access mode (hw_ldq/a) */
a7812ae4
PB
2523 gen_helper_set_alt_mode();
2524 gen_helper_st_virt_to_phys(addr, addr);
2525 gen_helper_ldq_raw(cpu_ir[ra], addr);
2526 gen_helper_restore_mode();
8bb6e981
AJ
2527 break;
2528 case 0xE:
2529 /* Longword virtual access with alternate access mode and
b5d51029 2530 * protection checks (hw_ldl/wa)
8bb6e981 2531 */
a7812ae4
PB
2532 gen_helper_set_alt_mode();
2533 gen_helper_ldl_data(cpu_ir[ra], addr);
2534 gen_helper_restore_mode();
8bb6e981
AJ
2535 break;
2536 case 0xF:
2537 /* Quadword virtual access with alternate access mode and
b5d51029 2538 * protection checks (hw_ldq/wa)
8bb6e981 2539 */
a7812ae4
PB
2540 gen_helper_set_alt_mode();
2541 gen_helper_ldq_data(cpu_ir[ra], addr);
2542 gen_helper_restore_mode();
8bb6e981
AJ
2543 break;
2544 }
2545 tcg_temp_free(addr);
4c9649a9 2546 }
4c9649a9
JM
2547 break;
2548#endif
2549 case 0x1C:
2550 switch (fn7) {
2551 case 0x00:
2552 /* SEXTB */
2553 if (!(ctx->amask & AMASK_BWX))
2554 goto invalid_opc;
ae8ecd42
AJ
2555 if (likely(rc != 31)) {
2556 if (islit)
2557 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2558 else
dfaa8583 2559 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2560 }
4c9649a9
JM
2561 break;
2562 case 0x01:
2563 /* SEXTW */
2564 if (!(ctx->amask & AMASK_BWX))
2565 goto invalid_opc;
ae8ecd42
AJ
2566 if (likely(rc != 31)) {
2567 if (islit)
2568 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
ae8ecd42 2569 else
dfaa8583 2570 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2571 }
4c9649a9
JM
2572 break;
2573 case 0x30:
2574 /* CTPOP */
2575 if (!(ctx->amask & AMASK_CIX))
2576 goto invalid_opc;
ae8ecd42
AJ
2577 if (likely(rc != 31)) {
2578 if (islit)
2579 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
ae8ecd42 2580 else
a7812ae4 2581 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2582 }
4c9649a9
JM
2583 break;
2584 case 0x31:
2585 /* PERR */
2586 if (!(ctx->amask & AMASK_MVI))
2587 goto invalid_opc;
13e4df99 2588 gen_perr(ra, rb, rc, islit, lit);
4c9649a9
JM
2589 break;
2590 case 0x32:
2591 /* CTLZ */
2592 if (!(ctx->amask & AMASK_CIX))
2593 goto invalid_opc;
ae8ecd42
AJ
2594 if (likely(rc != 31)) {
2595 if (islit)
2596 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
ae8ecd42 2597 else
a7812ae4 2598 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2599 }
4c9649a9
JM
2600 break;
2601 case 0x33:
2602 /* CTTZ */
2603 if (!(ctx->amask & AMASK_CIX))
2604 goto invalid_opc;
ae8ecd42
AJ
2605 if (likely(rc != 31)) {
2606 if (islit)
2607 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
ae8ecd42 2608 else
a7812ae4 2609 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2610 }
4c9649a9
JM
2611 break;
2612 case 0x34:
2613 /* UNPKBW */
2614 if (!(ctx->amask & AMASK_MVI))
2615 goto invalid_opc;
13e4df99
RH
2616 if (real_islit || ra != 31)
2617 goto invalid_opc;
2618 gen_unpkbw (rb, rc);
4c9649a9
JM
2619 break;
2620 case 0x35:
13e4df99 2621 /* UNPKBL */
4c9649a9
JM
2622 if (!(ctx->amask & AMASK_MVI))
2623 goto invalid_opc;
13e4df99
RH
2624 if (real_islit || ra != 31)
2625 goto invalid_opc;
2626 gen_unpkbl (rb, rc);
4c9649a9
JM
2627 break;
2628 case 0x36:
2629 /* PKWB */
2630 if (!(ctx->amask & AMASK_MVI))
2631 goto invalid_opc;
13e4df99
RH
2632 if (real_islit || ra != 31)
2633 goto invalid_opc;
2634 gen_pkwb (rb, rc);
4c9649a9
JM
2635 break;
2636 case 0x37:
2637 /* PKLB */
2638 if (!(ctx->amask & AMASK_MVI))
2639 goto invalid_opc;
13e4df99
RH
2640 if (real_islit || ra != 31)
2641 goto invalid_opc;
2642 gen_pklb (rb, rc);
4c9649a9
JM
2643 break;
2644 case 0x38:
2645 /* MINSB8 */
2646 if (!(ctx->amask & AMASK_MVI))
2647 goto invalid_opc;
13e4df99 2648 gen_minsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2649 break;
2650 case 0x39:
2651 /* MINSW4 */
2652 if (!(ctx->amask & AMASK_MVI))
2653 goto invalid_opc;
13e4df99 2654 gen_minsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2655 break;
2656 case 0x3A:
2657 /* MINUB8 */
2658 if (!(ctx->amask & AMASK_MVI))
2659 goto invalid_opc;
13e4df99 2660 gen_minub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2661 break;
2662 case 0x3B:
2663 /* MINUW4 */
2664 if (!(ctx->amask & AMASK_MVI))
2665 goto invalid_opc;
13e4df99 2666 gen_minuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2667 break;
2668 case 0x3C:
2669 /* MAXUB8 */
2670 if (!(ctx->amask & AMASK_MVI))
2671 goto invalid_opc;
13e4df99 2672 gen_maxub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2673 break;
2674 case 0x3D:
2675 /* MAXUW4 */
2676 if (!(ctx->amask & AMASK_MVI))
2677 goto invalid_opc;
13e4df99 2678 gen_maxuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2679 break;
2680 case 0x3E:
2681 /* MAXSB8 */
2682 if (!(ctx->amask & AMASK_MVI))
2683 goto invalid_opc;
13e4df99 2684 gen_maxsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2685 break;
2686 case 0x3F:
2687 /* MAXSW4 */
2688 if (!(ctx->amask & AMASK_MVI))
2689 goto invalid_opc;
13e4df99 2690 gen_maxsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2691 break;
2692 case 0x70:
2693 /* FTOIT */
2694 if (!(ctx->amask & AMASK_FIX))
2695 goto invalid_opc;
f18cd223
AJ
2696 if (likely(rc != 31)) {
2697 if (ra != 31)
2698 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2699 else
2700 tcg_gen_movi_i64(cpu_ir[rc], 0);
2701 }
4c9649a9
JM
2702 break;
2703 case 0x78:
2704 /* FTOIS */
2705 if (!(ctx->amask & AMASK_FIX))
2706 goto invalid_opc;
f18cd223 2707 if (rc != 31) {
a7812ae4 2708 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 2709 if (ra != 31)
a7812ae4 2710 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
2711 else {
2712 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2713 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
2714 tcg_temp_free(tmp2);
2715 }
2716 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 2717 tcg_temp_free_i32(tmp1);
f18cd223 2718 }
4c9649a9
JM
2719 break;
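/* FTOIS above moves an S-format value from the fp register file to an
   integer register.  S values are kept widened to the T-format register
   layout, so the helper has to re-pack them into the 32-bit memory form
   and sign-extend the result.  A model of that re-packing, assuming the
   standard Alpha S/T register mapping (sign and high exponent bit in
   bits 63:62, the remaining 30 bits in 58:29); the name is illustrative
   and this is not the gen_helper_s_to_memory implementation: */
static int64_t ftois_model(uint64_t fa)
{
    uint32_t s = (uint32_t)(((fa >> 32) & 0xc0000000u)      /* bits 63:62 */
                            | ((fa >> 29) & 0x3fffffffu));  /* bits 58:29 */
    return (int64_t)(int32_t)s;
}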
2720 default:
2721 goto invalid_opc;
2722 }
2723 break;
2724 case 0x1D:
2725 /* HW_MTPR (PALcode) */
2726#if defined (CONFIG_USER_ONLY)
2727 goto invalid_opc;
2728#else
2729 if (!ctx->pal_mode)
2730 goto invalid_opc;
8bb6e981
AJ
2731 else {
2732 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2733 if (ra != 31)
a7812ae4 2734 gen_helper_mtpr(tmp1, cpu_ir[ra]);
8bb6e981
AJ
2735 else {
2736 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2737 gen_helper_mtpr(tmp1, tmp2);
8bb6e981
AJ
2738 tcg_temp_free(tmp2);
2739 }
2740 tcg_temp_free(tmp1);
2741 ret = 2;
2742 }
4c9649a9
JM
2743 break;
2744#endif
2745 case 0x1E:
2746 /* HW_REI (PALcode) */
2747#if defined (CONFIG_USER_ONLY)
2748 goto invalid_opc;
2749#else
2750 if (!ctx->pal_mode)
2751 goto invalid_opc;
2752 if (rb == 31) {
2753 /* "Old" alpha */
a7812ae4 2754 gen_helper_hw_rei();
4c9649a9 2755 } else {
8bb6e981
AJ
2756 TCGv tmp;
2757
2758 if (ra != 31) {
a7812ae4 2759 tmp = tcg_temp_new();
8bb6e981
AJ
2760 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2761 } else
2762 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
a7812ae4 2763 gen_helper_hw_ret(tmp);
8bb6e981 2764 tcg_temp_free(tmp);
4c9649a9
JM
2765 }
2766 ret = 2;
2767 break;
2768#endif
2769 case 0x1F:
2770 /* HW_ST (PALcode) */
2771#if defined (CONFIG_USER_ONLY)
2772 goto invalid_opc;
2773#else
2774 if (!ctx->pal_mode)
2775 goto invalid_opc;
8bb6e981
AJ
2776 else {
2777 TCGv addr, val;
a7812ae4 2778 addr = tcg_temp_new();
8bb6e981
AJ
2779 if (rb != 31)
2780 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2781 else
2782 tcg_gen_movi_i64(addr, disp12);
2783 if (ra != 31)
2784 val = cpu_ir[ra];
2785 else {
a7812ae4 2786 val = tcg_temp_new();
8bb6e981
AJ
2787 tcg_gen_movi_i64(val, 0);
2788 }
2789 switch ((insn >> 12) & 0xF) {
2790 case 0x0:
2791 /* Longword physical access */
a7812ae4 2792 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2793 break;
2794 case 0x1:
2795 /* Quadword physical access */
a7812ae4 2796 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2797 break;
2798 case 0x2:
2799 /* Longword physical access with lock */
a7812ae4 2800 gen_helper_stl_c_raw(val, val, addr);
8bb6e981
AJ
2801 break;
2802 case 0x3:
2803 /* Quadword physical access with lock */
a7812ae4 2804 gen_helper_stq_c_raw(val, val, addr);
8bb6e981
AJ
2805 break;
2806 case 0x4:
2807 /* Longword virtual access */
a7812ae4
PB
2808 gen_helper_st_virt_to_phys(addr, addr);
2809 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2810 break;
2811 case 0x5:
2812 /* Quadword virtual access */
a7812ae4
PB
2813 gen_helper_st_virt_to_phys(addr, addr);
2814 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2815 break;
2816 case 0x6:
2817 /* Invalid */
2818 goto invalid_opc;
2819 case 0x7:
2820 /* Invalid */
2821 goto invalid_opc;
2822 case 0x8:
2823 /* Invalid */
2824 goto invalid_opc;
2825 case 0x9:
2826 /* Invalid */
2827 goto invalid_opc;
2828 case 0xA:
2829 /* Invalid */
2830 goto invalid_opc;
2831 case 0xB:
2832 /* Invalid */
2833 goto invalid_opc;
2834 case 0xC:
2835 /* Longword virtual access with alternate access mode */
a7812ae4
PB
2836 gen_helper_set_alt_mode();
2837 gen_helper_st_virt_to_phys(addr, addr);
2838 gen_helper_stl_raw(val, addr);
2839 gen_helper_restore_mode();
8bb6e981
AJ
2840 break;
2841 case 0xD:
2842 /* Quadword virtual access with alternate access mode */
a7812ae4
PB
2843 gen_helper_set_alt_mode();
2844 gen_helper_st_virt_to_phys(addr, addr);
2845 gen_helper_stq_raw(val, addr);
2846 gen_helper_restore_mode();
8bb6e981
AJ
2847 break;
2848 case 0xE:
2849 /* Invalid */
2850 goto invalid_opc;
2851 case 0xF:
2852 /* Invalid */
2853 goto invalid_opc;
2854 }
45d46ce8 2855 if (ra == 31)
8bb6e981
AJ
2856 tcg_temp_free(val);
2857 tcg_temp_free(addr);
4c9649a9 2858 }
4c9649a9
JM
2859 break;
2860#endif
2861 case 0x20:
2862 /* LDF */
f18cd223 2863 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2864 break;
2865 case 0x21:
2866 /* LDG */
f18cd223 2867 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2868 break;
2869 case 0x22:
2870 /* LDS */
f18cd223 2871 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2872 break;
2873 case 0x23:
2874 /* LDT */
f18cd223 2875 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2876 break;
2877 case 0x24:
2878 /* STF */
57a92c8e 2879 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2880 break;
2881 case 0x25:
2882 /* STG */
57a92c8e 2883 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2884 break;
2885 case 0x26:
2886 /* STS */
57a92c8e 2887 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2888 break;
2889 case 0x27:
2890 /* STT */
57a92c8e 2891 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2892 break;
2893 case 0x28:
2894 /* LDL */
f18cd223 2895 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2896 break;
2897 case 0x29:
2898 /* LDQ */
f18cd223 2899 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2900 break;
2901 case 0x2A:
2902 /* LDL_L */
f4ed8679 2903 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2904 break;
2905 case 0x2B:
2906 /* LDQ_L */
f4ed8679 2907 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2908 break;
2909 case 0x2C:
2910 /* STL */
57a92c8e 2911 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
2912 break;
2913 case 0x2D:
2914 /* STQ */
57a92c8e 2915 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
2916 break;
2917 case 0x2E:
2918 /* STL_C */
57a92c8e 2919 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
2920 break;
2921 case 0x2F:
2922 /* STQ_C */
57a92c8e 2923 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
2924 break;
2925 case 0x30:
2926 /* BR */
3761035f
AJ
2927 if (ra != 31)
2928 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2929 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
4c9649a9
JM
2930 ret = 1;
2931 break;
a7812ae4 2932 case 0x31: /* FBEQ */
dbb30fe6
RH
2933 gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2934 ret = 1;
2935 break;
a7812ae4 2936 case 0x32: /* FBLT */
dbb30fe6
RH
2937 gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2938 ret = 1;
2939 break;
a7812ae4 2940 case 0x33: /* FBLE */
dbb30fe6 2941 gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
2942 ret = 1;
2943 break;
2944 case 0x34:
2945 /* BSR */
3761035f
AJ
2946 if (ra != 31)
2947 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2948 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
4c9649a9
JM
2949 ret = 1;
2950 break;
a7812ae4 2951 case 0x35: /* FBNE */
dbb30fe6
RH
2952 gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2953 ret = 1;
2954 break;
a7812ae4 2955 case 0x36: /* FBGE */
dbb30fe6
RH
2956 gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2957 ret = 1;
2958 break;
a7812ae4 2959 case 0x37: /* FBGT */
dbb30fe6 2960 gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
2961 ret = 1;
2962 break;
2963 case 0x38:
2964 /* BLBC */
a1516744 2965 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
2966 ret = 1;
2967 break;
2968 case 0x39:
2969 /* BEQ */
a1516744 2970 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
2971 ret = 1;
2972 break;
2973 case 0x3A:
2974 /* BLT */
a1516744 2975 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
2976 ret = 1;
2977 break;
2978 case 0x3B:
2979 /* BLE */
a1516744 2980 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
2981 ret = 1;
2982 break;
2983 case 0x3C:
2984 /* BLBS */
a1516744 2985 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
2986 ret = 1;
2987 break;
2988 case 0x3D:
2989 /* BNE */
a1516744 2990 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
2991 ret = 1;
2992 break;
2993 case 0x3E:
2994 /* BGE */
a1516744 2995 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
2996 ret = 1;
2997 break;
2998 case 0x3F:
2999 /* BGT */
a1516744 3000 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3001 ret = 1;
3002 break;
3003 invalid_opc:
3004 gen_invalid(ctx);
3005 ret = 3;
3006 break;
3007 }
3008
3009 return ret;
3010}
3011
636aa200
BS
3012static inline void gen_intermediate_code_internal(CPUState *env,
3013 TranslationBlock *tb,
3014 int search_pc)
4c9649a9 3015{
4c9649a9
JM
3016 DisasContext ctx, *ctxp = &ctx;
3017 target_ulong pc_start;
3018 uint32_t insn;
3019 uint16_t *gen_opc_end;
a1d1bb31 3020 CPUBreakpoint *bp;
4c9649a9
JM
3021 int j, lj = -1;
3022 int ret;
2e70f6ef
PB
3023 int num_insns;
3024 int max_insns;
4c9649a9
JM
3025
3026 pc_start = tb->pc;
4c9649a9 3027 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4c9649a9
JM
3028 ctx.pc = pc_start;
3029 ctx.amask = env->amask;
8579095b 3030 ctx.env = env;
4c9649a9
JM
3031#if defined (CONFIG_USER_ONLY)
3032 ctx.mem_idx = 0;
3033#else
3034 ctx.mem_idx = ((env->ps >> 3) & 3);
3035 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
3036#endif
f24518b5
RH
3037
3038 /* ??? Every TB begins with the rounding mode unset, to be initialized
3039 on the first fp insn of the TB (see the sketch after this function).
3040 Alternatively we could define a proper default for every TB (e.g.
3041 QUAL_RM_N or QUAL_RM_D) and make sure to reset the FP_STATUS to that
3042 default at the end of any TB that changes it. We could even (gasp)
3043 dynamically figure out what default would suit the running program. */
3044 ctx.tb_rm = -1;
3045 /* Similarly for flush-to-zero. */
3046 ctx.tb_ftz = -1;
3047
2e70f6ef
PB
3048 num_insns = 0;
3049 max_insns = tb->cflags & CF_COUNT_MASK;
3050 if (max_insns == 0)
3051 max_insns = CF_COUNT_MASK;
3052
3053 gen_icount_start();
4c9649a9 3054 for (ret = 0; ret == 0;) {
72cf2d4f
BS
3055 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3056 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3057 if (bp->pc == ctx.pc) {
4c9649a9
JM
3058 gen_excp(&ctx, EXCP_DEBUG, 0);
3059 break;
3060 }
3061 }
3062 }
3063 if (search_pc) {
3064 j = gen_opc_ptr - gen_opc_buf;
3065 if (lj < j) {
3066 lj++;
3067 while (lj < j)
3068 gen_opc_instr_start[lj++] = 0;
4c9649a9 3069 }
ed1dda53
AJ
3070 gen_opc_pc[lj] = ctx.pc;
3071 gen_opc_instr_start[lj] = 1;
3072 gen_opc_icount[lj] = num_insns;
4c9649a9 3073 }
2e70f6ef
PB
3074 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3075 gen_io_start();
4c9649a9 3076 insn = ldl_code(ctx.pc);
2e70f6ef 3077 num_insns++;
c4b3be39
RH
3078
3079 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3080 tcg_gen_debug_insn_start(ctx.pc);
3081 }
3082
4c9649a9
JM
3083 ctx.pc += 4;
3084 ret = translate_one(ctxp, insn);
3085 if (ret != 0)
3086 break;
3087 /* if we reach a page boundary or are single stepping, stop
3088 * generation
3089 */
19bf517b
AJ
3090 if (env->singlestep_enabled) {
3091 gen_excp(&ctx, EXCP_DEBUG, 0);
3092 break;
1b530a6d 3093 }
19bf517b 3094
8fcc55f9
AJ
3095 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
3096 break;
3097
3098 if (gen_opc_ptr >= gen_opc_end)
3099 break;
3100
3101 if (num_insns >= max_insns)
3102 break;
3103
1b530a6d
AJ
3104 if (singlestep) {
3105 break;
3106 }
4c9649a9
JM
3107 }
3108 if (ret != 1 && ret != 3) {
496cb5b9 3109 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4c9649a9 3110 }
2e70f6ef
PB
3111 if (tb->cflags & CF_LAST_IO)
3112 gen_io_end();
4c9649a9 3113 /* Generate the return instruction */
57fec1fe 3114 tcg_gen_exit_tb(0);
2e70f6ef 3115 gen_icount_end(tb, num_insns);
4c9649a9
JM
3116 *gen_opc_ptr = INDEX_op_end;
3117 if (search_pc) {
3118 j = gen_opc_ptr - gen_opc_buf;
3119 lj++;
3120 while (lj <= j)
3121 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3122 } else {
3123 tb->size = ctx.pc - pc_start;
2e70f6ef 3124 tb->icount = num_insns;
4c9649a9 3125 }
806991da 3126#ifdef DEBUG_DISAS
8fec2b8c 3127 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3128 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3129 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3130 qemu_log("\n");
4c9649a9 3131 }
4c9649a9 3132#endif
4c9649a9
JM
3133}
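/* A minimal sketch of the lazy rounding-mode scheme described by the
   "??? Every TB begins ..." comment in gen_intermediate_code_internal,
   using illustrative names (the real qualifier handling is done by the
   fp generators called from translate_one): each fp insn compares its
   rounding-mode qualifier with the value cached in the DisasContext and
   only emits a mode switch when it differs, so a run of insns using the
   same /rm qualifier costs a single mode change per TB. */
static void qual_roundmode_sketch(DisasContext *ctx, int rm,
                                  void (*emit_set_round_mode)(int))
{
    if (rm != ctx->tb_rm) {
        ctx->tb_rm = rm;
        emit_set_round_mode(rm);        /* hypothetical emitter callback */
    }
}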
3134
2cfc5f17 3135void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3136{
2cfc5f17 3137 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3138}
3139
2cfc5f17 3140void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3141{
2cfc5f17 3142 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3143}
3144
a964acc6
RH
3145struct cpu_def_t {
3146 const char *name;
3147 int implver, amask;
3148};
3149
3150static const struct cpu_def_t cpu_defs[] = {
3151 { "ev4", IMPLVER_2106x, 0 },
3152 { "ev5", IMPLVER_21164, 0 },
3153 { "ev56", IMPLVER_21164, AMASK_BWX },
3154 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3155 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3156 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3157 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3158 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3159 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3160 { "21064", IMPLVER_2106x, 0 },
3161 { "21164", IMPLVER_21164, 0 },
3162 { "21164a", IMPLVER_21164, AMASK_BWX },
3163 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3164 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3165 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3166 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3167};
3168
aaed909a 3169CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3170{
3171 CPUAlphaState *env;
a964acc6 3172 int implver, amask, i, max;
4c9649a9
JM
3173
3174 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3175 cpu_exec_init(env);
2e70f6ef 3176 alpha_translate_init();
4c9649a9 3177 tlb_flush(env, 1);
a964acc6
RH
3178
3179 /* Default to ev67; no reason not to emulate all insn extensions by default. */
3180 implver = IMPLVER_21264;
3181 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3182 | AMASK_TRAP | AMASK_PREFETCH);
3183
3184 max = ARRAY_SIZE(cpu_defs);
3185 for (i = 0; i < max; i++) {
3186 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3187 implver = cpu_defs[i].implver;
3188 amask = cpu_defs[i].amask;
3189 break;
3190 }
3191 }
3192 env->implver = implver;
3193 env->amask = amask;
3194
4c9649a9
JM
3195 env->ps = 0x1F00;
3196#if defined (CONFIG_USER_ONLY)
3197 env->ps |= 1 << 3;
2edd07ef
RH
3198 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3199 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3200#else
4c9649a9 3201 pal_init(env);
6049f4f8 3202#endif
dad081ee 3203
4c9649a9 3204 /* Initialize IPR */
dad081ee
RH
3205#if defined (CONFIG_USER_ONLY)
3206 env->ipr[IPR_EXC_ADDR] = 0;
3207 env->ipr[IPR_EXC_SUM] = 0;
3208 env->ipr[IPR_EXC_MASK] = 0;
3209#else
3210 {
f88fe4e3
BS
3211 // uint64_t hwpcb;
3212 // hwpcb = env->ipr[IPR_PCBB];
dad081ee
RH
3213 env->ipr[IPR_ASN] = 0;
3214 env->ipr[IPR_ASTEN] = 0;
3215 env->ipr[IPR_ASTSR] = 0;
3216 env->ipr[IPR_DATFX] = 0;
3217 /* XXX: fix this */
3218 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3219 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3220 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3221 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3222 env->ipr[IPR_FEN] = 0;
3223 env->ipr[IPR_IPL] = 31;
3224 env->ipr[IPR_MCES] = 0;
3225 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3226 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3227 env->ipr[IPR_SISR] = 0;
3228 env->ipr[IPR_VIRBND] = -1ULL;
3229 }
3230#endif
4c9649a9 3231
0bf46a40 3232 qemu_init_vcpu(env);
4c9649a9
JM
3233 return env;
3234}
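/* Example use of the initializer above: any model name present in
   cpu_defs[] selects the corresponding implver/amask pair, while an
   unrecognized string silently falls back to the ev67-class defaults.
   The wrapper below is purely illustrative, not part of this file. */
static CPUAlphaState *example_init_default_cpu(void)
{
    return cpu_alpha_init("ev67");
}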
aaed909a 3235
d2856f1a
AJ
3236void gen_pc_load(CPUState *env, TranslationBlock *tb,
3237 unsigned long searched_pc, int pc_pos, void *puc)
3238{
3239 env->pc = gen_opc_pc[pc_pos];
3240}