/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
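/* Note: the buffer above is sized exactly for the NUL-terminated names
   generated in alpha_translate_init below: "ir0".."ir9" take 4 bytes each
   (10*4) and "ir10".."ir30" take 5 (21*5); likewise "fir0".."fir9" (10*5)
   and "fir10".."fir30" (21*6).  */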

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
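/* Note for the system-mode path above: cpu_lock_addr is reset to -1 on both
   the success and the failure exits.  Since -1 never matches a real
   load-locked address, a subsequent STx_C without an intervening LDx_L takes
   the fail path and writes 0 into ra.  */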

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
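/* Example of the >= / < mapping above: for src == -0.0 (0x8000000000000000)
   the setcond produces 0, so the neg/and sequence yields +0.0 and the branch
   sees a value that compares >= 0; for any other src the setcond produces 1,
   the neg gives an all-ones mask, and src passes through unchanged.  */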

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
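/* The shift/mask sequence above unpacks the "longword in FP register" layout
   used by CVTLQ: register bits <63:62> supply the result's bits <31:30> and
   register bits <58:29> supply bits <29:0>, with the sign bit then replicated
   into the upper 32 bits of the destination.  */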

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
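/* For example, lit == 0x03 selects bytes 0 and 1 and yields the mask
   0x000000000000ffff, while lit == 0xf0 selects the upper four bytes and
   yields 0xffffffff00000000.  */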

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}


/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
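/* Worked example for the variable-shift path above: if (B & 7) == 2 the
   shift temp becomes ~(2*8) & 63 == 47, so the value is shifted right by 47
   and then by 1, i.e. 64 - 16 == 48 bits in total; if (B & 7) == 0 the temp
   is 63 and the extra 1-bit shift brings the total to 64, producing the
   required zero without relying on an out-of-range shift count.  */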

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
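    /* The shift-left/arithmetic-shift-right pairs above sign-extend the
       21-bit branch displacement and the 12-bit memory-format displacement
       out of the 32-bit intermediate; disp16 gets the same effect from the
       plain (int16_t) cast.  */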
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            ret = gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xBF);
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx != MMU_KERNEL_IDX) {
                goto invalid_opc;
            }
            ret = gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3F);
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
1727 case 0x1D:
1728 /* CMPULT */
01ff9cc8 1729 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1730 break;
1731 case 0x20:
1732 /* ADDQ */
30c7183b
AJ
1733 if (likely(rc != 31)) {
1734 if (ra != 31) {
1735 if (islit)
1736 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1737 else
dfaa8583 1738 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1739 } else {
1740 if (islit)
1741 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1742 else
dfaa8583 1743 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1744 }
1745 }
4c9649a9
JM
1746 break;
1747 case 0x22:
1748 /* S4ADDQ */
30c7183b
AJ
1749 if (likely(rc != 31)) {
1750 if (ra != 31) {
a7812ae4 1751 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1752 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1753 if (islit)
1754 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1755 else
1756 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1757 tcg_temp_free(tmp);
30c7183b
AJ
1758 } else {
1759 if (islit)
1760 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1761 else
dfaa8583 1762 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1763 }
1764 }
4c9649a9
JM
1765 break;
1766 case 0x29:
1767 /* SUBQ */
30c7183b
AJ
1768 if (likely(rc != 31)) {
1769 if (ra != 31) {
1770 if (islit)
1771 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1772 else
dfaa8583 1773 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1774 } else {
1775 if (islit)
1776 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1777 else
dfaa8583 1778 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1779 }
1780 }
4c9649a9
JM
1781 break;
1782 case 0x2B:
1783 /* S4SUBQ */
30c7183b
AJ
1784 if (likely(rc != 31)) {
1785 if (ra != 31) {
a7812ae4 1786 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1787 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1788 if (islit)
1789 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1790 else
1791 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1792 tcg_temp_free(tmp);
30c7183b
AJ
1793 } else {
1794 if (islit)
1795 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1796 else
dfaa8583 1797 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1798 }
1799 }
4c9649a9
JM
1800 break;
1801 case 0x2D:
1802 /* CMPEQ */
01ff9cc8 1803 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1804 break;
1805 case 0x32:
1806 /* S8ADDQ */
30c7183b
AJ
1807 if (likely(rc != 31)) {
1808 if (ra != 31) {
a7812ae4 1809 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1810 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1811 if (islit)
1812 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1813 else
1814 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1815 tcg_temp_free(tmp);
30c7183b
AJ
1816 } else {
1817 if (islit)
1818 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1819 else
dfaa8583 1820 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1821 }
1822 }
4c9649a9
JM
1823 break;
1824 case 0x3B:
1825 /* S8SUBQ */
30c7183b
AJ
1826 if (likely(rc != 31)) {
1827 if (ra != 31) {
a7812ae4 1828 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1829 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1830 if (islit)
1831 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1832 else
1833 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1834 tcg_temp_free(tmp);
30c7183b
AJ
1835 } else {
1836 if (islit)
1837 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1838 else
dfaa8583 1839 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1840 }
1841 }
4c9649a9
JM
1842 break;
1843 case 0x3D:
1844 /* CMPULE */
01ff9cc8 1845 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
1846 break;
1847 case 0x40:
1848 /* ADDL/V */
a7812ae4 1849 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
1850 break;
1851 case 0x49:
1852 /* SUBL/V */
a7812ae4 1853 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
1854 break;
1855 case 0x4D:
1856 /* CMPLT */
01ff9cc8 1857 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
1858 break;
1859 case 0x60:
1860 /* ADDQ/V */
a7812ae4 1861 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1862 break;
1863 case 0x69:
1864 /* SUBQ/V */
a7812ae4 1865 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1866 break;
1867 case 0x6D:
1868 /* CMPLE */
01ff9cc8 1869 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
1870 break;
1871 default:
1872 goto invalid_opc;
1873 }
1874 break;
1875 case 0x11:
1876 switch (fn7) {
1877 case 0x00:
1878 /* AND */
30c7183b 1879 if (likely(rc != 31)) {
dfaa8583 1880 if (ra == 31)
30c7183b
AJ
1881 tcg_gen_movi_i64(cpu_ir[rc], 0);
1882 else if (islit)
1883 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1884 else
1885 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1886 }
4c9649a9
JM
1887 break;
1888 case 0x08:
1889 /* BIC */
30c7183b
AJ
1890 if (likely(rc != 31)) {
1891 if (ra != 31) {
1892 if (islit)
1893 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1894 else
1895 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1896 } else
1897 tcg_gen_movi_i64(cpu_ir[rc], 0);
1898 }
4c9649a9
JM
1899 break;
1900 case 0x14:
1901 /* CMOVLBS */
bbe1dab4 1902 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1903 break;
1904 case 0x16:
1905 /* CMOVLBC */
bbe1dab4 1906 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1907 break;
1908 case 0x20:
1909 /* BIS */
30c7183b
AJ
1910 if (likely(rc != 31)) {
1911 if (ra != 31) {
1912 if (islit)
1913 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 1914 else
30c7183b 1915 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 1916 } else {
30c7183b
AJ
1917 if (islit)
1918 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1919 else
dfaa8583 1920 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 1921 }
4c9649a9
JM
1922 }
1923 break;
1924 case 0x24:
1925 /* CMOVEQ */
bbe1dab4 1926 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1927 break;
1928 case 0x26:
1929 /* CMOVNE */
bbe1dab4 1930 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1931 break;
1932 case 0x28:
1933 /* ORNOT */
30c7183b 1934 if (likely(rc != 31)) {
dfaa8583 1935 if (ra != 31) {
30c7183b
AJ
1936 if (islit)
1937 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1938 else
1939 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1940 } else {
1941 if (islit)
1942 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1943 else
1944 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1945 }
1946 }
4c9649a9
JM
1947 break;
1948 case 0x40:
1949 /* XOR */
30c7183b
AJ
1950 if (likely(rc != 31)) {
1951 if (ra != 31) {
1952 if (islit)
1953 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1954 else
dfaa8583 1955 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1956 } else {
1957 if (islit)
1958 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1959 else
dfaa8583 1960 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1961 }
1962 }
4c9649a9
JM
1963 break;
1964 case 0x44:
1965 /* CMOVLT */
bbe1dab4 1966 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1967 break;
1968 case 0x46:
1969 /* CMOVGE */
bbe1dab4 1970 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1971 break;
1972 case 0x48:
1973 /* EQV */
30c7183b
AJ
1974 if (likely(rc != 31)) {
1975 if (ra != 31) {
1976 if (islit)
1977 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1978 else
1979 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1980 } else {
1981 if (islit)
1982 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 1983 else
dfaa8583 1984 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1985 }
1986 }
4c9649a9
JM
1987 break;
1988 case 0x61:
1989 /* AMASK */
ae8ecd42
AJ
1990 if (likely(rc != 31)) {
1991 if (islit)
1a1f7dbc 1992 tcg_gen_movi_i64(cpu_ir[rc], lit);
ae8ecd42 1993 else
1a1f7dbc
AJ
1994 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1995 switch (ctx->env->implver) {
1996 case IMPLVER_2106x:
1997 /* EV4, EV45, LCA, LCA45 & EV5 */
1998 break;
1999 case IMPLVER_21164:
2000 case IMPLVER_21264:
2001 case IMPLVER_21364:
2002 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
2003 ~(uint64_t)ctx->amask);
2004 break;
2005 }
ae8ecd42 2006 }
4c9649a9
JM
2007 break;
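/* A hedged sketch of the AMASK semantics implemented above (helper name
   invented for illustration): the implemented feature bits are cleared from
   the requested mask, so a zero result means every requested feature is
   present.  The IMPLVER_2106x case returns the request unchanged because
   those parts predate the AMASK extension. */
#if 0 /* illustrative only */
static uint64_t amask_result(uint64_t request, uint64_t implemented)
{
    /* e.g. amask_result(AMASK_BWX, env->amask) == 0 on EV56 and newer */
    return request & ~implemented;
}
#endif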
2008 case 0x64:
2009 /* CMOVLE */
bbe1dab4 2010 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2011 break;
2012 case 0x66:
2013 /* CMOVGT */
bbe1dab4 2014 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2015 break;
2016 case 0x6C:
2017 /* IMPLVER */
3761035f 2018 if (rc != 31)
8579095b 2019 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
2020 break;
2021 default:
2022 goto invalid_opc;
2023 }
2024 break;
2025 case 0x12:
2026 switch (fn7) {
2027 case 0x02:
2028 /* MSKBL */
14ab1634 2029 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2030 break;
2031 case 0x06:
2032 /* EXTBL */
377a43b6 2033 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2034 break;
2035 case 0x0B:
2036 /* INSBL */
248c42f3 2037 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2038 break;
2039 case 0x12:
2040 /* MSKWL */
14ab1634 2041 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2042 break;
2043 case 0x16:
2044 /* EXTWL */
377a43b6 2045 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2046 break;
2047 case 0x1B:
2048 /* INSWL */
248c42f3 2049 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2050 break;
2051 case 0x22:
2052 /* MSKLL */
14ab1634 2053 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2054 break;
2055 case 0x26:
2056 /* EXTLL */
377a43b6 2057 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2058 break;
2059 case 0x2B:
2060 /* INSLL */
248c42f3 2061 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2062 break;
2063 case 0x30:
2064 /* ZAP */
a7812ae4 2065 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2066 break;
2067 case 0x31:
2068 /* ZAPNOT */
a7812ae4 2069 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2070 break;
2071 case 0x32:
2072 /* MSKQL */
14ab1634 2073 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2074 break;
2075 case 0x34:
2076 /* SRL */
30c7183b
AJ
2077 if (likely(rc != 31)) {
2078 if (ra != 31) {
2079 if (islit)
2080 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2081 else {
a7812ae4 2082 TCGv shift = tcg_temp_new();
30c7183b
AJ
2083 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2084 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2085 tcg_temp_free(shift);
dfaa8583 2086 }
30c7183b
AJ
2087 } else
2088 tcg_gen_movi_i64(cpu_ir[rc], 0);
2089 }
4c9649a9
JM
2090 break;
2091 case 0x36:
2092 /* EXTQL */
377a43b6 2093 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2094 break;
2095 case 0x39:
2096 /* SLL */
30c7183b
AJ
2097 if (likely(rc != 31)) {
2098 if (ra != 31) {
2099 if (islit)
2100 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2101 else {
a7812ae4 2102 TCGv shift = tcg_temp_new();
30c7183b
AJ
2103 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2104 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2105 tcg_temp_free(shift);
dfaa8583 2106 }
30c7183b
AJ
2107 } else
2108 tcg_gen_movi_i64(cpu_ir[rc], 0);
2109 }
4c9649a9
JM
2110 break;
2111 case 0x3B:
2112 /* INSQL */
248c42f3 2113 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2114 break;
2115 case 0x3C:
2116 /* SRA */
30c7183b
AJ
2117 if (likely(rc != 31)) {
2118 if (ra != 31) {
2119 if (islit)
2120 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2121 else {
a7812ae4 2122 TCGv shift = tcg_temp_new();
30c7183b
AJ
2123 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2124 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2125 tcg_temp_free(shift);
dfaa8583 2126 }
30c7183b
AJ
2127 } else
2128 tcg_gen_movi_i64(cpu_ir[rc], 0);
2129 }
4c9649a9
JM
2130 break;
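/* A brief note on the masking above, stated as an assumption about intent:
   Alpha SLL/SRL/SRA use only bits <5:0> of rb as the shift count, and a TCG
   shift by 64 or more is undefined, so the "& 0x3f" both matches the
   architecture and keeps the generated op well defined.  Sketch (function
   name invented for illustration): */
#if 0 /* illustrative only */
static uint64_t alpha_srl(uint64_t ra_val, uint64_t rb_val)
{
    return ra_val >> (rb_val & 0x3f);   /* count taken modulo 64 */
}
#endif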
2131 case 0x52:
2132 /* MSKWH */
ffec44f1 2133 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2134 break;
2135 case 0x57:
2136 /* INSWH */
50eb6e5c 2137 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2138 break;
2139 case 0x5A:
2140 /* EXTWH */
377a43b6 2141 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2142 break;
2143 case 0x62:
2144 /* MSKLH */
ffec44f1 2145 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2146 break;
2147 case 0x67:
2148 /* INSLH */
50eb6e5c 2149 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2150 break;
2151 case 0x6A:
2152 /* EXTLH */
377a43b6 2153 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2154 break;
2155 case 0x72:
2156 /* MSKQH */
ffec44f1 2157 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2158 break;
2159 case 0x77:
2160 /* INSQH */
50eb6e5c 2161 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2162 break;
2163 case 0x7A:
2164 /* EXTQH */
377a43b6 2165 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2166 break;
2167 default:
2168 goto invalid_opc;
2169 }
2170 break;
2171 case 0x13:
2172 switch (fn7) {
2173 case 0x00:
2174 /* MULL */
30c7183b 2175 if (likely(rc != 31)) {
dfaa8583 2176 if (ra == 31)
30c7183b
AJ
2177 tcg_gen_movi_i64(cpu_ir[rc], 0);
2178 else {
2179 if (islit)
2180 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2181 else
2182 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2183 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2184 }
2185 }
4c9649a9
JM
2186 break;
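/* A worked example for the MULL case above (helper name invented for
   illustration): the low 32 bits of the product are kept and sign-extended,
   so 0x7fffffff * 2 gives 0xfffffffffffffffe (-2), whereas MULQ below keeps
   the full 64-bit product, 0x00000000fffffffe. */
#if 0 /* illustrative only */
static int64_t mull_result(uint64_t a, uint64_t b)
{
    return (int32_t)(a * b);   /* low 32 bits of the product, sign-extended */
}
#endif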
2187 case 0x20:
2188 /* MULQ */
30c7183b 2189 if (likely(rc != 31)) {
dfaa8583 2190 if (ra == 31)
30c7183b
AJ
2191 tcg_gen_movi_i64(cpu_ir[rc], 0);
2192 else if (islit)
2193 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2194 else
2195 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2196 }
4c9649a9
JM
2197 break;
2198 case 0x30:
2199 /* UMULH */
a7812ae4 2200 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2201 break;
2202 case 0x40:
2203 /* MULL/V */
a7812ae4 2204 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2205 break;
2206 case 0x60:
2207 /* MULQ/V */
a7812ae4 2208 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2209 break;
2210 default:
2211 goto invalid_opc;
2212 }
2213 break;
2214 case 0x14:
f24518b5 2215 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2216 case 0x04:
2217 /* ITOFS */
2218 if (!(ctx->amask & AMASK_FIX))
2219 goto invalid_opc;
f18cd223
AJ
2220 if (likely(rc != 31)) {
2221 if (ra != 31) {
a7812ae4 2222 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2223 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2224 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2225 tcg_temp_free_i32(tmp);
f18cd223
AJ
2226 } else
2227 tcg_gen_movi_i64(cpu_fir[rc], 0);
2228 }
4c9649a9
JM
2229 break;
2230 case 0x0A:
2231 /* SQRTF */
2232 if (!(ctx->amask & AMASK_FIX))
2233 goto invalid_opc;
a7812ae4 2234 gen_fsqrtf(rb, rc);
4c9649a9
JM
2235 break;
2236 case 0x0B:
2237 /* SQRTS */
2238 if (!(ctx->amask & AMASK_FIX))
2239 goto invalid_opc;
f24518b5 2240 gen_fsqrts(ctx, rb, rc, fn11);
4c9649a9
JM
2241 break;
2242 case 0x14:
2243 /* ITOFF */
2244 if (!(ctx->amask & AMASK_FIX))
2245 goto invalid_opc;
f18cd223
AJ
2246 if (likely(rc != 31)) {
2247 if (ra != 31) {
a7812ae4 2248 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2249 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2250 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2251 tcg_temp_free_i32(tmp);
f18cd223
AJ
2252 } else
2253 tcg_gen_movi_i64(cpu_fir[rc], 0);
2254 }
4c9649a9
JM
2255 break;
2256 case 0x24:
2257 /* ITOFT */
2258 if (!(ctx->amask & AMASK_FIX))
2259 goto invalid_opc;
f18cd223
AJ
2260 if (likely(rc != 31)) {
2261 if (ra != 31)
2262 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2263 else
2264 tcg_gen_movi_i64(cpu_fir[rc], 0);
2265 }
4c9649a9
JM
2266 break;
2267 case 0x2A:
2268 /* SQRTG */
2269 if (!(ctx->amask & AMASK_FIX))
2270 goto invalid_opc;
a7812ae4 2271 gen_fsqrtg(rb, rc);
4c9649a9
JM
2272 break;
2273 case 0x02B:
2274 /* SQRTT */
2275 if (!(ctx->amask & AMASK_FIX))
2276 goto invalid_opc;
f24518b5 2277 gen_fsqrtt(ctx, rb, rc, fn11);
4c9649a9
JM
2278 break;
2279 default:
2280 goto invalid_opc;
2281 }
2282 break;
2283 case 0x15:
2284 /* VAX floating point */
2285 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2286 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2287 case 0x00:
2288 /* ADDF */
a7812ae4 2289 gen_faddf(ra, rb, rc);
4c9649a9
JM
2290 break;
2291 case 0x01:
2292 /* SUBF */
a7812ae4 2293 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2294 break;
2295 case 0x02:
2296 /* MULF */
a7812ae4 2297 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2298 break;
2299 case 0x03:
2300 /* DIVF */
a7812ae4 2301 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2302 break;
2303 case 0x1E:
2304 /* CVTDG */
2305#if 0 // TODO
a7812ae4 2306 gen_fcvtdg(rb, rc);
4c9649a9
JM
2307#else
2308 goto invalid_opc;
2309#endif
2310 break;
2311 case 0x20:
2312 /* ADDG */
a7812ae4 2313 gen_faddg(ra, rb, rc);
4c9649a9
JM
2314 break;
2315 case 0x21:
2316 /* SUBG */
a7812ae4 2317 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2318 break;
2319 case 0x22:
2320 /* MULG */
a7812ae4 2321 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2322 break;
2323 case 0x23:
2324 /* DIVG */
a7812ae4 2325 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2326 break;
2327 case 0x25:
2328 /* CMPGEQ */
a7812ae4 2329 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2330 break;
2331 case 0x26:
2332 /* CMPGLT */
a7812ae4 2333 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2334 break;
2335 case 0x27:
2336 /* CMPGLE */
a7812ae4 2337 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2338 break;
2339 case 0x2C:
2340 /* CVTGF */
a7812ae4 2341 gen_fcvtgf(rb, rc);
4c9649a9
JM
2342 break;
2343 case 0x2D:
2344 /* CVTGD */
2345#if 0 // TODO
a7812ae4 2346 gen_fcvtgd(rb, rc);
4c9649a9
JM
2347#else
2348 goto invalid_opc;
2349#endif
2350 break;
2351 case 0x2F:
2352 /* CVTGQ */
a7812ae4 2353 gen_fcvtgq(rb, rc);
4c9649a9
JM
2354 break;
2355 case 0x3C:
2356 /* CVTQF */
a7812ae4 2357 gen_fcvtqf(rb, rc);
4c9649a9
JM
2358 break;
2359 case 0x3E:
2360 /* CVTQG */
a7812ae4 2361 gen_fcvtqg(rb, rc);
4c9649a9
JM
2362 break;
2363 default:
2364 goto invalid_opc;
2365 }
2366 break;
2367 case 0x16:
2368 /* IEEE floating-point */
f24518b5 2369 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2370 case 0x00:
2371 /* ADDS */
f24518b5 2372 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2373 break;
2374 case 0x01:
2375 /* SUBS */
f24518b5 2376 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2377 break;
2378 case 0x02:
2379 /* MULS */
f24518b5 2380 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2381 break;
2382 case 0x03:
2383 /* DIVS */
f24518b5 2384 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2385 break;
2386 case 0x20:
2387 /* ADDT */
f24518b5 2388 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2389 break;
2390 case 0x21:
2391 /* SUBT */
f24518b5 2392 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2393 break;
2394 case 0x22:
2395 /* MULT */
f24518b5 2396 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2397 break;
2398 case 0x23:
2399 /* DIVT */
f24518b5 2400 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2401 break;
2402 case 0x24:
2403 /* CMPTUN */
f24518b5 2404 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2405 break;
2406 case 0x25:
2407 /* CMPTEQ */
f24518b5 2408 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2409 break;
2410 case 0x26:
2411 /* CMPTLT */
f24518b5 2412 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2413 break;
2414 case 0x27:
2415 /* CMPTLE */
f24518b5 2416 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2417 break;
2418 case 0x2C:
a74b4d2c 2419 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2420 /* CVTST */
f24518b5 2421 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2422 } else {
2423 /* CVTTS */
f24518b5 2424 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2425 }
2426 break;
2427 case 0x2F:
2428 /* CVTTQ */
f24518b5 2429 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2430 break;
2431 case 0x3C:
2432 /* CVTQS */
f24518b5 2433 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2434 break;
2435 case 0x3E:
2436 /* CVTQT */
f24518b5 2437 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2438 break;
2439 default:
2440 goto invalid_opc;
2441 }
2442 break;
2443 case 0x17:
2444 switch (fn11) {
2445 case 0x010:
2446 /* CVTLQ */
a7812ae4 2447 gen_fcvtlq(rb, rc);
4c9649a9
JM
2448 break;
2449 case 0x020:
f18cd223 2450 if (likely(rc != 31)) {
a06d48d9 2451 if (ra == rb) {
4c9649a9 2452 /* FMOV */
a06d48d9
RH
2453 if (ra == 31)
2454 tcg_gen_movi_i64(cpu_fir[rc], 0);
2455 else
2456 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2457 } else {
f18cd223 2458 /* CPYS */
a7812ae4 2459 gen_fcpys(ra, rb, rc);
a06d48d9 2460 }
4c9649a9
JM
2461 }
2462 break;
2463 case 0x021:
2464 /* CPYSN */
a7812ae4 2465 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2466 break;
2467 case 0x022:
2468 /* CPYSE */
a7812ae4 2469 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2470 break;
2471 case 0x024:
2472 /* MT_FPCR */
f18cd223 2473 if (likely(ra != 31))
a7812ae4 2474 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2475 else {
2476 TCGv tmp = tcg_const_i64(0);
a7812ae4 2477 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2478 tcg_temp_free(tmp);
2479 }
4c9649a9
JM
2480 break;
2481 case 0x025:
2482 /* MF_FPCR */
f18cd223 2483 if (likely(ra != 31))
a7812ae4 2484 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2485 break;
2486 case 0x02A:
2487 /* FCMOVEQ */
bbe1dab4 2488 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2489 break;
2490 case 0x02B:
2491 /* FCMOVNE */
bbe1dab4 2492 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2493 break;
2494 case 0x02C:
2495 /* FCMOVLT */
bbe1dab4 2496 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2497 break;
2498 case 0x02D:
2499 /* FCMOVGE */
bbe1dab4 2500 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2501 break;
2502 case 0x02E:
2503 /* FCMOVLE */
bbe1dab4 2504 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2505 break;
2506 case 0x02F:
2507 /* FCMOVGT */
bbe1dab4 2508 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2509 break;
2510 case 0x030:
2511 /* CVTQL */
a7812ae4 2512 gen_fcvtql(rb, rc);
4c9649a9
JM
2513 break;
2514 case 0x130:
2515 /* CVTQL/V */
4c9649a9
JM
2516 case 0x530:
2517 /* CVTQL/SV */
735cf45f
RH
2518 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2519 /v doesn't do. The only thing I can think of is that /sv is a
2520 valid instruction merely for completeness in the ISA. */
2521 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2522 break;
2523 default:
2524 goto invalid_opc;
2525 }
2526 break;
2527 case 0x18:
2528 switch ((uint16_t)disp16) {
2529 case 0x0000:
2530 /* TRAPB */
4af70374 2531 /* No-op. */
4c9649a9
JM
2532 break;
2533 case 0x0400:
2534 /* EXCB */
4af70374 2535 /* No-op. */
4c9649a9
JM
2536 break;
2537 case 0x4000:
2538 /* MB */
2539 /* No-op */
2540 break;
2541 case 0x4400:
2542 /* WMB */
2543 /* No-op */
2544 break;
2545 case 0x8000:
2546 /* FETCH */
2547 /* No-op */
2548 break;
2549 case 0xA000:
2550 /* FETCH_M */
2551 /* No-op */
2552 break;
2553 case 0xC000:
2554 /* RPCC */
3761035f 2555 if (ra != 31)
a7812ae4 2556 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2557 break;
2558 case 0xE000:
2559 /* RC */
ac316ca4 2560 gen_rx(ra, 0);
4c9649a9
JM
2561 break;
2562 case 0xE800:
2563 /* ECB */
4c9649a9
JM
2564 break;
2565 case 0xF000:
2566 /* RS */
ac316ca4 2567 gen_rx(ra, 1);
4c9649a9
JM
2568 break;
2569 case 0xF800:
2570 /* WH64 */
2571 /* No-op */
2572 break;
2573 default:
2574 goto invalid_opc;
2575 }
2576 break;
2577 case 0x19:
2578 /* HW_MFPR (PALcode) */
4c9649a9 2579 goto invalid_opc;
4c9649a9 2580 case 0x1A:
49563a72
RH
2581 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2582 prediction stack action, which of course we don't implement. */
2583 if (rb != 31) {
3761035f 2584 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2585 } else {
3761035f 2586 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2587 }
2588 if (ra != 31) {
1304ca87 2589 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2590 }
4af70374 2591 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2592 break;
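/* A minimal sketch of the computed-jump translation above (helper name
   invented for illustration): the target is rb with its two low bits
   cleared, since instruction addresses are longword-aligned, and ra -- when
   it is not R31 -- receives ctx->pc, which at this point already holds the
   address of the instruction after the jump. */
#if 0 /* illustrative only */
static uint64_t jmp_target(uint64_t rb_val)
{
    return rb_val & ~(uint64_t)3;   /* longword-align the computed target */
}
#endif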
2593 case 0x1B:
2594 /* HW_LD (PALcode) */
2595#if defined (CONFIG_USER_ONLY)
2596 goto invalid_opc;
2597#else
2598 if (!ctx->pal_mode)
2599 goto invalid_opc;
8bb6e981 2600 if (ra != 31) {
a7812ae4 2601 TCGv addr = tcg_temp_new();
8bb6e981
AJ
2602 if (rb != 31)
2603 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2604 else
2605 tcg_gen_movi_i64(addr, disp12);
2606 switch ((insn >> 12) & 0xF) {
2607 case 0x0:
b5d51029 2608 /* Longword physical access (hw_ldl/p) */
2374e73e 2609 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2610 break;
2611 case 0x1:
b5d51029 2612 /* Quadword physical access (hw_ldq/p) */
2374e73e 2613 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2614 break;
2615 case 0x2:
b5d51029 2616 /* Longword physical access with lock (hw_ldl_l/p) */
2374e73e 2617 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2618 break;
2619 case 0x3:
b5d51029 2620 /* Quadword physical access with lock (hw_ldq_l/p) */
2374e73e 2621 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2622 break;
2623 case 0x4:
b5d51029 2624 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2625 goto invalid_opc;
8bb6e981 2626 case 0x5:
b5d51029 2627 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2628 goto invalid_opc;
8bb6e981
AJ
2629 break;
2630 case 0x6:
2631 /* Invalid */
b5d51029 2632 goto invalid_opc;
8bb6e981
AJ
2633 case 0x7:
2634 /* Invalid */
b5d51029 2635 goto invalid_opc;
8bb6e981 2636 case 0x8:
b5d51029 2637 /* Longword virtual access (hw_ldl) */
2374e73e 2638 goto invalid_opc;
8bb6e981 2639 case 0x9:
b5d51029 2640 /* Quadword virtual access (hw_ldq) */
2374e73e 2641 goto invalid_opc;
8bb6e981 2642 case 0xA:
b5d51029 2643 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2644 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2645 break;
2646 case 0xB:
b5d51029 2647 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2648 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2649 break;
2650 case 0xC:
b5d51029 2651 /* Longword virtual access with alt access mode (hw_ldl/a) */
2374e73e 2652 goto invalid_opc;
8bb6e981 2653 case 0xD:
b5d51029 2654 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2655 goto invalid_opc;
8bb6e981
AJ
2656 case 0xE:
2657 /* Longword virtual access with alternate access mode and
2374e73e
RH
2658 protection checks (hw_ldl/wa) */
2659 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2660 break;
2661 case 0xF:
2662 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2663 protection checks (hw_ldq/wa) */
2664 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2665 break;
2666 }
2667 tcg_temp_free(addr);
4c9649a9 2668 }
4c9649a9
JM
2669 break;
2670#endif
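/* A summary of the hw_ld access-type nibble as handled above (helper names
   invented for illustration): odd values are quadword accesses, even values
   longword; 0x0-0x3 are physical forms (0x2/0x3 with lock), 0x4/0x5 are
   virtual PTE fetches, and 0x8-0xF are virtual forms, of which only /w and
   /wa are implemented here (translated with the kernel and user MMU indexes
   respectively) -- everything else falls through to invalid_opc. */
#if 0 /* illustrative only */
static int hw_ld_is_quad(unsigned type)   { return type & 1; }
static int hw_ld_is_phys(unsigned type)   { return type < 4; }
static int hw_ld_is_locked(unsigned type) { return type == 2 || type == 3; }
#endif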
2671 case 0x1C:
2672 switch (fn7) {
2673 case 0x00:
2674 /* SEXTB */
2675 if (!(ctx->amask & AMASK_BWX))
2676 goto invalid_opc;
ae8ecd42
AJ
2677 if (likely(rc != 31)) {
2678 if (islit)
2679 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2680 else
dfaa8583 2681 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2682 }
4c9649a9
JM
2683 break;
2684 case 0x01:
2685 /* SEXTW */
2686 if (!(ctx->amask & AMASK_BWX))
2687 goto invalid_opc;
ae8ecd42
AJ
2688 if (likely(rc != 31)) {
2689 if (islit)
2690 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
ae8ecd42 2691 else
dfaa8583 2692 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2693 }
4c9649a9
JM
2694 break;
2695 case 0x30:
2696 /* CTPOP */
2697 if (!(ctx->amask & AMASK_CIX))
2698 goto invalid_opc;
ae8ecd42
AJ
2699 if (likely(rc != 31)) {
2700 if (islit)
2701 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
ae8ecd42 2702 else
a7812ae4 2703 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2704 }
4c9649a9
JM
2705 break;
2706 case 0x31:
2707 /* PERR */
2708 if (!(ctx->amask & AMASK_MVI))
2709 goto invalid_opc;
13e4df99 2710 gen_perr(ra, rb, rc, islit, lit);
4c9649a9
JM
2711 break;
2712 case 0x32:
2713 /* CTLZ */
2714 if (!(ctx->amask & AMASK_CIX))
2715 goto invalid_opc;
ae8ecd42
AJ
2716 if (likely(rc != 31)) {
2717 if (islit)
2718 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
ae8ecd42 2719 else
a7812ae4 2720 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2721 }
4c9649a9
JM
2722 break;
2723 case 0x33:
2724 /* CTTZ */
2725 if (!(ctx->amask & AMASK_CIX))
2726 goto invalid_opc;
ae8ecd42
AJ
2727 if (likely(rc != 31)) {
2728 if (islit)
2729 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
ae8ecd42 2730 else
a7812ae4 2731 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2732 }
4c9649a9
JM
2733 break;
2734 case 0x34:
2735 /* UNPKBW */
2736 if (!(ctx->amask & AMASK_MVI))
2737 goto invalid_opc;
13e4df99
RH
2738 if (real_islit || ra != 31)
2739 goto invalid_opc;
2740 gen_unpkbw (rb, rc);
4c9649a9
JM
2741 break;
2742 case 0x35:
13e4df99 2743 /* UNPKBL */
4c9649a9
JM
2744 if (!(ctx->amask & AMASK_MVI))
2745 goto invalid_opc;
13e4df99
RH
2746 if (real_islit || ra != 31)
2747 goto invalid_opc;
2748 gen_unpkbl (rb, rc);
4c9649a9
JM
2749 break;
2750 case 0x36:
2751 /* PKWB */
2752 if (!(ctx->amask & AMASK_MVI))
2753 goto invalid_opc;
13e4df99
RH
2754 if (real_islit || ra != 31)
2755 goto invalid_opc;
2756 gen_pkwb (rb, rc);
4c9649a9
JM
2757 break;
2758 case 0x37:
2759 /* PKLB */
2760 if (!(ctx->amask & AMASK_MVI))
2761 goto invalid_opc;
13e4df99
RH
2762 if (real_islit || ra != 31)
2763 goto invalid_opc;
2764 gen_pklb (rb, rc);
4c9649a9
JM
2765 break;
2766 case 0x38:
2767 /* MINSB8 */
2768 if (!(ctx->amask & AMASK_MVI))
2769 goto invalid_opc;
13e4df99 2770 gen_minsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2771 break;
2772 case 0x39:
2773 /* MINSW4 */
2774 if (!(ctx->amask & AMASK_MVI))
2775 goto invalid_opc;
13e4df99 2776 gen_minsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2777 break;
2778 case 0x3A:
2779 /* MINUB8 */
2780 if (!(ctx->amask & AMASK_MVI))
2781 goto invalid_opc;
13e4df99 2782 gen_minub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2783 break;
2784 case 0x3B:
2785 /* MINUW4 */
2786 if (!(ctx->amask & AMASK_MVI))
2787 goto invalid_opc;
13e4df99 2788 gen_minuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2789 break;
2790 case 0x3C:
2791 /* MAXUB8 */
2792 if (!(ctx->amask & AMASK_MVI))
2793 goto invalid_opc;
13e4df99 2794 gen_maxub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2795 break;
2796 case 0x3D:
2797 /* MAXUW4 */
2798 if (!(ctx->amask & AMASK_MVI))
2799 goto invalid_opc;
13e4df99 2800 gen_maxuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2801 break;
2802 case 0x3E:
2803 /* MAXSB8 */
2804 if (!(ctx->amask & AMASK_MVI))
2805 goto invalid_opc;
13e4df99 2806 gen_maxsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2807 break;
2808 case 0x3F:
2809 /* MAXSW4 */
2810 if (!(ctx->amask & AMASK_MVI))
2811 goto invalid_opc;
13e4df99 2812 gen_maxsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2813 break;
2814 case 0x70:
2815 /* FTOIT */
2816 if (!(ctx->amask & AMASK_FIX))
2817 goto invalid_opc;
f18cd223
AJ
2818 if (likely(rc != 31)) {
2819 if (ra != 31)
2820 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2821 else
2822 tcg_gen_movi_i64(cpu_ir[rc], 0);
2823 }
4c9649a9
JM
2824 break;
2825 case 0x78:
2826 /* FTOIS */
2827 if (!(ctx->amask & AMASK_FIX))
2828 goto invalid_opc;
f18cd223 2829 if (rc != 31) {
a7812ae4 2830 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 2831 if (ra != 31)
a7812ae4 2832 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
2833 else {
2834 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2835 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
2836 tcg_temp_free(tmp2);
2837 }
2838 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 2839 tcg_temp_free_i32(tmp1);
f18cd223 2840 }
4c9649a9
JM
2841 break;
2842 default:
2843 goto invalid_opc;
2844 }
2845 break;
2846 case 0x1D:
2847 /* HW_MTPR (PALcode) */
4c9649a9 2848 goto invalid_opc;
4c9649a9 2849 case 0x1E:
508b43ea 2850 /* HW_RET (PALcode) */
4c9649a9
JM
2851#if defined (CONFIG_USER_ONLY)
2852 goto invalid_opc;
2853#else
2854 if (!ctx->pal_mode)
2855 goto invalid_opc;
2856 if (rb == 31) {
508b43ea
RH
2857 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2858 address from EXC_ADDR. This turns out to be useful for our
2859 emulation PALcode, so continue to accept it. */
2860 TCGv tmp = tcg_temp_new();
129d8aa5 2861 /* FIXME: Get exc_addr. */
a7812ae4 2862 gen_helper_hw_ret(tmp);
8bb6e981 2863 tcg_temp_free(tmp);
508b43ea
RH
2864 } else {
2865 gen_helper_hw_ret(cpu_ir[rb]);
4c9649a9 2866 }
4af70374 2867 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2868 break;
2869#endif
2870 case 0x1F:
2871 /* HW_ST (PALcode) */
2872#if defined (CONFIG_USER_ONLY)
2873 goto invalid_opc;
2874#else
2875 if (!ctx->pal_mode)
2876 goto invalid_opc;
8bb6e981
AJ
2877 else {
2878 TCGv addr, val;
a7812ae4 2879 addr = tcg_temp_new();
8bb6e981
AJ
2880 if (rb != 31)
2881 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2882 else
2883 tcg_gen_movi_i64(addr, disp12);
2884 if (ra != 31)
2885 val = cpu_ir[ra];
2886 else {
a7812ae4 2887 val = tcg_temp_new();
8bb6e981
AJ
2888 tcg_gen_movi_i64(val, 0);
2889 }
2890 switch ((insn >> 12) & 0xF) {
2891 case 0x0:
2892 /* Longword physical access */
2374e73e 2893 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
2894 break;
2895 case 0x1:
2896 /* Quadword physical access */
2374e73e 2897 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
2898 break;
2899 case 0x2:
2900 /* Longword physical access with lock */
2374e73e 2901 gen_helper_stl_c_phys(val, addr, val);
8bb6e981
AJ
2902 break;
2903 case 0x3:
2904 /* Quadword physical access with lock */
2374e73e 2905 gen_helper_stq_c_phys(val, addr, val);
8bb6e981
AJ
2906 break;
2907 case 0x4:
2908 /* Longword virtual access */
2374e73e 2909 goto invalid_opc;
8bb6e981
AJ
2910 case 0x5:
2911 /* Quadword virtual access */
2374e73e 2912 goto invalid_opc;
8bb6e981
AJ
2913 case 0x6:
2914 /* Invalid */
2915 goto invalid_opc;
2916 case 0x7:
2917 /* Invalid */
2918 goto invalid_opc;
2919 case 0x8:
2920 /* Invalid */
2921 goto invalid_opc;
2922 case 0x9:
2923 /* Invalid */
2924 goto invalid_opc;
2925 case 0xA:
2926 /* Invalid */
2927 goto invalid_opc;
2928 case 0xB:
2929 /* Invalid */
2930 goto invalid_opc;
2931 case 0xC:
2932 /* Longword virtual access with alternate access mode */
2374e73e 2933 goto invalid_opc;
8bb6e981
AJ
2934 case 0xD:
2935 /* Quadword virtual access with alternate access mode */
2374e73e 2936 goto invalid_opc;
8bb6e981
AJ
2937 case 0xE:
2938 /* Invalid */
2939 goto invalid_opc;
2940 case 0xF:
2941 /* Invalid */
2942 goto invalid_opc;
2943 }
45d46ce8 2944 if (ra == 31)
8bb6e981
AJ
2945 tcg_temp_free(val);
2946 tcg_temp_free(addr);
4c9649a9 2947 }
4c9649a9
JM
2948 break;
2949#endif
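/* A note on the locked physical stores above, stated as an assumption about
   the helper convention: the first operand of gen_helper_stl_c_phys and
   gen_helper_stq_c_phys is the helper's result, so the store-conditional
   success flag is written back into val -- i.e. directly into cpu_ir[ra]
   when ra != 31, matching STx_C behaviour; with ra == R31 the flag lands in
   the scratch temporary and is discarded. */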
2950 case 0x20:
2951 /* LDF */
f18cd223 2952 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2953 break;
2954 case 0x21:
2955 /* LDG */
f18cd223 2956 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2957 break;
2958 case 0x22:
2959 /* LDS */
f18cd223 2960 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2961 break;
2962 case 0x23:
2963 /* LDT */
f18cd223 2964 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2965 break;
2966 case 0x24:
2967 /* STF */
6910b8f6 2968 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2969 break;
2970 case 0x25:
2971 /* STG */
6910b8f6 2972 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2973 break;
2974 case 0x26:
2975 /* STS */
6910b8f6 2976 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
2977 break;
2978 case 0x27:
2979 /* STT */
6910b8f6 2980 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2981 break;
2982 case 0x28:
2983 /* LDL */
f18cd223 2984 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2985 break;
2986 case 0x29:
2987 /* LDQ */
f18cd223 2988 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2989 break;
2990 case 0x2A:
2991 /* LDL_L */
f4ed8679 2992 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2993 break;
2994 case 0x2B:
2995 /* LDQ_L */
f4ed8679 2996 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2997 break;
2998 case 0x2C:
2999 /* STL */
6910b8f6 3000 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3001 break;
3002 case 0x2D:
3003 /* STQ */
6910b8f6 3004 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3005 break;
3006 case 0x2E:
3007 /* STL_C */
6910b8f6 3008 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3009 break;
3010 case 0x2F:
3011 /* STQ_C */
6910b8f6 3012 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3013 break;
3014 case 0x30:
3015 /* BR */
4af70374 3016 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3017 break;
a7812ae4 3018 case 0x31: /* FBEQ */
4af70374 3019 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3020 break;
a7812ae4 3021 case 0x32: /* FBLT */
4af70374 3022 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3023 break;
a7812ae4 3024 case 0x33: /* FBLE */
4af70374 3025 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3026 break;
3027 case 0x34:
3028 /* BSR */
4af70374 3029 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3030 break;
a7812ae4 3031 case 0x35: /* FBNE */
4af70374 3032 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3033 break;
a7812ae4 3034 case 0x36: /* FBGE */
4af70374 3035 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3036 break;
a7812ae4 3037 case 0x37: /* FBGT */
4af70374 3038 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3039 break;
3040 case 0x38:
3041 /* BLBC */
4af70374 3042 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3043 break;
3044 case 0x39:
3045 /* BEQ */
4af70374 3046 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3047 break;
3048 case 0x3A:
3049 /* BLT */
4af70374 3050 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3051 break;
3052 case 0x3B:
3053 /* BLE */
4af70374 3054 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3055 break;
3056 case 0x3C:
3057 /* BLBS */
4af70374 3058 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3059 break;
3060 case 0x3D:
3061 /* BNE */
4af70374 3062 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3063 break;
3064 case 0x3E:
3065 /* BGE */
4af70374 3066 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3067 break;
3068 case 0x3F:
3069 /* BGT */
4af70374 3070 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3071 break;
3072 invalid_opc:
8aa3fa20 3073 ret = gen_invalid(ctx);
4c9649a9
JM
3074 break;
3075 }
3076
3077 return ret;
3078}
3079
636aa200
BS
3080static inline void gen_intermediate_code_internal(CPUState *env,
3081 TranslationBlock *tb,
3082 int search_pc)
4c9649a9 3083{
4c9649a9
JM
3084 DisasContext ctx, *ctxp = &ctx;
3085 target_ulong pc_start;
3086 uint32_t insn;
3087 uint16_t *gen_opc_end;
a1d1bb31 3088 CPUBreakpoint *bp;
4c9649a9 3089 int j, lj = -1;
4af70374 3090 ExitStatus ret;
2e70f6ef
PB
3091 int num_insns;
3092 int max_insns;
4c9649a9
JM
3093
3094 pc_start = tb->pc;
4c9649a9 3095 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3096
3097 ctx.tb = tb;
3098 ctx.env = env;
4c9649a9
JM
3099 ctx.pc = pc_start;
3100 ctx.amask = env->amask;
3101#if defined (CONFIG_USER_ONLY)
3102 ctx.mem_idx = 0;
3103#else
3104 ctx.mem_idx = ((env->ps >> 3) & 3);
129d8aa5 3105 ctx.pal_mode = env->pal_mode;
4c9649a9 3106#endif
f24518b5
RH
3107
3108 /* ??? Every TB begins with unset rounding mode, to be initialized on
3109 the first fp insn of the TB. Alternatively we could define a proper
3110 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3111 to reset the FP_STATUS to that default at the end of any TB that
3112 changes the default. We could even (gasp) dynamically figure out
3113 what default would be most efficient given the running program. */
3114 ctx.tb_rm = -1;
3115 /* Similarly for flush-to-zero. */
3116 ctx.tb_ftz = -1;
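    /* A hedged note on the -1 sentinels above: the FP helpers earlier in the
       file presumably compare each insn's rounding/flush qualifier against
       tb_rm/tb_ftz and emit an FP_STATUS update only when the cached value
       differs, so a TB full of identically-qualified FP operations pays for
       at most one mode switch.  Sketch of that shape ("wanted_rm" is a
       placeholder, not a real variable): */
#if 0 /* illustrative only */
    /* inside an FP-emitting helper that sees a DisasContext *ctx: */
    if (ctx->tb_rm != wanted_rm) {
        ctx->tb_rm = wanted_rm;
        /* ...emit the call that switches FP_STATUS to wanted_rm... */
    }
#endif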
3117
2e70f6ef
PB
3118 num_insns = 0;
3119 max_insns = tb->cflags & CF_COUNT_MASK;
3120 if (max_insns == 0)
3121 max_insns = CF_COUNT_MASK;
3122
3123 gen_icount_start();
4af70374 3124 do {
72cf2d4f
BS
3125 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3126 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3127 if (bp->pc == ctx.pc) {
4c9649a9
JM
3128 gen_excp(&ctx, EXCP_DEBUG, 0);
3129 break;
3130 }
3131 }
3132 }
3133 if (search_pc) {
3134 j = gen_opc_ptr - gen_opc_buf;
3135 if (lj < j) {
3136 lj++;
3137 while (lj < j)
3138 gen_opc_instr_start[lj++] = 0;
4c9649a9 3139 }
ed1dda53
AJ
3140 gen_opc_pc[lj] = ctx.pc;
3141 gen_opc_instr_start[lj] = 1;
3142 gen_opc_icount[lj] = num_insns;
4c9649a9 3143 }
2e70f6ef
PB
3144 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3145 gen_io_start();
4c9649a9 3146 insn = ldl_code(ctx.pc);
2e70f6ef 3147 num_insns++;
c4b3be39
RH
3148
3149 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3150 tcg_gen_debug_insn_start(ctx.pc);
3151 }
3152
4c9649a9
JM
3153 ctx.pc += 4;
3154 ret = translate_one(ctxp, insn);
19bf517b 3155
bf1b03fe
RH
3156 /* If we reach a page boundary, are single stepping,
3157 or exhaust instruction count, stop generation. */
3158 if (ret == NO_EXIT
3159 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3160 || gen_opc_ptr >= gen_opc_end
3161 || num_insns >= max_insns
3162 || singlestep
3163 || env->singlestep_enabled)) {
3164 ret = EXIT_PC_STALE;
1b530a6d 3165 }
4af70374
RH
3166 } while (ret == NO_EXIT);
3167
3168 if (tb->cflags & CF_LAST_IO) {
3169 gen_io_end();
4c9649a9 3170 }
4af70374
RH
3171
3172 switch (ret) {
3173 case EXIT_GOTO_TB:
8aa3fa20 3174 case EXIT_NORETURN:
4af70374
RH
3175 break;
3176 case EXIT_PC_STALE:
496cb5b9 3177 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3178 /* FALLTHRU */
3179 case EXIT_PC_UPDATED:
bf1b03fe
RH
3180 if (env->singlestep_enabled) {
3181 gen_excp_1(EXCP_DEBUG, 0);
3182 } else {
3183 tcg_gen_exit_tb(0);
3184 }
4af70374
RH
3185 break;
3186 default:
3187 abort();
4c9649a9 3188 }
4af70374 3189
2e70f6ef 3190 gen_icount_end(tb, num_insns);
4c9649a9
JM
3191 *gen_opc_ptr = INDEX_op_end;
3192 if (search_pc) {
3193 j = gen_opc_ptr - gen_opc_buf;
3194 lj++;
3195 while (lj <= j)
3196 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3197 } else {
3198 tb->size = ctx.pc - pc_start;
2e70f6ef 3199 tb->icount = num_insns;
4c9649a9 3200 }
4af70374 3201
806991da 3202#ifdef DEBUG_DISAS
8fec2b8c 3203 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3204 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3205 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3206 qemu_log("\n");
4c9649a9 3207 }
4c9649a9 3208#endif
4c9649a9
JM
3209}
3210
2cfc5f17 3211void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3212{
2cfc5f17 3213 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3214}
3215
2cfc5f17 3216void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3217{
2cfc5f17 3218 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3219}
3220
a964acc6
RH
3221struct cpu_def_t {
3222 const char *name;
3223 int implver, amask;
3224};
3225
3226static const struct cpu_def_t cpu_defs[] = {
3227 { "ev4", IMPLVER_2106x, 0 },
3228 { "ev5", IMPLVER_21164, 0 },
3229 { "ev56", IMPLVER_21164, AMASK_BWX },
3230 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3231 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3232 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3233 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3234 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3235 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3236 { "21064", IMPLVER_2106x, 0 },
3237 { "21164", IMPLVER_21164, 0 },
3238 { "21164a", IMPLVER_21164, AMASK_BWX },
3239 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3240 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3241 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3242 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3243};
3244
aaed909a 3245CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3246{
3247 CPUAlphaState *env;
a964acc6 3248 int implver, amask, i, max;
4c9649a9
JM
3249
3250 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3251 cpu_exec_init(env);
2e70f6ef 3252 alpha_translate_init();
4c9649a9 3253 tlb_flush(env, 1);
a964acc6
RH
3254
3255 /* Default to ev67; no reason not to emulate insns by default. */
3256 implver = IMPLVER_21264;
3257 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3258 | AMASK_TRAP | AMASK_PREFETCH);
3259
3260 max = ARRAY_SIZE(cpu_defs);
3261 for (i = 0; i < max; i++) {
3262 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3263 implver = cpu_defs[i].implver;
3264 amask = cpu_defs[i].amask;
3265 break;
3266 }
3267 }
3268 env->implver = implver;
3269 env->amask = amask;
3270
4c9649a9 3271#if defined (CONFIG_USER_ONLY)
129d8aa5 3272 env->ps = 1 << 3;
2edd07ef
RH
3273 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3274 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3275#endif
6910b8f6 3276 env->lock_addr = -1;
dad081ee 3277
0bf46a40 3278 qemu_init_vcpu(env);
4c9649a9
JM
3279 return env;
3280}
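/* A usage sketch for cpu_alpha_init (the example function name is invented
   for illustration): the model name is matched against cpu_defs[] above, and
   an unrecognised name silently keeps the ev67-class defaults chosen before
   the loop. */
#if 0 /* illustrative only */
static CPUAlphaState *init_example(void)
{
    CPUAlphaState *env = cpu_alpha_init("ev56");
    /* per the table: env->implver == IMPLVER_21164, env->amask == AMASK_BWX */
    return env;
}
#endif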
aaed909a 3281
e87b7cb0 3282void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
3283{
3284 env->pc = gen_opc_pc[pc_pos];
3285}
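/* A hedged note on the function above: it consumes the gen_opc_pc[] entries
   recorded by the search_pc pass in gen_intermediate_code_internal, so that
   when a faulting instruction forces retranslation via
   gen_intermediate_code_pc() the guest PC of the offending opcode can be
   copied back into env->pc. */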