]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
target-alpha: Move integer overflow helpers to int_helper.c.
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
4c9649a9 20#include "cpu.h"
4c9649a9 21#include "disas.h"
ae8ecd42 22#include "host-utils.h"
57fec1fe 23#include "tcg-op.h"
4c9649a9 24
a7812ae4
PB
25#include "helper.h"
26#define GEN_HELPER 1
27#include "helper.h"
28
19188121 29#undef ALPHA_DEBUG_DISAS
f24518b5 30#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
31
32#ifdef ALPHA_DEBUG_DISAS
806991da 33# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
34#else
35# define LOG_DISAS(...) do { } while (0)
36#endif
37
4c9649a9
JM
38typedef struct DisasContext DisasContext;
39struct DisasContext {
4af70374
RH
40 struct TranslationBlock *tb;
41 CPUAlphaState *env;
4c9649a9
JM
42 uint64_t pc;
43 int mem_idx;
f24518b5
RH
44
45 /* Current rounding mode for this TB. */
46 int tb_rm;
47 /* Current flush-to-zero setting for this TB. */
48 int tb_ftz;
4c9649a9
JM
49};
50
4af70374
RH
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
73
3761035f 74/* global register indexes */
a7812ae4 75static TCGv_ptr cpu_env;
496cb5b9 76static TCGv cpu_ir[31];
f18cd223 77static TCGv cpu_fir[31];
496cb5b9 78static TCGv cpu_pc;
6910b8f6
RH
79static TCGv cpu_lock_addr;
80static TCGv cpu_lock_st_addr;
81static TCGv cpu_lock_value;
2ace7e55
RH
82static TCGv cpu_unique;
83#ifndef CONFIG_USER_ONLY
84static TCGv cpu_sysval;
85static TCGv cpu_usp;
ab471ade 86#endif
496cb5b9 87
3761035f 88/* register names */
f18cd223 89static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef
PB
90
91#include "gen-icount.h"
92
a5f1b965 93static void alpha_translate_init(void)
2e70f6ef 94{
496cb5b9
AJ
95 int i;
96 char *p;
2e70f6ef 97 static int done_init = 0;
496cb5b9 98
2e70f6ef
PB
99 if (done_init)
100 return;
496cb5b9 101
a7812ae4 102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
496cb5b9
AJ
103
104 p = cpu_reg_names;
105 for (i = 0; i < 31; i++) {
106 sprintf(p, "ir%d", i);
a7812ae4 107 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 108 offsetof(CPUAlphaState, ir[i]), p);
6ba8dcd7 109 p += (i < 10) ? 4 : 5;
f18cd223
AJ
110
111 sprintf(p, "fir%d", i);
a7812ae4 112 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 113 offsetof(CPUAlphaState, fir[i]), p);
f18cd223 114 p += (i < 10) ? 5 : 6;
496cb5b9
AJ
115 }
116
a7812ae4 117 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 118 offsetof(CPUAlphaState, pc), "pc");
496cb5b9 119
6910b8f6 120 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 121 offsetof(CPUAlphaState, lock_addr),
6910b8f6
RH
122 "lock_addr");
123 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 124 offsetof(CPUAlphaState, lock_st_addr),
6910b8f6
RH
125 "lock_st_addr");
126 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 127 offsetof(CPUAlphaState, lock_value),
6910b8f6 128 "lock_value");
f4ed8679 129
2ace7e55 130 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 131 offsetof(CPUAlphaState, unique), "unique");
2ace7e55
RH
132#ifndef CONFIG_USER_ONLY
133 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 134 offsetof(CPUAlphaState, sysval), "sysval");
2ace7e55 135 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 136 offsetof(CPUAlphaState, usp), "usp");
ab471ade
RH
137#endif
138
496cb5b9 139 /* register helpers */
a7812ae4 140#define GEN_HELPER 2
496cb5b9
AJ
141#include "helper.h"
142
2e70f6ef
PB
143 done_init = 1;
144}
145
bf1b03fe 146static void gen_excp_1(int exception, int error_code)
4c9649a9 147{
a7812ae4 148 TCGv_i32 tmp1, tmp2;
6ad02592 149
6ad02592
AJ
150 tmp1 = tcg_const_i32(exception);
151 tmp2 = tcg_const_i32(error_code);
b9f0923e 152 gen_helper_excp(cpu_env, tmp1, tmp2);
a7812ae4
PB
153 tcg_temp_free_i32(tmp2);
154 tcg_temp_free_i32(tmp1);
bf1b03fe 155}
8aa3fa20 156
bf1b03fe
RH
157static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
158{
159 tcg_gen_movi_i64(cpu_pc, ctx->pc);
160 gen_excp_1(exception, error_code);
8aa3fa20 161 return EXIT_NORETURN;
4c9649a9
JM
162}
163
8aa3fa20 164static inline ExitStatus gen_invalid(DisasContext *ctx)
4c9649a9 165{
8aa3fa20 166 return gen_excp(ctx, EXCP_OPCDEC, 0);
4c9649a9
JM
167}
168
636aa200 169static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 170{
a7812ae4
PB
171 TCGv tmp = tcg_temp_new();
172 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 173 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
174 tcg_gen_trunc_i64_i32(tmp32, tmp);
175 gen_helper_memory_to_f(t0, tmp32);
176 tcg_temp_free_i32(tmp32);
f18cd223
AJ
177 tcg_temp_free(tmp);
178}
179
636aa200 180static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 181{
a7812ae4 182 TCGv tmp = tcg_temp_new();
f18cd223 183 tcg_gen_qemu_ld64(tmp, t1, flags);
a7812ae4 184 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
185 tcg_temp_free(tmp);
186}
187
636aa200 188static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 189{
a7812ae4
PB
190 TCGv tmp = tcg_temp_new();
191 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 192 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
193 tcg_gen_trunc_i64_i32(tmp32, tmp);
194 gen_helper_memory_to_s(t0, tmp32);
195 tcg_temp_free_i32(tmp32);
f18cd223
AJ
196 tcg_temp_free(tmp);
197}
198
636aa200 199static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679 200{
f4ed8679 201 tcg_gen_qemu_ld32s(t0, t1, flags);
6910b8f6
RH
202 tcg_gen_mov_i64(cpu_lock_addr, t1);
203 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
204}
205
636aa200 206static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679 207{
f4ed8679 208 tcg_gen_qemu_ld64(t0, t1, flags);
6910b8f6
RH
209 tcg_gen_mov_i64(cpu_lock_addr, t1);
210 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
211}
212
636aa200
BS
213static inline void gen_load_mem(DisasContext *ctx,
214 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
215 int flags),
216 int ra, int rb, int32_t disp16, int fp,
217 int clear)
023d8ca2 218{
6910b8f6 219 TCGv addr, va;
023d8ca2 220
6910b8f6
RH
221 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
222 prefetches, which we can treat as nops. No worries about
223 missed exceptions here. */
224 if (unlikely(ra == 31)) {
023d8ca2 225 return;
6910b8f6 226 }
023d8ca2 227
a7812ae4 228 addr = tcg_temp_new();
023d8ca2
AJ
229 if (rb != 31) {
230 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 231 if (clear) {
023d8ca2 232 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 233 }
023d8ca2 234 } else {
6910b8f6 235 if (clear) {
023d8ca2 236 disp16 &= ~0x7;
6910b8f6 237 }
023d8ca2
AJ
238 tcg_gen_movi_i64(addr, disp16);
239 }
6910b8f6
RH
240
241 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
242 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
243
023d8ca2
AJ
244 tcg_temp_free(addr);
245}
246
636aa200 247static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 248{
a7812ae4
PB
249 TCGv_i32 tmp32 = tcg_temp_new_i32();
250 TCGv tmp = tcg_temp_new();
251 gen_helper_f_to_memory(tmp32, t0);
252 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
253 tcg_gen_qemu_st32(tmp, t1, flags);
254 tcg_temp_free(tmp);
a7812ae4 255 tcg_temp_free_i32(tmp32);
f18cd223
AJ
256}
257
636aa200 258static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 259{
a7812ae4
PB
260 TCGv tmp = tcg_temp_new();
261 gen_helper_g_to_memory(tmp, t0);
f18cd223
AJ
262 tcg_gen_qemu_st64(tmp, t1, flags);
263 tcg_temp_free(tmp);
264}
265
636aa200 266static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 267{
a7812ae4
PB
268 TCGv_i32 tmp32 = tcg_temp_new_i32();
269 TCGv tmp = tcg_temp_new();
270 gen_helper_s_to_memory(tmp32, t0);
271 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
272 tcg_gen_qemu_st32(tmp, t1, flags);
273 tcg_temp_free(tmp);
a7812ae4 274 tcg_temp_free_i32(tmp32);
f18cd223
AJ
275}
276
636aa200
BS
277static inline void gen_store_mem(DisasContext *ctx,
278 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
279 int flags),
280 int ra, int rb, int32_t disp16, int fp,
6910b8f6 281 int clear)
023d8ca2 282{
6910b8f6
RH
283 TCGv addr, va;
284
285 addr = tcg_temp_new();
023d8ca2
AJ
286 if (rb != 31) {
287 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 288 if (clear) {
023d8ca2 289 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 290 }
023d8ca2 291 } else {
6910b8f6 292 if (clear) {
023d8ca2 293 disp16 &= ~0x7;
6910b8f6 294 }
023d8ca2
AJ
295 tcg_gen_movi_i64(addr, disp16);
296 }
6910b8f6
RH
297
298 if (ra == 31) {
299 va = tcg_const_i64(0);
f18cd223 300 } else {
6910b8f6 301 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
023d8ca2 302 }
6910b8f6
RH
303 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
304
023d8ca2 305 tcg_temp_free(addr);
6910b8f6
RH
306 if (ra == 31) {
307 tcg_temp_free(va);
308 }
309}
310
311static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
312 int32_t disp16, int quad)
313{
314 TCGv addr;
315
316 if (ra == 31) {
317 /* ??? Don't bother storing anything. The user can't tell
318 the difference, since the zero register always reads zero. */
319 return NO_EXIT;
320 }
321
322#if defined(CONFIG_USER_ONLY)
323 addr = cpu_lock_st_addr;
324#else
e52458fe 325 addr = tcg_temp_local_new();
6910b8f6
RH
326#endif
327
328 if (rb != 31) {
329 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
330 } else {
331 tcg_gen_movi_i64(addr, disp16);
332 }
333
334#if defined(CONFIG_USER_ONLY)
335 /* ??? This is handled via a complicated version of compare-and-swap
336 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
337 in TCG so that this isn't necessary. */
338 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
339#else
340 /* ??? In system mode we are never multi-threaded, so CAS can be
341 implemented via a non-atomic load-compare-store sequence. */
342 {
343 int lab_fail, lab_done;
344 TCGv val;
345
346 lab_fail = gen_new_label();
347 lab_done = gen_new_label();
e52458fe 348 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
6910b8f6
RH
349
350 val = tcg_temp_new();
351 if (quad) {
352 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
353 } else {
354 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
355 }
e52458fe 356 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
6910b8f6
RH
357
358 if (quad) {
359 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
360 } else {
361 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
362 }
363 tcg_gen_movi_i64(cpu_ir[ra], 1);
364 tcg_gen_br(lab_done);
365
366 gen_set_label(lab_fail);
367 tcg_gen_movi_i64(cpu_ir[ra], 0);
368
369 gen_set_label(lab_done);
370 tcg_gen_movi_i64(cpu_lock_addr, -1);
371
372 tcg_temp_free(addr);
373 return NO_EXIT;
374 }
375#endif
023d8ca2
AJ
376}
377
4af70374 378static int use_goto_tb(DisasContext *ctx, uint64_t dest)
4c9649a9 379{
4af70374
RH
380 /* Check for the dest on the same page as the start of the TB. We
381 also want to suppress goto_tb in the case of single-steping and IO. */
382 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
383 && !ctx->env->singlestep_enabled
384 && !(ctx->tb->cflags & CF_LAST_IO));
385}
dbb30fe6 386
4af70374
RH
387static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
388{
389 uint64_t dest = ctx->pc + (disp << 2);
390
391 if (ra != 31) {
392 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
393 }
394
395 /* Notice branch-to-next; used to initialize RA with the PC. */
396 if (disp == 0) {
397 return 0;
398 } else if (use_goto_tb(ctx, dest)) {
399 tcg_gen_goto_tb(0);
400 tcg_gen_movi_i64(cpu_pc, dest);
4b4a72e5 401 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
4af70374
RH
402 return EXIT_GOTO_TB;
403 } else {
404 tcg_gen_movi_i64(cpu_pc, dest);
405 return EXIT_PC_UPDATED;
406 }
dbb30fe6
RH
407}
408
4af70374
RH
409static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
410 TCGv cmp, int32_t disp)
dbb30fe6 411{
4af70374 412 uint64_t dest = ctx->pc + (disp << 2);
dbb30fe6 413 int lab_true = gen_new_label();
9c29504e 414
4af70374
RH
415 if (use_goto_tb(ctx, dest)) {
416 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
417
418 tcg_gen_goto_tb(0);
419 tcg_gen_movi_i64(cpu_pc, ctx->pc);
4b4a72e5 420 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
4af70374
RH
421
422 gen_set_label(lab_true);
423 tcg_gen_goto_tb(1);
424 tcg_gen_movi_i64(cpu_pc, dest);
4b4a72e5 425 tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);
4af70374
RH
426
427 return EXIT_GOTO_TB;
428 } else {
429 int lab_over = gen_new_label();
430
431 /* ??? Consider using either
432 movi pc, next
433 addi tmp, pc, disp
434 movcond pc, cond, 0, tmp, pc
435 or
436 setcond tmp, cond, 0
437 movi pc, next
438 neg tmp, tmp
439 andi tmp, tmp, disp
440 add pc, pc, tmp
441 The current diamond subgraph surely isn't efficient. */
442
443 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
444 tcg_gen_movi_i64(cpu_pc, ctx->pc);
445 tcg_gen_br(lab_over);
446 gen_set_label(lab_true);
447 tcg_gen_movi_i64(cpu_pc, dest);
448 gen_set_label(lab_over);
449
450 return EXIT_PC_UPDATED;
451 }
452}
453
454static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
455 int32_t disp, int mask)
456{
457 TCGv cmp_tmp;
458
459 if (unlikely(ra == 31)) {
460 cmp_tmp = tcg_const_i64(0);
461 } else {
462 cmp_tmp = tcg_temp_new();
9c29504e 463 if (mask) {
4af70374 464 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
dbb30fe6 465 } else {
4af70374 466 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
dbb30fe6 467 }
9c29504e 468 }
4af70374
RH
469
470 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
471}
472
4af70374 473/* Fold -0.0 for comparison with COND. */
dbb30fe6 474
4af70374 475static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 476{
dbb30fe6 477 uint64_t mzero = 1ull << 63;
f18cd223 478
dbb30fe6
RH
479 switch (cond) {
480 case TCG_COND_LE:
481 case TCG_COND_GT:
482 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 483 tcg_gen_mov_i64(dest, src);
a7812ae4 484 break;
dbb30fe6
RH
485
486 case TCG_COND_EQ:
487 case TCG_COND_NE:
488 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 489 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 490 break;
dbb30fe6
RH
491
492 case TCG_COND_GE:
dbb30fe6 493 case TCG_COND_LT:
4af70374
RH
494 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
495 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
496 tcg_gen_neg_i64(dest, dest);
497 tcg_gen_and_i64(dest, dest, src);
a7812ae4 498 break;
dbb30fe6 499
a7812ae4
PB
500 default:
501 abort();
f18cd223 502 }
dbb30fe6
RH
503}
504
4af70374
RH
505static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
506 int32_t disp)
dbb30fe6 507{
4af70374 508 TCGv cmp_tmp;
dbb30fe6
RH
509
510 if (unlikely(ra == 31)) {
511 /* Very uncommon case, but easier to optimize it to an integer
512 comparison than continuing with the floating point comparison. */
4af70374 513 return gen_bcond(ctx, cond, ra, disp, 0);
dbb30fe6
RH
514 }
515
4af70374
RH
516 cmp_tmp = tcg_temp_new();
517 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
518 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
519}
520
bbe1dab4 521static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 522 int islit, uint8_t lit, int mask)
4c9649a9 523{
bbe1dab4 524 TCGCond inv_cond = tcg_invert_cond(cond);
9c29504e
AJ
525 int l1;
526
527 if (unlikely(rc == 31))
528 return;
529
530 l1 = gen_new_label();
531
532 if (ra != 31) {
533 if (mask) {
a7812ae4 534 TCGv tmp = tcg_temp_new();
9c29504e
AJ
535 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
536 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
537 tcg_temp_free(tmp);
538 } else
539 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
540 } else {
541 /* Very uncommon case - Do not bother to optimize. */
542 TCGv tmp = tcg_const_i64(0);
543 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
544 tcg_temp_free(tmp);
545 }
546
4c9649a9 547 if (islit)
9c29504e 548 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 549 else
dfaa8583 550 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 551 gen_set_label(l1);
4c9649a9
JM
552}
553
bbe1dab4 554static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 555{
4af70374 556 TCGv cmp_tmp;
dbb30fe6
RH
557 int l1;
558
4af70374 559 if (unlikely(rc == 31)) {
dbb30fe6 560 return;
4af70374
RH
561 }
562
563 cmp_tmp = tcg_temp_new();
dbb30fe6 564 if (unlikely(ra == 31)) {
4af70374
RH
565 tcg_gen_movi_i64(cmp_tmp, 0);
566 } else {
567 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
dbb30fe6
RH
568 }
569
570 l1 = gen_new_label();
4af70374
RH
571 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
572 tcg_temp_free(cmp_tmp);
dbb30fe6
RH
573
574 if (rb != 31)
575 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
576 else
577 tcg_gen_movi_i64(cpu_fir[rc], 0);
578 gen_set_label(l1);
579}
580
/* FP instruction qualifier bits, as encoded in fn11.  */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
591
592static void gen_qual_roundmode(DisasContext *ctx, int fn11)
593{
594 TCGv_i32 tmp;
595
596 fn11 &= QUAL_RM_MASK;
597 if (fn11 == ctx->tb_rm) {
598 return;
599 }
600 ctx->tb_rm = fn11;
601
602 tmp = tcg_temp_new_i32();
603 switch (fn11) {
604 case QUAL_RM_N:
605 tcg_gen_movi_i32(tmp, float_round_nearest_even);
606 break;
607 case QUAL_RM_C:
608 tcg_gen_movi_i32(tmp, float_round_to_zero);
609 break;
610 case QUAL_RM_M:
611 tcg_gen_movi_i32(tmp, float_round_down);
612 break;
613 case QUAL_RM_D:
4a58aedf
RH
614 tcg_gen_ld8u_i32(tmp, cpu_env,
615 offsetof(CPUAlphaState, fpcr_dyn_round));
f24518b5
RH
616 break;
617 }
618
619#if defined(CONFIG_SOFTFLOAT_INLINE)
620 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
621 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
622 sets the one field. */
623 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 624 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
f24518b5
RH
625#else
626 gen_helper_setroundmode(tmp);
627#endif
628
629 tcg_temp_free_i32(tmp);
630}
631
632static void gen_qual_flushzero(DisasContext *ctx, int fn11)
633{
634 TCGv_i32 tmp;
635
636 fn11 &= QUAL_U;
637 if (fn11 == ctx->tb_ftz) {
638 return;
639 }
640 ctx->tb_ftz = fn11;
641
642 tmp = tcg_temp_new_i32();
643 if (fn11) {
644 /* Underflow is enabled, use the FPCR setting. */
4a58aedf
RH
645 tcg_gen_ld8u_i32(tmp, cpu_env,
646 offsetof(CPUAlphaState, fpcr_flush_to_zero));
f24518b5
RH
647 } else {
648 /* Underflow is disabled, force flush-to-zero. */
649 tcg_gen_movi_i32(tmp, 1);
650 }
651
652#if defined(CONFIG_SOFTFLOAT_INLINE)
653 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 654 offsetof(CPUAlphaState, fp_status.flush_to_zero));
f24518b5
RH
655#else
656 gen_helper_setflushzero(tmp);
657#endif
658
659 tcg_temp_free_i32(tmp);
660}
661
662static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
663{
664 TCGv val = tcg_temp_new();
665 if (reg == 31) {
666 tcg_gen_movi_i64(val, 0);
667 } else if (fn11 & QUAL_S) {
4a58aedf 668 gen_helper_ieee_input_s(val, cpu_env, cpu_fir[reg]);
f24518b5 669 } else if (is_cmp) {
4a58aedf 670 gen_helper_ieee_input_cmp(val, cpu_env, cpu_fir[reg]);
f24518b5 671 } else {
4a58aedf 672 gen_helper_ieee_input(val, cpu_env, cpu_fir[reg]);
f24518b5
RH
673 }
674 return val;
675}
676
677static void gen_fp_exc_clear(void)
678{
679#if defined(CONFIG_SOFTFLOAT_INLINE)
680 TCGv_i32 zero = tcg_const_i32(0);
681 tcg_gen_st8_i32(zero, cpu_env,
4d5712f1 682 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5
RH
683 tcg_temp_free_i32(zero);
684#else
4a58aedf 685 gen_helper_fp_exc_clear(cpu_env);
f24518b5
RH
686#endif
687}
688
689static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
690{
691 /* ??? We ought to be able to do something with imprecise exceptions.
692 E.g. notice we're still in the trap shadow of something within the
693 TB and do not generate the code to signal the exception; end the TB
694 when an exception is forced to arrive, either by consumption of a
695 register value or TRAPB or EXCB. */
696 TCGv_i32 exc = tcg_temp_new_i32();
697 TCGv_i32 reg;
698
699#if defined(CONFIG_SOFTFLOAT_INLINE)
700 tcg_gen_ld8u_i32(exc, cpu_env,
4d5712f1 701 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5 702#else
4a58aedf 703 gen_helper_fp_exc_get(exc, cpu_env);
f24518b5
RH
704#endif
705
706 if (ignore) {
707 tcg_gen_andi_i32(exc, exc, ~ignore);
708 }
709
710 /* ??? Pass in the regno of the destination so that the helper can
711 set EXC_MASK, which contains a bitmask of destination registers
712 that have caused arithmetic traps. A simple userspace emulation
713 does not require this. We do need it for a guest kernel's entArith,
714 or if we were to do something clever with imprecise exceptions. */
715 reg = tcg_const_i32(rc + 32);
716
717 if (fn11 & QUAL_S) {
4a58aedf 718 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
f24518b5 719 } else {
4a58aedf 720 gen_helper_fp_exc_raise(cpu_env, exc, reg);
f24518b5
RH
721 }
722
723 tcg_temp_free_i32(reg);
724 tcg_temp_free_i32(exc);
725}
726
727static inline void gen_fp_exc_raise(int rc, int fn11)
728{
729 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 730}
f24518b5 731
593f17e5
RH
732static void gen_fcvtlq(int rb, int rc)
733{
734 if (unlikely(rc == 31)) {
735 return;
736 }
737 if (unlikely(rb == 31)) {
738 tcg_gen_movi_i64(cpu_fir[rc], 0);
739 } else {
740 TCGv tmp = tcg_temp_new();
741
742 /* The arithmetic right shift here, plus the sign-extended mask below
743 yields a sign-extended result without an explicit ext32s_i64. */
744 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
745 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
746 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
747 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
748 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
749
750 tcg_temp_free(tmp);
751 }
752}
753
735cf45f
RH
754static void gen_fcvtql(int rb, int rc)
755{
756 if (unlikely(rc == 31)) {
757 return;
758 }
759 if (unlikely(rb == 31)) {
760 tcg_gen_movi_i64(cpu_fir[rc], 0);
761 } else {
762 TCGv tmp = tcg_temp_new();
763
764 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
765 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
766 tcg_gen_shli_i64(tmp, tmp, 32);
767 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
768 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
769
770 tcg_temp_free(tmp);
771 }
772}
773
774static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
775{
776 if (rb != 31) {
777 int lab = gen_new_label();
778 TCGv tmp = tcg_temp_new();
779
780 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
781 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
782 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
783
784 gen_set_label(lab);
785 }
786 gen_fcvtql(rb, rc);
787}
788
4a58aedf
RH
789#define FARITH2(name) \
790 static inline void glue(gen_f, name)(int rb, int rc) \
791 { \
792 if (unlikely(rc == 31)) { \
793 return; \
794 } \
795 if (rb != 31) { \
796 gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
797 } else { \
798 TCGv tmp = tcg_const_i64(0); \
799 gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
800 tcg_temp_free(tmp); \
801 } \
802 }
f24518b5
RH
803
804/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
805FARITH2(sqrtf)
806FARITH2(sqrtg)
a7812ae4
PB
807FARITH2(cvtgf)
808FARITH2(cvtgq)
809FARITH2(cvtqf)
810FARITH2(cvtqg)
f24518b5 811
4a58aedf
RH
812static void gen_ieee_arith2(DisasContext *ctx,
813 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
814 int rb, int rc, int fn11)
815{
816 TCGv vb;
817
818 /* ??? This is wrong: the instruction is not a nop, it still may
819 raise exceptions. */
820 if (unlikely(rc == 31)) {
821 return;
822 }
823
824 gen_qual_roundmode(ctx, fn11);
825 gen_qual_flushzero(ctx, fn11);
826 gen_fp_exc_clear();
827
828 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 829 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
830 tcg_temp_free(vb);
831
832 gen_fp_exc_raise(rc, fn11);
833}
834
835#define IEEE_ARITH2(name) \
836static inline void glue(gen_f, name)(DisasContext *ctx, \
837 int rb, int rc, int fn11) \
838{ \
839 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
840}
841IEEE_ARITH2(sqrts)
842IEEE_ARITH2(sqrtt)
843IEEE_ARITH2(cvtst)
844IEEE_ARITH2(cvtts)
845
846static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
847{
848 TCGv vb;
849 int ignore = 0;
850
851 /* ??? This is wrong: the instruction is not a nop, it still may
852 raise exceptions. */
853 if (unlikely(rc == 31)) {
854 return;
855 }
856
857 /* No need to set flushzero, since we have an integer output. */
858 gen_fp_exc_clear();
859 vb = gen_ieee_input(rb, fn11, 0);
860
861 /* Almost all integer conversions use cropped rounding, and most
862 also do not have integer overflow enabled. Special case that. */
863 switch (fn11) {
864 case QUAL_RM_C:
4a58aedf 865 gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
866 break;
867 case QUAL_V | QUAL_RM_C:
868 case QUAL_S | QUAL_V | QUAL_RM_C:
869 ignore = float_flag_inexact;
870 /* FALLTHRU */
871 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
4a58aedf 872 gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
873 break;
874 default:
875 gen_qual_roundmode(ctx, fn11);
4a58aedf 876 gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
877 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
878 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
879 break;
880 }
881 tcg_temp_free(vb);
882
883 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
884}
885
4a58aedf
RH
886static void gen_ieee_intcvt(DisasContext *ctx,
887 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
888 int rb, int rc, int fn11)
889{
890 TCGv vb;
891
892 /* ??? This is wrong: the instruction is not a nop, it still may
893 raise exceptions. */
894 if (unlikely(rc == 31)) {
895 return;
896 }
897
898 gen_qual_roundmode(ctx, fn11);
899
900 if (rb == 31) {
901 vb = tcg_const_i64(0);
902 } else {
903 vb = cpu_fir[rb];
904 }
905
906 /* The only exception that can be raised by integer conversion
907 is inexact. Thus we only need to worry about exceptions when
908 inexact handling is requested. */
909 if (fn11 & QUAL_I) {
910 gen_fp_exc_clear();
4a58aedf 911 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
912 gen_fp_exc_raise(rc, fn11);
913 } else {
4a58aedf 914 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
915 }
916
917 if (rb == 31) {
918 tcg_temp_free(vb);
919 }
920}
921
922#define IEEE_INTCVT(name) \
923static inline void glue(gen_f, name)(DisasContext *ctx, \
924 int rb, int rc, int fn11) \
925{ \
926 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
927}
928IEEE_INTCVT(cvtqs)
929IEEE_INTCVT(cvtqt)
930
dc96be4b
RH
931static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
932{
933 TCGv va, vb, vmask;
934 int za = 0, zb = 0;
935
936 if (unlikely(rc == 31)) {
937 return;
938 }
939
940 vmask = tcg_const_i64(mask);
941
942 TCGV_UNUSED_I64(va);
943 if (ra == 31) {
944 if (inv_a) {
945 va = vmask;
946 } else {
947 za = 1;
948 }
949 } else {
950 va = tcg_temp_new_i64();
951 tcg_gen_mov_i64(va, cpu_fir[ra]);
952 if (inv_a) {
953 tcg_gen_andc_i64(va, vmask, va);
954 } else {
955 tcg_gen_and_i64(va, va, vmask);
956 }
957 }
958
959 TCGV_UNUSED_I64(vb);
960 if (rb == 31) {
961 zb = 1;
962 } else {
963 vb = tcg_temp_new_i64();
964 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
965 }
966
967 switch (za << 1 | zb) {
968 case 0 | 0:
969 tcg_gen_or_i64(cpu_fir[rc], va, vb);
970 break;
971 case 0 | 1:
972 tcg_gen_mov_i64(cpu_fir[rc], va);
973 break;
974 case 2 | 0:
975 tcg_gen_mov_i64(cpu_fir[rc], vb);
976 break;
977 case 2 | 1:
978 tcg_gen_movi_i64(cpu_fir[rc], 0);
979 break;
980 }
981
982 tcg_temp_free(vmask);
983 if (ra != 31) {
984 tcg_temp_free(va);
985 }
986 if (rb != 31) {
987 tcg_temp_free(vb);
988 }
989}
990
/* CPYS: copy the sign bit of RA onto RB.  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN: copy the inverted sign bit of RA onto RB.  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE: copy the sign and exponent field of RA onto RB.  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1005
4a58aedf
RH
1006#define FARITH3(name) \
1007 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1008 { \
1009 TCGv va, vb; \
1010 \
1011 if (unlikely(rc == 31)) { \
1012 return; \
1013 } \
1014 if (ra == 31) { \
1015 va = tcg_const_i64(0); \
1016 } else { \
1017 va = cpu_fir[ra]; \
1018 } \
1019 if (rb == 31) { \
1020 vb = tcg_const_i64(0); \
1021 } else { \
1022 vb = cpu_fir[rb]; \
1023 } \
1024 \
1025 gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
1026 \
1027 if (ra == 31) { \
1028 tcg_temp_free(va); \
1029 } \
1030 if (rb == 31) { \
1031 tcg_temp_free(vb); \
1032 } \
1033 }
f24518b5
RH
1034
1035/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
1036FARITH3(addf)
1037FARITH3(subf)
1038FARITH3(mulf)
1039FARITH3(divf)
1040FARITH3(addg)
1041FARITH3(subg)
1042FARITH3(mulg)
1043FARITH3(divg)
1044FARITH3(cmpgeq)
1045FARITH3(cmpglt)
1046FARITH3(cmpgle)
f24518b5
RH
1047
1048static void gen_ieee_arith3(DisasContext *ctx,
4a58aedf 1049 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
f24518b5
RH
1050 int ra, int rb, int rc, int fn11)
1051{
1052 TCGv va, vb;
1053
1054 /* ??? This is wrong: the instruction is not a nop, it still may
1055 raise exceptions. */
1056 if (unlikely(rc == 31)) {
1057 return;
1058 }
1059
1060 gen_qual_roundmode(ctx, fn11);
1061 gen_qual_flushzero(ctx, fn11);
1062 gen_fp_exc_clear();
1063
1064 va = gen_ieee_input(ra, fn11, 0);
1065 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 1066 helper(cpu_fir[rc], cpu_env, va, vb);
f24518b5
RH
1067 tcg_temp_free(va);
1068 tcg_temp_free(vb);
1069
1070 gen_fp_exc_raise(rc, fn11);
1071}
1072
1073#define IEEE_ARITH3(name) \
1074static inline void glue(gen_f, name)(DisasContext *ctx, \
1075 int ra, int rb, int rc, int fn11) \
1076{ \
1077 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1078}
1079IEEE_ARITH3(adds)
1080IEEE_ARITH3(subs)
1081IEEE_ARITH3(muls)
1082IEEE_ARITH3(divs)
1083IEEE_ARITH3(addt)
1084IEEE_ARITH3(subt)
1085IEEE_ARITH3(mult)
1086IEEE_ARITH3(divt)
1087
1088static void gen_ieee_compare(DisasContext *ctx,
4a58aedf 1089 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
f24518b5
RH
1090 int ra, int rb, int rc, int fn11)
1091{
1092 TCGv va, vb;
1093
1094 /* ??? This is wrong: the instruction is not a nop, it still may
1095 raise exceptions. */
1096 if (unlikely(rc == 31)) {
1097 return;
1098 }
1099
1100 gen_fp_exc_clear();
1101
1102 va = gen_ieee_input(ra, fn11, 1);
1103 vb = gen_ieee_input(rb, fn11, 1);
4a58aedf 1104 helper(cpu_fir[rc], cpu_env, va, vb);
f24518b5
RH
1105 tcg_temp_free(va);
1106 tcg_temp_free(vb);
1107
1108 gen_fp_exc_raise(rc, fn11);
1109}
1110
1111#define IEEE_CMP3(name) \
1112static inline void glue(gen_f, name)(DisasContext *ctx, \
1113 int ra, int rb, int rc, int fn11) \
1114{ \
1115 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1116}
1117IEEE_CMP3(cmptun)
1118IEEE_CMP3(cmpteq)
1119IEEE_CMP3(cmptlt)
1120IEEE_CMP3(cmptle)
a7812ae4 1121
248c42f3
RH
/* Expand the 8-bit ZAPNOT literal into a 64-bit byte mask: each set
   bit of LIT selects a full byte of ones in the result.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    for (byte = 7; byte >= 0; --byte) {
        mask <<= 8;
        if (lit & (1 << byte)) {
            mask |= 0xff;
        }
    }
    return mask;
}
1133
87d98f95
RH
1134/* Implement zapnot with an immediate operand, which expands to some
1135 form of immediate AND. This is a basic building block in the
1136 definition of many of the other byte manipulation instructions. */
248c42f3 1137static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1138{
87d98f95
RH
1139 switch (lit) {
1140 case 0x00:
248c42f3 1141 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1142 break;
1143 case 0x01:
248c42f3 1144 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1145 break;
1146 case 0x03:
248c42f3 1147 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1148 break;
1149 case 0x0f:
248c42f3 1150 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1151 break;
1152 case 0xff:
248c42f3 1153 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1154 break;
1155 default:
248c42f3 1156 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1157 break;
1158 }
1159}
1160
1161static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1162{
1163 if (unlikely(rc == 31))
1164 return;
1165 else if (unlikely(ra == 31))
1166 tcg_gen_movi_i64(cpu_ir[rc], 0);
1167 else if (islit)
248c42f3 1168 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1169 else
1170 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1171}
1172
1173static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1174{
1175 if (unlikely(rc == 31))
1176 return;
1177 else if (unlikely(ra == 31))
1178 tcg_gen_movi_i64(cpu_ir[rc], 0);
1179 else if (islit)
248c42f3 1180 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1181 else
1182 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1183}
1184
1185
248c42f3 1186/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1187static void gen_ext_h(int ra, int rb, int rc, int islit,
1188 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1189{
1190 if (unlikely(rc == 31))
1191 return;
377a43b6
RH
1192 else if (unlikely(ra == 31))
1193 tcg_gen_movi_i64(cpu_ir[rc], 0);
1194 else {
dfaa8583 1195 if (islit) {
377a43b6
RH
1196 lit = (64 - (lit & 7) * 8) & 0x3f;
1197 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1198 } else {
377a43b6 1199 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1200 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1201 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1202 tcg_gen_neg_i64(tmp1, tmp1);
1203 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1204 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1205 tcg_temp_free(tmp1);
dfaa8583 1206 }
248c42f3 1207 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1208 }
b3249f63
AJ
1209}
1210
248c42f3 1211/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1212static void gen_ext_l(int ra, int rb, int rc, int islit,
1213 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1214{
1215 if (unlikely(rc == 31))
1216 return;
377a43b6
RH
1217 else if (unlikely(ra == 31))
1218 tcg_gen_movi_i64(cpu_ir[rc], 0);
1219 else {
dfaa8583 1220 if (islit) {
377a43b6 1221 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1222 } else {
a7812ae4 1223 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1224 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1225 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1226 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1227 tcg_temp_free(tmp);
fe2b269a 1228 }
248c42f3
RH
1229 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1230 }
1231}
1232
50eb6e5c
RH
1233/* INSWH, INSLH, INSQH */
1234static void gen_ins_h(int ra, int rb, int rc, int islit,
1235 uint8_t lit, uint8_t byte_mask)
1236{
1237 if (unlikely(rc == 31))
1238 return;
1239 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1240 tcg_gen_movi_i64(cpu_ir[rc], 0);
1241 else {
1242 TCGv tmp = tcg_temp_new();
1243
1244 /* The instruction description has us left-shift the byte mask
1245 and extract bits <15:8> and apply that zap at the end. This
1246 is equivalent to simply performing the zap first and shifting
1247 afterward. */
1248 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1249
1250 if (islit) {
1251 /* Note that we have handled the lit==0 case above. */
1252 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1253 } else {
1254 TCGv shift = tcg_temp_new();
1255
1256 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1257 Do this portably by splitting the shift into two parts:
1258 shift_count-1 and 1. Arrange for the -1 by using
1259 ones-complement instead of twos-complement in the negation:
1260 ~((B & 7) * 8) & 63. */
1261
1262 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1263 tcg_gen_shli_i64(shift, shift, 3);
1264 tcg_gen_not_i64(shift, shift);
1265 tcg_gen_andi_i64(shift, shift, 0x3f);
1266
1267 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1268 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1269 tcg_temp_free(shift);
1270 }
1271 tcg_temp_free(tmp);
1272 }
1273}
1274
248c42f3 1275/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1276static void gen_ins_l(int ra, int rb, int rc, int islit,
1277 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1278{
1279 if (unlikely(rc == 31))
1280 return;
1281 else if (unlikely(ra == 31))
1282 tcg_gen_movi_i64(cpu_ir[rc], 0);
1283 else {
1284 TCGv tmp = tcg_temp_new();
1285
1286 /* The instruction description has us left-shift the byte mask
1287 the same number of byte slots as the data and apply the zap
1288 at the end. This is equivalent to simply performing the zap
1289 first and shifting afterward. */
1290 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1291
1292 if (islit) {
1293 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1294 } else {
1295 TCGv shift = tcg_temp_new();
1296 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1297 tcg_gen_shli_i64(shift, shift, 3);
1298 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1299 tcg_temp_free(shift);
1300 }
1301 tcg_temp_free(tmp);
377a43b6 1302 }
b3249f63
AJ
1303}
1304
ffec44f1
RH
1305/* MSKWH, MSKLH, MSKQH */
1306static void gen_msk_h(int ra, int rb, int rc, int islit,
1307 uint8_t lit, uint8_t byte_mask)
1308{
1309 if (unlikely(rc == 31))
1310 return;
1311 else if (unlikely(ra == 31))
1312 tcg_gen_movi_i64(cpu_ir[rc], 0);
1313 else if (islit) {
1314 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1315 } else {
1316 TCGv shift = tcg_temp_new();
1317 TCGv mask = tcg_temp_new();
1318
1319 /* The instruction description is as above, where the byte_mask
1320 is shifted left, and then we extract bits <15:8>. This can be
1321 emulated with a right-shift on the expanded byte mask. This
1322 requires extra care because for an input <2:0> == 0 we need a
1323 shift of 64 bits in order to generate a zero. This is done by
1324 splitting the shift into two parts, the variable shift - 1
1325 followed by a constant 1 shift. The code we expand below is
1326 equivalent to ~((B & 7) * 8) & 63. */
1327
1328 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1329 tcg_gen_shli_i64(shift, shift, 3);
1330 tcg_gen_not_i64(shift, shift);
1331 tcg_gen_andi_i64(shift, shift, 0x3f);
1332 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1333 tcg_gen_shr_i64(mask, mask, shift);
1334 tcg_gen_shri_i64(mask, mask, 1);
1335
1336 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1337
1338 tcg_temp_free(mask);
1339 tcg_temp_free(shift);
1340 }
1341}
1342
14ab1634 1343/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1344static void gen_msk_l(int ra, int rb, int rc, int islit,
1345 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1346{
1347 if (unlikely(rc == 31))
1348 return;
1349 else if (unlikely(ra == 31))
1350 tcg_gen_movi_i64(cpu_ir[rc], 0);
1351 else if (islit) {
1352 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1353 } else {
1354 TCGv shift = tcg_temp_new();
1355 TCGv mask = tcg_temp_new();
1356
1357 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1358 tcg_gen_shli_i64(shift, shift, 3);
1359 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1360 tcg_gen_shl_i64(mask, mask, shift);
1361
1362 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1363
1364 tcg_temp_free(mask);
1365 tcg_temp_free(shift);
1366 }
1367}
1368
04acd307 1369/* Code to call arith3 helpers */
a7812ae4 1370#define ARITH3(name) \
636aa200
BS
1371static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1372 uint8_t lit) \
a7812ae4
PB
1373{ \
1374 if (unlikely(rc == 31)) \
1375 return; \
1376 \
1377 if (ra != 31) { \
1378 if (islit) { \
1379 TCGv tmp = tcg_const_i64(lit); \
1380 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1381 tcg_temp_free(tmp); \
1382 } else \
1383 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1384 } else { \
1385 TCGv tmp1 = tcg_const_i64(0); \
1386 if (islit) { \
1387 TCGv tmp2 = tcg_const_i64(lit); \
1388 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1389 tcg_temp_free(tmp2); \
1390 } else \
1391 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1392 tcg_temp_free(tmp1); \
1393 } \
b3249f63 1394}
a7812ae4 1395ARITH3(umulh)
2958620f 1396ARITH3(cmpbge)
13e4df99
RH
1397ARITH3(minub8)
1398ARITH3(minsb8)
1399ARITH3(minuw4)
1400ARITH3(minsw4)
1401ARITH3(maxub8)
1402ARITH3(maxsb8)
1403ARITH3(maxuw4)
1404ARITH3(maxsw4)
1405ARITH3(perr)
1406
2958620f
RH
1407/* Code to call arith3 helpers */
1408#define ARITH3_EX(name) \
1409 static inline void glue(gen_, name)(int ra, int rb, int rc, \
1410 int islit, uint8_t lit) \
1411 { \
1412 if (unlikely(rc == 31)) { \
1413 return; \
1414 } \
1415 if (ra != 31) { \
1416 if (islit) { \
1417 TCGv tmp = tcg_const_i64(lit); \
1418 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1419 cpu_ir[ra], tmp); \
1420 tcg_temp_free(tmp); \
1421 } else { \
1422 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1423 cpu_ir[ra], cpu_ir[rb]); \
1424 } \
1425 } else { \
1426 TCGv tmp1 = tcg_const_i64(0); \
1427 if (islit) { \
1428 TCGv tmp2 = tcg_const_i64(lit); \
1429 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2); \
1430 tcg_temp_free(tmp2); \
1431 } else { \
1432 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
1433 } \
1434 tcg_temp_free(tmp1); \
1435 } \
1436 }
1437ARITH3_EX(addlv)
1438ARITH3_EX(sublv)
1439ARITH3_EX(addqv)
1440ARITH3_EX(subqv)
1441ARITH3_EX(mullv)
1442ARITH3_EX(mulqv)
1443
13e4df99
RH
1444#define MVIOP2(name) \
1445static inline void glue(gen_, name)(int rb, int rc) \
1446{ \
1447 if (unlikely(rc == 31)) \
1448 return; \
1449 if (unlikely(rb == 31)) \
1450 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1451 else \
1452 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1453}
1454MVIOP2(pklb)
1455MVIOP2(pkwb)
1456MVIOP2(unpkbl)
1457MVIOP2(unpkbw)
b3249f63 1458
9e05960f
RH
1459static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1460 int islit, uint8_t lit)
01ff9cc8 1461{
9e05960f 1462 TCGv va, vb;
01ff9cc8 1463
9e05960f 1464 if (unlikely(rc == 31)) {
13e4df99 1465 return;
9e05960f 1466 }
01ff9cc8 1467
9e05960f
RH
1468 if (ra == 31) {
1469 va = tcg_const_i64(0);
1470 } else {
1471 va = cpu_ir[ra];
1472 }
1473 if (islit) {
1474 vb = tcg_const_i64(lit);
1475 } else {
1476 vb = cpu_ir[rb];
1477 }
01ff9cc8 1478
9e05960f 1479 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1480
9e05960f
RH
1481 if (ra == 31) {
1482 tcg_temp_free(va);
1483 }
1484 if (islit) {
1485 tcg_temp_free(vb);
1486 }
01ff9cc8
AJ
1487}
1488
ac316ca4
RH
1489static void gen_rx(int ra, int set)
1490{
1491 TCGv_i32 tmp;
1492
1493 if (ra != 31) {
4d5712f1 1494 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1495 }
1496
1497 tmp = tcg_const_i32(set);
4d5712f1 1498 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1499 tcg_temp_free_i32(tmp);
1500}
1501
2ace7e55
RH
1502static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1503{
1504 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1505 to internal cpu registers. */
1506
1507 /* Unprivileged PAL call */
1508 if (palcode >= 0x80 && palcode < 0xC0) {
1509 switch (palcode) {
1510 case 0x86:
1511 /* IMB */
1512 /* No-op inside QEMU. */
1513 break;
1514 case 0x9E:
1515 /* RDUNIQUE */
1516 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1517 break;
1518 case 0x9F:
1519 /* WRUNIQUE */
1520 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1521 break;
1522 default:
1523 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
1524 }
1525 return NO_EXIT;
1526 }
1527
1528#ifndef CONFIG_USER_ONLY
1529 /* Privileged PAL code */
1530 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1531 switch (palcode) {
1532 case 0x01:
1533 /* CFLUSH */
1534 /* No-op inside QEMU. */
1535 break;
1536 case 0x02:
1537 /* DRAINA */
1538 /* No-op inside QEMU. */
1539 break;
1540 case 0x2D:
1541 /* WRVPTPTR */
4d5712f1 1542 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
2ace7e55
RH
1543 break;
1544 case 0x31:
1545 /* WRVAL */
1546 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1547 break;
1548 case 0x32:
1549 /* RDVAL */
1550 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1551 break;
1552
1553 case 0x35: {
1554 /* SWPIPL */
1555 TCGv tmp;
1556
1557 /* Note that we already know we're in kernel mode, so we know
1558 that PS only contains the 3 IPL bits. */
4d5712f1 1559 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1560
1561 /* But make sure and store only the 3 IPL bits from the user. */
1562 tmp = tcg_temp_new();
1563 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
4d5712f1 1564 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1565 tcg_temp_free(tmp);
1566 break;
1567 }
1568
1569 case 0x36:
1570 /* RDPS */
4d5712f1 1571 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1572 break;
1573 case 0x38:
1574 /* WRUSP */
1575 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1576 break;
1577 case 0x3A:
1578 /* RDUSP */
1579 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1580 break;
1581 case 0x3C:
1582 /* WHAMI */
1583 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
4d5712f1 1584 offsetof(CPUAlphaState, cpu_index));
2ace7e55
RH
1585 break;
1586
1587 default:
1588 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
1589 }
1590 return NO_EXIT;
1591 }
1592#endif
1593
1594 return gen_invalid(ctx);
1595}
1596
26b46094
RH
1597#ifndef CONFIG_USER_ONLY
1598
1599#define PR_BYTE 0x100000
1600#define PR_LONG 0x200000
1601
1602static int cpu_pr_data(int pr)
1603{
1604 switch (pr) {
1605 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1606 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1607 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1608 case 3: return offsetof(CPUAlphaState, trap_arg0);
1609 case 4: return offsetof(CPUAlphaState, trap_arg1);
1610 case 5: return offsetof(CPUAlphaState, trap_arg2);
1611 case 6: return offsetof(CPUAlphaState, exc_addr);
1612 case 7: return offsetof(CPUAlphaState, palbr);
1613 case 8: return offsetof(CPUAlphaState, ptbr);
1614 case 9: return offsetof(CPUAlphaState, vptptr);
1615 case 10: return offsetof(CPUAlphaState, unique);
1616 case 11: return offsetof(CPUAlphaState, sysval);
1617 case 12: return offsetof(CPUAlphaState, usp);
1618
1619 case 32 ... 39:
1620 return offsetof(CPUAlphaState, shadow[pr - 32]);
1621 case 40 ... 63:
1622 return offsetof(CPUAlphaState, scratch[pr - 40]);
c781cf96
RH
1623
1624 case 251:
1625 return offsetof(CPUAlphaState, alarm_expire);
26b46094
RH
1626 }
1627 return 0;
1628}
1629
c781cf96 1630static ExitStatus gen_mfpr(int ra, int regno)
26b46094
RH
1631{
1632 int data = cpu_pr_data(regno);
1633
1634 /* In our emulated PALcode, these processor registers have no
1635 side effects from reading. */
1636 if (ra == 31) {
c781cf96
RH
1637 return NO_EXIT;
1638 }
1639
1640 if (regno == 250) {
1641 /* WALL_TIME */
1642 if (use_icount) {
1643 gen_io_start();
1644 gen_helper_get_time(cpu_ir[ra]);
1645 gen_io_end();
1646 return EXIT_PC_STALE;
1647 } else {
1648 gen_helper_get_time(cpu_ir[ra]);
1649 return NO_EXIT;
1650 }
26b46094
RH
1651 }
1652
1653 /* The basic registers are data only, and unknown registers
1654 are read-zero, write-ignore. */
1655 if (data == 0) {
1656 tcg_gen_movi_i64(cpu_ir[ra], 0);
1657 } else if (data & PR_BYTE) {
1658 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1659 } else if (data & PR_LONG) {
1660 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1661 } else {
1662 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1663 }
c781cf96 1664 return NO_EXIT;
26b46094
RH
1665}
1666
bc24270e 1667static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1668{
1669 TCGv tmp;
bc24270e 1670 int data;
26b46094
RH
1671
1672 if (rb == 31) {
1673 tmp = tcg_const_i64(0);
1674 } else {
1675 tmp = cpu_ir[rb];
1676 }
1677
bc24270e
RH
1678 switch (regno) {
1679 case 255:
3b4fefd6
RH
1680 /* TBIA */
1681 gen_helper_tbia();
bc24270e
RH
1682 break;
1683
1684 case 254:
3b4fefd6
RH
1685 /* TBIS */
1686 gen_helper_tbis(tmp);
bc24270e
RH
1687 break;
1688
1689 case 253:
1690 /* WAIT */
1691 tmp = tcg_const_i64(1);
4d5712f1 1692 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
bc24270e
RH
1693 return gen_excp(ctx, EXCP_HLT, 0);
1694
034ebc27
RH
1695 case 252:
1696 /* HALT */
1697 gen_helper_halt(tmp);
1698 return EXIT_PC_STALE;
1699
c781cf96
RH
1700 case 251:
1701 /* ALARM */
1702 gen_helper_set_alarm(tmp);
1703 break;
1704
bc24270e 1705 default:
3b4fefd6
RH
1706 /* The basic registers are data only, and unknown registers
1707 are read-zero, write-ignore. */
bc24270e 1708 data = cpu_pr_data(regno);
3b4fefd6
RH
1709 if (data != 0) {
1710 if (data & PR_BYTE) {
1711 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1712 } else if (data & PR_LONG) {
1713 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1714 } else {
1715 tcg_gen_st_i64(tmp, cpu_env, data);
1716 }
26b46094 1717 }
bc24270e 1718 break;
26b46094
RH
1719 }
1720
1721 if (rb == 31) {
1722 tcg_temp_free(tmp);
1723 }
bc24270e
RH
1724
1725 return NO_EXIT;
26b46094
RH
1726}
1727#endif /* !USER_ONLY*/
1728
4af70374 1729static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1730{
1731 uint32_t palcode;
efa64351
MT
1732 int32_t disp21, disp16;
1733#ifndef CONFIG_USER_ONLY
1734 int32_t disp12;
1735#endif
f88fe4e3 1736 uint16_t fn11;
b6fb147c 1737 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1738 uint8_t lit;
4af70374 1739 ExitStatus ret;
4c9649a9
JM
1740
1741 /* Decode all instruction fields */
1742 opc = insn >> 26;
1743 ra = (insn >> 21) & 0x1F;
1744 rb = (insn >> 16) & 0x1F;
1745 rc = insn & 0x1F;
13e4df99 1746 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1747 if (rb == 31 && !islit) {
1748 islit = 1;
1749 lit = 0;
1750 } else
1751 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1752 palcode = insn & 0x03FFFFFF;
1753 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1754 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1755#ifndef CONFIG_USER_ONLY
4c9649a9 1756 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1757#endif
4c9649a9
JM
1758 fn11 = (insn >> 5) & 0x000007FF;
1759 fpfn = fn11 & 0x3F;
1760 fn7 = (insn >> 5) & 0x0000007F;
806991da 1761 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1762 opc, ra, rb, rc, disp16);
806991da 1763
4af70374 1764 ret = NO_EXIT;
4c9649a9
JM
1765 switch (opc) {
1766 case 0x00:
1767 /* CALL_PAL */
2ace7e55
RH
1768 ret = gen_call_pal(ctx, palcode);
1769 break;
4c9649a9
JM
1770 case 0x01:
1771 /* OPC01 */
1772 goto invalid_opc;
1773 case 0x02:
1774 /* OPC02 */
1775 goto invalid_opc;
1776 case 0x03:
1777 /* OPC03 */
1778 goto invalid_opc;
1779 case 0x04:
1780 /* OPC04 */
1781 goto invalid_opc;
1782 case 0x05:
1783 /* OPC05 */
1784 goto invalid_opc;
1785 case 0x06:
1786 /* OPC06 */
1787 goto invalid_opc;
1788 case 0x07:
1789 /* OPC07 */
1790 goto invalid_opc;
1791 case 0x08:
1792 /* LDA */
1ef4ef4e 1793 if (likely(ra != 31)) {
496cb5b9 1794 if (rb != 31)
3761035f
AJ
1795 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1796 else
1797 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1798 }
4c9649a9
JM
1799 break;
1800 case 0x09:
1801 /* LDAH */
1ef4ef4e 1802 if (likely(ra != 31)) {
496cb5b9 1803 if (rb != 31)
3761035f
AJ
1804 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1805 else
1806 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1807 }
4c9649a9
JM
1808 break;
1809 case 0x0A:
1810 /* LDBU */
a18ad893
RH
1811 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1812 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1813 break;
1814 }
1815 goto invalid_opc;
4c9649a9
JM
1816 case 0x0B:
1817 /* LDQ_U */
f18cd223 1818 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1819 break;
1820 case 0x0C:
1821 /* LDWU */
a18ad893
RH
1822 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1823 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1824 break;
1825 }
1826 goto invalid_opc;
4c9649a9
JM
1827 case 0x0D:
1828 /* STW */
6910b8f6 1829 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1830 break;
1831 case 0x0E:
1832 /* STB */
6910b8f6 1833 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1834 break;
1835 case 0x0F:
1836 /* STQ_U */
6910b8f6 1837 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1838 break;
1839 case 0x10:
1840 switch (fn7) {
1841 case 0x00:
1842 /* ADDL */
30c7183b
AJ
1843 if (likely(rc != 31)) {
1844 if (ra != 31) {
1845 if (islit) {
1846 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1847 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1848 } else {
30c7183b
AJ
1849 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1850 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1851 }
30c7183b
AJ
1852 } else {
1853 if (islit)
dfaa8583 1854 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1855 else
dfaa8583 1856 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1857 }
1858 }
4c9649a9
JM
1859 break;
1860 case 0x02:
1861 /* S4ADDL */
30c7183b
AJ
1862 if (likely(rc != 31)) {
1863 if (ra != 31) {
a7812ae4 1864 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1865 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1866 if (islit)
1867 tcg_gen_addi_i64(tmp, tmp, lit);
1868 else
1869 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1870 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1871 tcg_temp_free(tmp);
30c7183b
AJ
1872 } else {
1873 if (islit)
1874 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1875 else
dfaa8583 1876 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1877 }
1878 }
4c9649a9
JM
1879 break;
1880 case 0x09:
1881 /* SUBL */
30c7183b
AJ
1882 if (likely(rc != 31)) {
1883 if (ra != 31) {
dfaa8583 1884 if (islit)
30c7183b 1885 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1886 else
30c7183b 1887 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1888 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1889 } else {
1890 if (islit)
1891 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1892 else {
30c7183b
AJ
1893 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1894 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1895 }
1896 }
4c9649a9
JM
1897 break;
1898 case 0x0B:
1899 /* S4SUBL */
30c7183b
AJ
1900 if (likely(rc != 31)) {
1901 if (ra != 31) {
a7812ae4 1902 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1903 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1904 if (islit)
1905 tcg_gen_subi_i64(tmp, tmp, lit);
1906 else
1907 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1908 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1909 tcg_temp_free(tmp);
30c7183b
AJ
1910 } else {
1911 if (islit)
1912 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1913 else {
30c7183b
AJ
1914 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1915 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1916 }
30c7183b
AJ
1917 }
1918 }
4c9649a9
JM
1919 break;
1920 case 0x0F:
1921 /* CMPBGE */
a7812ae4 1922 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1923 break;
1924 case 0x12:
1925 /* S8ADDL */
30c7183b
AJ
1926 if (likely(rc != 31)) {
1927 if (ra != 31) {
a7812ae4 1928 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1929 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1930 if (islit)
1931 tcg_gen_addi_i64(tmp, tmp, lit);
1932 else
1933 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1934 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1935 tcg_temp_free(tmp);
30c7183b
AJ
1936 } else {
1937 if (islit)
1938 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1939 else
dfaa8583 1940 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1941 }
1942 }
4c9649a9
JM
1943 break;
1944 case 0x1B:
1945 /* S8SUBL */
30c7183b
AJ
1946 if (likely(rc != 31)) {
1947 if (ra != 31) {
a7812ae4 1948 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1949 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1950 if (islit)
1951 tcg_gen_subi_i64(tmp, tmp, lit);
1952 else
1953 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1954 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1955 tcg_temp_free(tmp);
30c7183b
AJ
1956 } else {
1957 if (islit)
1958 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1959 else
30c7183b
AJ
1960 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1961 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1962 }
30c7183b
AJ
1963 }
1964 }
4c9649a9
JM
1965 break;
1966 case 0x1D:
1967 /* CMPULT */
01ff9cc8 1968 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1969 break;
1970 case 0x20:
1971 /* ADDQ */
30c7183b
AJ
1972 if (likely(rc != 31)) {
1973 if (ra != 31) {
1974 if (islit)
1975 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1976 else
dfaa8583 1977 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1978 } else {
1979 if (islit)
1980 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1981 else
dfaa8583 1982 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1983 }
1984 }
4c9649a9
JM
1985 break;
1986 case 0x22:
1987 /* S4ADDQ */
30c7183b
AJ
1988 if (likely(rc != 31)) {
1989 if (ra != 31) {
a7812ae4 1990 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1991 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1992 if (islit)
1993 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1994 else
1995 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1996 tcg_temp_free(tmp);
30c7183b
AJ
1997 } else {
1998 if (islit)
1999 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2000 else
dfaa8583 2001 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2002 }
2003 }
4c9649a9
JM
2004 break;
2005 case 0x29:
2006 /* SUBQ */
30c7183b
AJ
2007 if (likely(rc != 31)) {
2008 if (ra != 31) {
2009 if (islit)
2010 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2011 else
dfaa8583 2012 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2013 } else {
2014 if (islit)
2015 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2016 else
dfaa8583 2017 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2018 }
2019 }
4c9649a9
JM
2020 break;
2021 case 0x2B:
2022 /* S4SUBQ */
30c7183b
AJ
2023 if (likely(rc != 31)) {
2024 if (ra != 31) {
a7812ae4 2025 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2026 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2027 if (islit)
2028 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2029 else
2030 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2031 tcg_temp_free(tmp);
30c7183b
AJ
2032 } else {
2033 if (islit)
2034 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2035 else
dfaa8583 2036 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2037 }
2038 }
4c9649a9
JM
2039 break;
2040 case 0x2D:
2041 /* CMPEQ */
01ff9cc8 2042 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
2043 break;
2044 case 0x32:
2045 /* S8ADDQ */
30c7183b
AJ
2046 if (likely(rc != 31)) {
2047 if (ra != 31) {
a7812ae4 2048 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2049 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2050 if (islit)
2051 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2052 else
2053 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2054 tcg_temp_free(tmp);
30c7183b
AJ
2055 } else {
2056 if (islit)
2057 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2058 else
dfaa8583 2059 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2060 }
2061 }
4c9649a9
JM
2062 break;
2063 case 0x3B:
2064 /* S8SUBQ */
30c7183b
AJ
2065 if (likely(rc != 31)) {
2066 if (ra != 31) {
a7812ae4 2067 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2068 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2069 if (islit)
2070 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2071 else
2072 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2073 tcg_temp_free(tmp);
30c7183b
AJ
2074 } else {
2075 if (islit)
2076 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2077 else
dfaa8583 2078 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2079 }
2080 }
4c9649a9
JM
2081 break;
2082 case 0x3D:
2083 /* CMPULE */
01ff9cc8 2084 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2085 break;
2086 case 0x40:
2087 /* ADDL/V */
a7812ae4 2088 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2089 break;
2090 case 0x49:
2091 /* SUBL/V */
a7812ae4 2092 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2093 break;
2094 case 0x4D:
2095 /* CMPLT */
01ff9cc8 2096 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2097 break;
2098 case 0x60:
2099 /* ADDQ/V */
a7812ae4 2100 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2101 break;
2102 case 0x69:
2103 /* SUBQ/V */
a7812ae4 2104 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2105 break;
2106 case 0x6D:
2107 /* CMPLE */
01ff9cc8 2108 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2109 break;
2110 default:
2111 goto invalid_opc;
2112 }
2113 break;
2114 case 0x11:
2115 switch (fn7) {
2116 case 0x00:
2117 /* AND */
30c7183b 2118 if (likely(rc != 31)) {
dfaa8583 2119 if (ra == 31)
30c7183b
AJ
2120 tcg_gen_movi_i64(cpu_ir[rc], 0);
2121 else if (islit)
2122 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2123 else
2124 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2125 }
4c9649a9
JM
2126 break;
2127 case 0x08:
2128 /* BIC */
30c7183b
AJ
2129 if (likely(rc != 31)) {
2130 if (ra != 31) {
2131 if (islit)
2132 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2133 else
2134 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2135 } else
2136 tcg_gen_movi_i64(cpu_ir[rc], 0);
2137 }
4c9649a9
JM
2138 break;
2139 case 0x14:
2140 /* CMOVLBS */
bbe1dab4 2141 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2142 break;
2143 case 0x16:
2144 /* CMOVLBC */
bbe1dab4 2145 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2146 break;
2147 case 0x20:
2148 /* BIS */
30c7183b
AJ
2149 if (likely(rc != 31)) {
2150 if (ra != 31) {
2151 if (islit)
2152 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2153 else
30c7183b 2154 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2155 } else {
30c7183b
AJ
2156 if (islit)
2157 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2158 else
dfaa8583 2159 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2160 }
4c9649a9
JM
2161 }
2162 break;
2163 case 0x24:
2164 /* CMOVEQ */
bbe1dab4 2165 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2166 break;
2167 case 0x26:
2168 /* CMOVNE */
bbe1dab4 2169 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2170 break;
2171 case 0x28:
2172 /* ORNOT */
30c7183b 2173 if (likely(rc != 31)) {
dfaa8583 2174 if (ra != 31) {
30c7183b
AJ
2175 if (islit)
2176 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2177 else
2178 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2179 } else {
2180 if (islit)
2181 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2182 else
2183 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2184 }
2185 }
4c9649a9
JM
2186 break;
2187 case 0x40:
2188 /* XOR */
30c7183b
AJ
2189 if (likely(rc != 31)) {
2190 if (ra != 31) {
2191 if (islit)
2192 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2193 else
dfaa8583 2194 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2195 } else {
2196 if (islit)
2197 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2198 else
dfaa8583 2199 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2200 }
2201 }
4c9649a9
JM
2202 break;
2203 case 0x44:
2204 /* CMOVLT */
bbe1dab4 2205 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2206 break;
2207 case 0x46:
2208 /* CMOVGE */
bbe1dab4 2209 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2210 break;
2211 case 0x48:
2212 /* EQV */
30c7183b
AJ
2213 if (likely(rc != 31)) {
2214 if (ra != 31) {
2215 if (islit)
2216 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2217 else
2218 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2219 } else {
2220 if (islit)
2221 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2222 else
dfaa8583 2223 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2224 }
2225 }
4c9649a9
JM
2226 break;
2227 case 0x61:
2228 /* AMASK */
ae8ecd42 2229 if (likely(rc != 31)) {
a18ad893
RH
2230 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2231
2232 if (islit) {
2233 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2234 } else {
2235 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2236 }
ae8ecd42 2237 }
4c9649a9
JM
2238 break;
2239 case 0x64:
2240 /* CMOVLE */
bbe1dab4 2241 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2242 break;
2243 case 0x66:
2244 /* CMOVGT */
bbe1dab4 2245 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2246 break;
2247 case 0x6C:
2248 /* IMPLVER */
3761035f 2249 if (rc != 31)
8579095b 2250 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
2251 break;
2252 default:
2253 goto invalid_opc;
2254 }
2255 break;
2256 case 0x12:
2257 switch (fn7) {
2258 case 0x02:
2259 /* MSKBL */
14ab1634 2260 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2261 break;
2262 case 0x06:
2263 /* EXTBL */
377a43b6 2264 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2265 break;
2266 case 0x0B:
2267 /* INSBL */
248c42f3 2268 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2269 break;
2270 case 0x12:
2271 /* MSKWL */
14ab1634 2272 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2273 break;
2274 case 0x16:
2275 /* EXTWL */
377a43b6 2276 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2277 break;
2278 case 0x1B:
2279 /* INSWL */
248c42f3 2280 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2281 break;
2282 case 0x22:
2283 /* MSKLL */
14ab1634 2284 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2285 break;
2286 case 0x26:
2287 /* EXTLL */
377a43b6 2288 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2289 break;
2290 case 0x2B:
2291 /* INSLL */
248c42f3 2292 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2293 break;
2294 case 0x30:
2295 /* ZAP */
a7812ae4 2296 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2297 break;
2298 case 0x31:
2299 /* ZAPNOT */
a7812ae4 2300 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2301 break;
2302 case 0x32:
2303 /* MSKQL */
14ab1634 2304 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2305 break;
2306 case 0x34:
2307 /* SRL */
30c7183b
AJ
2308 if (likely(rc != 31)) {
2309 if (ra != 31) {
2310 if (islit)
2311 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2312 else {
a7812ae4 2313 TCGv shift = tcg_temp_new();
30c7183b
AJ
2314 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2315 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2316 tcg_temp_free(shift);
dfaa8583 2317 }
30c7183b
AJ
2318 } else
2319 tcg_gen_movi_i64(cpu_ir[rc], 0);
2320 }
4c9649a9
JM
2321 break;
2322 case 0x36:
2323 /* EXTQL */
377a43b6 2324 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2325 break;
2326 case 0x39:
2327 /* SLL */
30c7183b
AJ
2328 if (likely(rc != 31)) {
2329 if (ra != 31) {
2330 if (islit)
2331 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2332 else {
a7812ae4 2333 TCGv shift = tcg_temp_new();
30c7183b
AJ
2334 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2335 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2336 tcg_temp_free(shift);
dfaa8583 2337 }
30c7183b
AJ
2338 } else
2339 tcg_gen_movi_i64(cpu_ir[rc], 0);
2340 }
4c9649a9
JM
2341 break;
2342 case 0x3B:
2343 /* INSQL */
248c42f3 2344 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2345 break;
2346 case 0x3C:
2347 /* SRA */
30c7183b
AJ
2348 if (likely(rc != 31)) {
2349 if (ra != 31) {
2350 if (islit)
2351 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2352 else {
a7812ae4 2353 TCGv shift = tcg_temp_new();
30c7183b
AJ
2354 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2355 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2356 tcg_temp_free(shift);
dfaa8583 2357 }
30c7183b
AJ
2358 } else
2359 tcg_gen_movi_i64(cpu_ir[rc], 0);
2360 }
4c9649a9
JM
2361 break;
2362 case 0x52:
2363 /* MSKWH */
ffec44f1 2364 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2365 break;
2366 case 0x57:
2367 /* INSWH */
50eb6e5c 2368 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2369 break;
2370 case 0x5A:
2371 /* EXTWH */
377a43b6 2372 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2373 break;
2374 case 0x62:
2375 /* MSKLH */
ffec44f1 2376 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2377 break;
2378 case 0x67:
2379 /* INSLH */
50eb6e5c 2380 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2381 break;
2382 case 0x6A:
2383 /* EXTLH */
377a43b6 2384 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2385 break;
2386 case 0x72:
2387 /* MSKQH */
ffec44f1 2388 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2389 break;
2390 case 0x77:
2391 /* INSQH */
50eb6e5c 2392 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2393 break;
2394 case 0x7A:
2395 /* EXTQH */
377a43b6 2396 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2397 break;
2398 default:
2399 goto invalid_opc;
2400 }
2401 break;
2402 case 0x13:
2403 switch (fn7) {
2404 case 0x00:
2405 /* MULL */
30c7183b 2406 if (likely(rc != 31)) {
dfaa8583 2407 if (ra == 31)
30c7183b
AJ
2408 tcg_gen_movi_i64(cpu_ir[rc], 0);
2409 else {
2410 if (islit)
2411 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2412 else
2413 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2414 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2415 }
2416 }
4c9649a9
JM
2417 break;
2418 case 0x20:
2419 /* MULQ */
30c7183b 2420 if (likely(rc != 31)) {
dfaa8583 2421 if (ra == 31)
30c7183b
AJ
2422 tcg_gen_movi_i64(cpu_ir[rc], 0);
2423 else if (islit)
2424 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2425 else
2426 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2427 }
4c9649a9
JM
2428 break;
2429 case 0x30:
2430 /* UMULH */
a7812ae4 2431 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2432 break;
2433 case 0x40:
2434 /* MULL/V */
a7812ae4 2435 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2436 break;
2437 case 0x60:
2438 /* MULQ/V */
a7812ae4 2439 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2440 break;
2441 default:
2442 goto invalid_opc;
2443 }
2444 break;
2445 case 0x14:
f24518b5 2446 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2447 case 0x04:
2448 /* ITOFS */
a18ad893 2449 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2450 goto invalid_opc;
a18ad893 2451 }
f18cd223
AJ
2452 if (likely(rc != 31)) {
2453 if (ra != 31) {
a7812ae4 2454 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2455 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2456 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2457 tcg_temp_free_i32(tmp);
f18cd223
AJ
2458 } else
2459 tcg_gen_movi_i64(cpu_fir[rc], 0);
2460 }
4c9649a9
JM
2461 break;
2462 case 0x0A:
2463 /* SQRTF */
a18ad893
RH
2464 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2465 gen_fsqrtf(rb, rc);
2466 break;
2467 }
2468 goto invalid_opc;
4c9649a9
JM
2469 case 0x0B:
2470 /* SQRTS */
a18ad893
RH
2471 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2472 gen_fsqrts(ctx, rb, rc, fn11);
2473 break;
2474 }
2475 goto invalid_opc;
4c9649a9
JM
2476 case 0x14:
2477 /* ITOFF */
a18ad893 2478 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2479 goto invalid_opc;
a18ad893 2480 }
f18cd223
AJ
2481 if (likely(rc != 31)) {
2482 if (ra != 31) {
a7812ae4 2483 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2484 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2485 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2486 tcg_temp_free_i32(tmp);
f18cd223
AJ
2487 } else
2488 tcg_gen_movi_i64(cpu_fir[rc], 0);
2489 }
4c9649a9
JM
2490 break;
2491 case 0x24:
2492 /* ITOFT */
a18ad893 2493 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2494 goto invalid_opc;
a18ad893 2495 }
f18cd223
AJ
2496 if (likely(rc != 31)) {
2497 if (ra != 31)
2498 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2499 else
2500 tcg_gen_movi_i64(cpu_fir[rc], 0);
2501 }
4c9649a9
JM
2502 break;
2503 case 0x2A:
2504 /* SQRTG */
a18ad893
RH
2505 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2506 gen_fsqrtg(rb, rc);
2507 break;
2508 }
2509 goto invalid_opc;
4c9649a9
JM
2510 case 0x02B:
2511 /* SQRTT */
a18ad893
RH
2512 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2513 gen_fsqrtt(ctx, rb, rc, fn11);
2514 break;
2515 }
2516 goto invalid_opc;
4c9649a9
JM
2517 default:
2518 goto invalid_opc;
2519 }
2520 break;
2521 case 0x15:
2522 /* VAX floating point */
2523 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2524 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2525 case 0x00:
2526 /* ADDF */
a7812ae4 2527 gen_faddf(ra, rb, rc);
4c9649a9
JM
2528 break;
2529 case 0x01:
2530 /* SUBF */
a7812ae4 2531 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2532 break;
2533 case 0x02:
2534 /* MULF */
a7812ae4 2535 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2536 break;
2537 case 0x03:
2538 /* DIVF */
a7812ae4 2539 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2540 break;
2541 case 0x1E:
2542 /* CVTDG */
2543#if 0 // TODO
a7812ae4 2544 gen_fcvtdg(rb, rc);
4c9649a9
JM
2545#else
2546 goto invalid_opc;
2547#endif
2548 break;
2549 case 0x20:
2550 /* ADDG */
a7812ae4 2551 gen_faddg(ra, rb, rc);
4c9649a9
JM
2552 break;
2553 case 0x21:
2554 /* SUBG */
a7812ae4 2555 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2556 break;
2557 case 0x22:
2558 /* MULG */
a7812ae4 2559 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2560 break;
2561 case 0x23:
2562 /* DIVG */
a7812ae4 2563 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2564 break;
2565 case 0x25:
2566 /* CMPGEQ */
a7812ae4 2567 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2568 break;
2569 case 0x26:
2570 /* CMPGLT */
a7812ae4 2571 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2572 break;
2573 case 0x27:
2574 /* CMPGLE */
a7812ae4 2575 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2576 break;
2577 case 0x2C:
2578 /* CVTGF */
a7812ae4 2579 gen_fcvtgf(rb, rc);
4c9649a9
JM
2580 break;
2581 case 0x2D:
2582 /* CVTGD */
2583#if 0 // TODO
a7812ae4 2584 gen_fcvtgd(rb, rc);
4c9649a9
JM
2585#else
2586 goto invalid_opc;
2587#endif
2588 break;
2589 case 0x2F:
2590 /* CVTGQ */
a7812ae4 2591 gen_fcvtgq(rb, rc);
4c9649a9
JM
2592 break;
2593 case 0x3C:
2594 /* CVTQF */
a7812ae4 2595 gen_fcvtqf(rb, rc);
4c9649a9
JM
2596 break;
2597 case 0x3E:
2598 /* CVTQG */
a7812ae4 2599 gen_fcvtqg(rb, rc);
4c9649a9
JM
2600 break;
2601 default:
2602 goto invalid_opc;
2603 }
2604 break;
2605 case 0x16:
2606 /* IEEE floating-point */
f24518b5 2607 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2608 case 0x00:
2609 /* ADDS */
f24518b5 2610 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2611 break;
2612 case 0x01:
2613 /* SUBS */
f24518b5 2614 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2615 break;
2616 case 0x02:
2617 /* MULS */
f24518b5 2618 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2619 break;
2620 case 0x03:
2621 /* DIVS */
f24518b5 2622 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2623 break;
2624 case 0x20:
2625 /* ADDT */
f24518b5 2626 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2627 break;
2628 case 0x21:
2629 /* SUBT */
f24518b5 2630 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2631 break;
2632 case 0x22:
2633 /* MULT */
f24518b5 2634 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2635 break;
2636 case 0x23:
2637 /* DIVT */
f24518b5 2638 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2639 break;
2640 case 0x24:
2641 /* CMPTUN */
f24518b5 2642 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2643 break;
2644 case 0x25:
2645 /* CMPTEQ */
f24518b5 2646 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2647 break;
2648 case 0x26:
2649 /* CMPTLT */
f24518b5 2650 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2651 break;
2652 case 0x27:
2653 /* CMPTLE */
f24518b5 2654 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2655 break;
2656 case 0x2C:
a74b4d2c 2657 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2658 /* CVTST */
f24518b5 2659 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2660 } else {
2661 /* CVTTS */
f24518b5 2662 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2663 }
2664 break;
2665 case 0x2F:
2666 /* CVTTQ */
f24518b5 2667 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2668 break;
2669 case 0x3C:
2670 /* CVTQS */
f24518b5 2671 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2672 break;
2673 case 0x3E:
2674 /* CVTQT */
f24518b5 2675 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2676 break;
2677 default:
2678 goto invalid_opc;
2679 }
2680 break;
2681 case 0x17:
2682 switch (fn11) {
2683 case 0x010:
2684 /* CVTLQ */
a7812ae4 2685 gen_fcvtlq(rb, rc);
4c9649a9
JM
2686 break;
2687 case 0x020:
f18cd223 2688 if (likely(rc != 31)) {
a06d48d9 2689 if (ra == rb) {
4c9649a9 2690 /* FMOV */
a06d48d9
RH
2691 if (ra == 31)
2692 tcg_gen_movi_i64(cpu_fir[rc], 0);
2693 else
2694 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2695 } else {
f18cd223 2696 /* CPYS */
a7812ae4 2697 gen_fcpys(ra, rb, rc);
a06d48d9 2698 }
4c9649a9
JM
2699 }
2700 break;
2701 case 0x021:
2702 /* CPYSN */
a7812ae4 2703 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2704 break;
2705 case 0x022:
2706 /* CPYSE */
a7812ae4 2707 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2708 break;
2709 case 0x024:
2710 /* MT_FPCR */
f18cd223 2711 if (likely(ra != 31))
a44a2777 2712 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
f18cd223
AJ
2713 else {
2714 TCGv tmp = tcg_const_i64(0);
a44a2777 2715 gen_helper_store_fpcr(cpu_env, tmp);
f18cd223
AJ
2716 tcg_temp_free(tmp);
2717 }
4c9649a9
JM
2718 break;
2719 case 0x025:
2720 /* MF_FPCR */
f18cd223 2721 if (likely(ra != 31))
a44a2777 2722 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
4c9649a9
JM
2723 break;
2724 case 0x02A:
2725 /* FCMOVEQ */
bbe1dab4 2726 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2727 break;
2728 case 0x02B:
2729 /* FCMOVNE */
bbe1dab4 2730 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2731 break;
2732 case 0x02C:
2733 /* FCMOVLT */
bbe1dab4 2734 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2735 break;
2736 case 0x02D:
2737 /* FCMOVGE */
bbe1dab4 2738 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2739 break;
2740 case 0x02E:
2741 /* FCMOVLE */
bbe1dab4 2742 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2743 break;
2744 case 0x02F:
2745 /* FCMOVGT */
bbe1dab4 2746 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2747 break;
2748 case 0x030:
2749 /* CVTQL */
a7812ae4 2750 gen_fcvtql(rb, rc);
4c9649a9
JM
2751 break;
2752 case 0x130:
2753 /* CVTQL/V */
4c9649a9
JM
2754 case 0x530:
2755 /* CVTQL/SV */
735cf45f
RH
2756 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2757 /v doesn't do. The only thing I can think is that /sv is a
2758 valid instruction merely for completeness in the ISA. */
2759 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2760 break;
2761 default:
2762 goto invalid_opc;
2763 }
2764 break;
2765 case 0x18:
2766 switch ((uint16_t)disp16) {
2767 case 0x0000:
2768 /* TRAPB */
4af70374 2769 /* No-op. */
4c9649a9
JM
2770 break;
2771 case 0x0400:
2772 /* EXCB */
4af70374 2773 /* No-op. */
4c9649a9
JM
2774 break;
2775 case 0x4000:
2776 /* MB */
2777 /* No-op */
2778 break;
2779 case 0x4400:
2780 /* WMB */
2781 /* No-op */
2782 break;
2783 case 0x8000:
2784 /* FETCH */
2785 /* No-op */
2786 break;
2787 case 0xA000:
2788 /* FETCH_M */
2789 /* No-op */
2790 break;
2791 case 0xC000:
2792 /* RPCC */
a9406ea1
RH
2793 if (ra != 31) {
2794 if (use_icount) {
2795 gen_io_start();
2796 gen_helper_load_pcc(cpu_ir[ra]);
2797 gen_io_end();
2798 ret = EXIT_PC_STALE;
2799 } else {
2800 gen_helper_load_pcc(cpu_ir[ra]);
2801 }
2802 }
4c9649a9
JM
2803 break;
2804 case 0xE000:
2805 /* RC */
ac316ca4 2806 gen_rx(ra, 0);
4c9649a9
JM
2807 break;
2808 case 0xE800:
2809 /* ECB */
4c9649a9
JM
2810 break;
2811 case 0xF000:
2812 /* RS */
ac316ca4 2813 gen_rx(ra, 1);
4c9649a9
JM
2814 break;
2815 case 0xF800:
2816 /* WH64 */
2817 /* No-op */
2818 break;
2819 default:
2820 goto invalid_opc;
2821 }
2822 break;
2823 case 0x19:
2824 /* HW_MFPR (PALcode) */
26b46094 2825#ifndef CONFIG_USER_ONLY
a18ad893 2826 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
c781cf96 2827 return gen_mfpr(ra, insn & 0xffff);
26b46094
RH
2828 }
2829#endif
4c9649a9 2830 goto invalid_opc;
4c9649a9 2831 case 0x1A:
49563a72
RH
2832 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2833 prediction stack action, which of course we don't implement. */
2834 if (rb != 31) {
3761035f 2835 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2836 } else {
3761035f 2837 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2838 }
2839 if (ra != 31) {
1304ca87 2840 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2841 }
4af70374 2842 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2843 break;
2844 case 0x1B:
2845 /* HW_LD (PALcode) */
a18ad893
RH
2846#ifndef CONFIG_USER_ONLY
2847 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2848 TCGv addr;
2849
2850 if (ra == 31) {
2851 break;
2852 }
2853
2854 addr = tcg_temp_new();
8bb6e981
AJ
2855 if (rb != 31)
2856 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2857 else
2858 tcg_gen_movi_i64(addr, disp12);
2859 switch ((insn >> 12) & 0xF) {
2860 case 0x0:
b5d51029 2861 /* Longword physical access (hw_ldl/p) */
2374e73e 2862 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2863 break;
2864 case 0x1:
b5d51029 2865 /* Quadword physical access (hw_ldq/p) */
2374e73e 2866 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2867 break;
2868 case 0x2:
b5d51029 2869 /* Longword physical access with lock (hw_ldl_l/p) */
2374e73e 2870 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2871 break;
2872 case 0x3:
b5d51029 2873 /* Quadword physical access with lock (hw_ldq_l/p) */
2374e73e 2874 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2875 break;
2876 case 0x4:
b5d51029 2877 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2878 goto invalid_opc;
8bb6e981 2879 case 0x5:
b5d51029 2880 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2881 goto invalid_opc;
8bb6e981
AJ
2882 break;
2883 case 0x6:
2884 /* Incpu_ir[ra]id */
b5d51029 2885 goto invalid_opc;
8bb6e981
AJ
2886 case 0x7:
2887 /* Incpu_ir[ra]id */
b5d51029 2888 goto invalid_opc;
8bb6e981 2889 case 0x8:
b5d51029 2890 /* Longword virtual access (hw_ldl) */
2374e73e 2891 goto invalid_opc;
8bb6e981 2892 case 0x9:
b5d51029 2893 /* Quadword virtual access (hw_ldq) */
2374e73e 2894 goto invalid_opc;
8bb6e981 2895 case 0xA:
b5d51029 2896 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2897 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2898 break;
2899 case 0xB:
b5d51029 2900 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2901 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2902 break;
2903 case 0xC:
b5d51029 2904 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2905 goto invalid_opc;
8bb6e981 2906 case 0xD:
b5d51029 2907 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2908 goto invalid_opc;
8bb6e981
AJ
2909 case 0xE:
2910 /* Longword virtual access with alternate access mode and
2374e73e
RH
2911 protection checks (hw_ldl/wa) */
2912 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2913 break;
2914 case 0xF:
2915 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2916 protection checks (hw_ldq/wa) */
2917 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2918 break;
2919 }
2920 tcg_temp_free(addr);
a18ad893 2921 break;
4c9649a9 2922 }
4c9649a9 2923#endif
a18ad893 2924 goto invalid_opc;
4c9649a9
JM
2925 case 0x1C:
2926 switch (fn7) {
2927 case 0x00:
2928 /* SEXTB */
a18ad893 2929 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 2930 goto invalid_opc;
a18ad893 2931 }
ae8ecd42
AJ
2932 if (likely(rc != 31)) {
2933 if (islit)
2934 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2935 else
dfaa8583 2936 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2937 }
4c9649a9
JM
2938 break;
2939 case 0x01:
2940 /* SEXTW */
a18ad893
RH
2941 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2942 if (likely(rc != 31)) {
2943 if (islit) {
2944 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2945 } else {
2946 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2947 }
2948 }
2949 break;
ae8ecd42 2950 }
a18ad893 2951 goto invalid_opc;
4c9649a9
JM
2952 case 0x30:
2953 /* CTPOP */
a18ad893
RH
2954 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2955 if (likely(rc != 31)) {
2956 if (islit) {
2957 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2958 } else {
2959 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2960 }
2961 }
2962 break;
ae8ecd42 2963 }
a18ad893 2964 goto invalid_opc;
4c9649a9
JM
2965 case 0x31:
2966 /* PERR */
a18ad893
RH
2967 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2968 gen_perr(ra, rb, rc, islit, lit);
2969 break;
2970 }
2971 goto invalid_opc;
4c9649a9
JM
2972 case 0x32:
2973 /* CTLZ */
a18ad893
RH
2974 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2975 if (likely(rc != 31)) {
2976 if (islit) {
2977 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2978 } else {
2979 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2980 }
2981 }
2982 break;
ae8ecd42 2983 }
a18ad893 2984 goto invalid_opc;
4c9649a9
JM
2985 case 0x33:
2986 /* CTTZ */
a18ad893
RH
2987 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2988 if (likely(rc != 31)) {
2989 if (islit) {
2990 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2991 } else {
2992 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2993 }
2994 }
2995 break;
ae8ecd42 2996 }
a18ad893 2997 goto invalid_opc;
4c9649a9
JM
2998 case 0x34:
2999 /* UNPKBW */
a18ad893
RH
3000 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3001 if (real_islit || ra != 31) {
3002 goto invalid_opc;
3003 }
3004 gen_unpkbw(rb, rc);
3005 break;
3006 }
3007 goto invalid_opc;
4c9649a9 3008 case 0x35:
13e4df99 3009 /* UNPKBL */
a18ad893
RH
3010 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3011 if (real_islit || ra != 31) {
3012 goto invalid_opc;
3013 }
3014 gen_unpkbl(rb, rc);
3015 break;
3016 }
3017 goto invalid_opc;
4c9649a9
JM
3018 case 0x36:
3019 /* PKWB */
a18ad893
RH
3020 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3021 if (real_islit || ra != 31) {
3022 goto invalid_opc;
3023 }
3024 gen_pkwb(rb, rc);
3025 break;
3026 }
3027 goto invalid_opc;
4c9649a9
JM
3028 case 0x37:
3029 /* PKLB */
a18ad893
RH
3030 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3031 if (real_islit || ra != 31) {
3032 goto invalid_opc;
3033 }
3034 gen_pklb(rb, rc);
3035 break;
3036 }
3037 goto invalid_opc;
4c9649a9
JM
3038 case 0x38:
3039 /* MINSB8 */
a18ad893
RH
3040 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3041 gen_minsb8(ra, rb, rc, islit, lit);
3042 break;
3043 }
3044 goto invalid_opc;
4c9649a9
JM
3045 case 0x39:
3046 /* MINSW4 */
a18ad893
RH
3047 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3048 gen_minsw4(ra, rb, rc, islit, lit);
3049 break;
3050 }
3051 goto invalid_opc;
4c9649a9
JM
3052 case 0x3A:
3053 /* MINUB8 */
a18ad893
RH
3054 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3055 gen_minub8(ra, rb, rc, islit, lit);
3056 break;
3057 }
3058 goto invalid_opc;
4c9649a9
JM
3059 case 0x3B:
3060 /* MINUW4 */
a18ad893
RH
3061 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3062 gen_minuw4(ra, rb, rc, islit, lit);
3063 break;
3064 }
3065 goto invalid_opc;
4c9649a9
JM
3066 case 0x3C:
3067 /* MAXUB8 */
a18ad893
RH
3068 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3069 gen_maxub8(ra, rb, rc, islit, lit);
3070 break;
3071 }
3072 goto invalid_opc;
4c9649a9
JM
3073 case 0x3D:
3074 /* MAXUW4 */
a18ad893
RH
3075 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3076 gen_maxuw4(ra, rb, rc, islit, lit);
3077 break;
3078 }
3079 goto invalid_opc;
4c9649a9
JM
3080 case 0x3E:
3081 /* MAXSB8 */
a18ad893
RH
3082 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3083 gen_maxsb8(ra, rb, rc, islit, lit);
3084 break;
3085 }
3086 goto invalid_opc;
4c9649a9
JM
3087 case 0x3F:
3088 /* MAXSW4 */
a18ad893
RH
3089 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3090 gen_maxsw4(ra, rb, rc, islit, lit);
3091 break;
3092 }
3093 goto invalid_opc;
4c9649a9
JM
3094 case 0x70:
3095 /* FTOIT */
a18ad893 3096 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3097 goto invalid_opc;
a18ad893 3098 }
f18cd223
AJ
3099 if (likely(rc != 31)) {
3100 if (ra != 31)
3101 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3102 else
3103 tcg_gen_movi_i64(cpu_ir[rc], 0);
3104 }
4c9649a9
JM
3105 break;
3106 case 0x78:
3107 /* FTOIS */
a18ad893 3108 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3109 goto invalid_opc;
a18ad893 3110 }
f18cd223 3111 if (rc != 31) {
a7812ae4 3112 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3113 if (ra != 31)
a7812ae4 3114 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3115 else {
3116 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3117 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3118 tcg_temp_free(tmp2);
3119 }
3120 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3121 tcg_temp_free_i32(tmp1);
f18cd223 3122 }
4c9649a9
JM
3123 break;
3124 default:
3125 goto invalid_opc;
3126 }
3127 break;
3128 case 0x1D:
3129 /* HW_MTPR (PALcode) */
26b46094 3130#ifndef CONFIG_USER_ONLY
a18ad893 3131 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
bc24270e 3132 return gen_mtpr(ctx, rb, insn & 0xffff);
26b46094
RH
3133 }
3134#endif
4c9649a9 3135 goto invalid_opc;
4c9649a9 3136 case 0x1E:
508b43ea 3137 /* HW_RET (PALcode) */
a18ad893
RH
3138#ifndef CONFIG_USER_ONLY
3139 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3140 if (rb == 31) {
3141 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3142 address from EXC_ADDR. This turns out to be useful for our
3143 emulation PALcode, so continue to accept it. */
3144 TCGv tmp = tcg_temp_new();
4d5712f1 3145 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
a18ad893
RH
3146 gen_helper_hw_ret(tmp);
3147 tcg_temp_free(tmp);
3148 } else {
3149 gen_helper_hw_ret(cpu_ir[rb]);
3150 }
3151 ret = EXIT_PC_UPDATED;
3152 break;
4c9649a9 3153 }
4c9649a9 3154#endif
a18ad893 3155 goto invalid_opc;
4c9649a9
JM
3156 case 0x1F:
3157 /* HW_ST (PALcode) */
a18ad893
RH
3158#ifndef CONFIG_USER_ONLY
3159 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3160 TCGv addr, val;
a7812ae4 3161 addr = tcg_temp_new();
8bb6e981
AJ
3162 if (rb != 31)
3163 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3164 else
3165 tcg_gen_movi_i64(addr, disp12);
3166 if (ra != 31)
3167 val = cpu_ir[ra];
3168 else {
a7812ae4 3169 val = tcg_temp_new();
8bb6e981
AJ
3170 tcg_gen_movi_i64(val, 0);
3171 }
3172 switch ((insn >> 12) & 0xF) {
3173 case 0x0:
3174 /* Longword physical access */
2374e73e 3175 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3176 break;
3177 case 0x1:
3178 /* Quadword physical access */
2374e73e 3179 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3180 break;
3181 case 0x2:
3182 /* Longword physical access with lock */
2374e73e 3183 gen_helper_stl_c_phys(val, addr, val);
8bb6e981
AJ
3184 break;
3185 case 0x3:
3186 /* Quadword physical access with lock */
2374e73e 3187 gen_helper_stq_c_phys(val, addr, val);
8bb6e981
AJ
3188 break;
3189 case 0x4:
3190 /* Longword virtual access */
2374e73e 3191 goto invalid_opc;
8bb6e981
AJ
3192 case 0x5:
3193 /* Quadword virtual access */
2374e73e 3194 goto invalid_opc;
8bb6e981
AJ
3195 case 0x6:
3196 /* Invalid */
3197 goto invalid_opc;
3198 case 0x7:
3199 /* Invalid */
3200 goto invalid_opc;
3201 case 0x8:
3202 /* Invalid */
3203 goto invalid_opc;
3204 case 0x9:
3205 /* Invalid */
3206 goto invalid_opc;
3207 case 0xA:
3208 /* Invalid */
3209 goto invalid_opc;
3210 case 0xB:
3211 /* Invalid */
3212 goto invalid_opc;
3213 case 0xC:
3214 /* Longword virtual access with alternate access mode */
2374e73e 3215 goto invalid_opc;
8bb6e981
AJ
3216 case 0xD:
3217 /* Quadword virtual access with alternate access mode */
2374e73e 3218 goto invalid_opc;
8bb6e981
AJ
3219 case 0xE:
3220 /* Invalid */
3221 goto invalid_opc;
3222 case 0xF:
3223 /* Invalid */
3224 goto invalid_opc;
3225 }
45d46ce8 3226 if (ra == 31)
8bb6e981
AJ
3227 tcg_temp_free(val);
3228 tcg_temp_free(addr);
a18ad893 3229 break;
4c9649a9 3230 }
4c9649a9 3231#endif
a18ad893 3232 goto invalid_opc;
4c9649a9
JM
3233 case 0x20:
3234 /* LDF */
f18cd223 3235 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3236 break;
3237 case 0x21:
3238 /* LDG */
f18cd223 3239 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3240 break;
3241 case 0x22:
3242 /* LDS */
f18cd223 3243 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3244 break;
3245 case 0x23:
3246 /* LDT */
f18cd223 3247 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3248 break;
3249 case 0x24:
3250 /* STF */
6910b8f6 3251 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3252 break;
3253 case 0x25:
3254 /* STG */
6910b8f6 3255 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3256 break;
3257 case 0x26:
3258 /* STS */
6910b8f6 3259 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3260 break;
3261 case 0x27:
3262 /* STT */
6910b8f6 3263 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3264 break;
3265 case 0x28:
3266 /* LDL */
f18cd223 3267 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3268 break;
3269 case 0x29:
3270 /* LDQ */
f18cd223 3271 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3272 break;
3273 case 0x2A:
3274 /* LDL_L */
f4ed8679 3275 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3276 break;
3277 case 0x2B:
3278 /* LDQ_L */
f4ed8679 3279 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3280 break;
3281 case 0x2C:
3282 /* STL */
6910b8f6 3283 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3284 break;
3285 case 0x2D:
3286 /* STQ */
6910b8f6 3287 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3288 break;
3289 case 0x2E:
3290 /* STL_C */
6910b8f6 3291 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3292 break;
3293 case 0x2F:
3294 /* STQ_C */
6910b8f6 3295 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3296 break;
3297 case 0x30:
3298 /* BR */
4af70374 3299 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3300 break;
a7812ae4 3301 case 0x31: /* FBEQ */
4af70374 3302 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3303 break;
a7812ae4 3304 case 0x32: /* FBLT */
4af70374 3305 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3306 break;
a7812ae4 3307 case 0x33: /* FBLE */
4af70374 3308 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3309 break;
3310 case 0x34:
3311 /* BSR */
4af70374 3312 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3313 break;
a7812ae4 3314 case 0x35: /* FBNE */
4af70374 3315 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3316 break;
a7812ae4 3317 case 0x36: /* FBGE */
4af70374 3318 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3319 break;
a7812ae4 3320 case 0x37: /* FBGT */
4af70374 3321 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3322 break;
3323 case 0x38:
3324 /* BLBC */
4af70374 3325 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3326 break;
3327 case 0x39:
3328 /* BEQ */
4af70374 3329 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3330 break;
3331 case 0x3A:
3332 /* BLT */
4af70374 3333 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3334 break;
3335 case 0x3B:
3336 /* BLE */
4af70374 3337 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3338 break;
3339 case 0x3C:
3340 /* BLBS */
4af70374 3341 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3342 break;
3343 case 0x3D:
3344 /* BNE */
4af70374 3345 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3346 break;
3347 case 0x3E:
3348 /* BGE */
4af70374 3349 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3350 break;
3351 case 0x3F:
3352 /* BGT */
4af70374 3353 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3354 break;
3355 invalid_opc:
8aa3fa20 3356 ret = gen_invalid(ctx);
4c9649a9
JM
3357 break;
3358 }
3359
3360 return ret;
3361}
3362
4d5712f1 3363static inline void gen_intermediate_code_internal(CPUAlphaState *env,
636aa200
BS
3364 TranslationBlock *tb,
3365 int search_pc)
4c9649a9 3366{
4c9649a9
JM
3367 DisasContext ctx, *ctxp = &ctx;
3368 target_ulong pc_start;
3369 uint32_t insn;
3370 uint16_t *gen_opc_end;
a1d1bb31 3371 CPUBreakpoint *bp;
4c9649a9 3372 int j, lj = -1;
4af70374 3373 ExitStatus ret;
2e70f6ef
PB
3374 int num_insns;
3375 int max_insns;
4c9649a9
JM
3376
3377 pc_start = tb->pc;
4c9649a9 3378 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3379
3380 ctx.tb = tb;
3381 ctx.env = env;
4c9649a9 3382 ctx.pc = pc_start;
bba9bdce 3383 ctx.mem_idx = cpu_mmu_index(env);
f24518b5
RH
3384
3385 /* ??? Every TB begins with unset rounding mode, to be initialized on
3386 the first fp insn of the TB. Alternately we could define a proper
3387 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3388 to reset the FP_STATUS to that default at the end of any TB that
3389 changes the default. We could even (gasp) dynamiclly figure out
3390 what default would be most efficient given the running program. */
3391 ctx.tb_rm = -1;
3392 /* Similarly for flush-to-zero. */
3393 ctx.tb_ftz = -1;
3394
2e70f6ef
PB
3395 num_insns = 0;
3396 max_insns = tb->cflags & CF_COUNT_MASK;
3397 if (max_insns == 0)
3398 max_insns = CF_COUNT_MASK;
3399
3400 gen_icount_start();
4af70374 3401 do {
72cf2d4f
BS
3402 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3403 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3404 if (bp->pc == ctx.pc) {
4c9649a9
JM
3405 gen_excp(&ctx, EXCP_DEBUG, 0);
3406 break;
3407 }
3408 }
3409 }
3410 if (search_pc) {
3411 j = gen_opc_ptr - gen_opc_buf;
3412 if (lj < j) {
3413 lj++;
3414 while (lj < j)
3415 gen_opc_instr_start[lj++] = 0;
4c9649a9 3416 }
ed1dda53
AJ
3417 gen_opc_pc[lj] = ctx.pc;
3418 gen_opc_instr_start[lj] = 1;
3419 gen_opc_icount[lj] = num_insns;
4c9649a9 3420 }
2e70f6ef
PB
3421 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3422 gen_io_start();
4c9649a9 3423 insn = ldl_code(ctx.pc);
2e70f6ef 3424 num_insns++;
c4b3be39
RH
3425
3426 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3427 tcg_gen_debug_insn_start(ctx.pc);
3428 }
3429
4c9649a9
JM
3430 ctx.pc += 4;
3431 ret = translate_one(ctxp, insn);
19bf517b 3432
bf1b03fe
RH
3433 /* If we reach a page boundary, are single stepping,
3434 or exhaust instruction count, stop generation. */
3435 if (ret == NO_EXIT
3436 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3437 || gen_opc_ptr >= gen_opc_end
3438 || num_insns >= max_insns
3439 || singlestep
3440 || env->singlestep_enabled)) {
3441 ret = EXIT_PC_STALE;
1b530a6d 3442 }
4af70374
RH
3443 } while (ret == NO_EXIT);
3444
3445 if (tb->cflags & CF_LAST_IO) {
3446 gen_io_end();
4c9649a9 3447 }
4af70374
RH
3448
3449 switch (ret) {
3450 case EXIT_GOTO_TB:
8aa3fa20 3451 case EXIT_NORETURN:
4af70374
RH
3452 break;
3453 case EXIT_PC_STALE:
496cb5b9 3454 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3455 /* FALLTHRU */
3456 case EXIT_PC_UPDATED:
bf1b03fe
RH
3457 if (env->singlestep_enabled) {
3458 gen_excp_1(EXCP_DEBUG, 0);
3459 } else {
3460 tcg_gen_exit_tb(0);
3461 }
4af70374
RH
3462 break;
3463 default:
3464 abort();
4c9649a9 3465 }
4af70374 3466
2e70f6ef 3467 gen_icount_end(tb, num_insns);
4c9649a9
JM
3468 *gen_opc_ptr = INDEX_op_end;
3469 if (search_pc) {
3470 j = gen_opc_ptr - gen_opc_buf;
3471 lj++;
3472 while (lj <= j)
3473 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3474 } else {
3475 tb->size = ctx.pc - pc_start;
2e70f6ef 3476 tb->icount = num_insns;
4c9649a9 3477 }
4af70374 3478
806991da 3479#ifdef DEBUG_DISAS
8fec2b8c 3480 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3481 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3482 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3483 qemu_log("\n");
4c9649a9 3484 }
4c9649a9 3485#endif
4c9649a9
JM
3486}
3487
4d5712f1 3488void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3489{
2cfc5f17 3490 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3491}
3492
4d5712f1 3493void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3494{
2cfc5f17 3495 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3496}
3497
a964acc6
RH
3498struct cpu_def_t {
3499 const char *name;
3500 int implver, amask;
3501};
3502
3503static const struct cpu_def_t cpu_defs[] = {
3504 { "ev4", IMPLVER_2106x, 0 },
3505 { "ev5", IMPLVER_21164, 0 },
3506 { "ev56", IMPLVER_21164, AMASK_BWX },
3507 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3508 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3509 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3510 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3511 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3512 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3513 { "21064", IMPLVER_2106x, 0 },
3514 { "21164", IMPLVER_21164, 0 },
3515 { "21164a", IMPLVER_21164, AMASK_BWX },
3516 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3517 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3518 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3519 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3520};
3521
aaed909a 3522CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3523{
3524 CPUAlphaState *env;
a964acc6 3525 int implver, amask, i, max;
4c9649a9 3526
7267c094 3527 env = g_malloc0(sizeof(CPUAlphaState));
4c9649a9 3528 cpu_exec_init(env);
2e70f6ef 3529 alpha_translate_init();
4c9649a9 3530 tlb_flush(env, 1);
a964acc6
RH
3531
3532 /* Default to ev67; no reason not to emulate insns by default. */
3533 implver = IMPLVER_21264;
3534 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3535 | AMASK_TRAP | AMASK_PREFETCH);
3536
3537 max = ARRAY_SIZE(cpu_defs);
3538 for (i = 0; i < max; i++) {
3539 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3540 implver = cpu_defs[i].implver;
3541 amask = cpu_defs[i].amask;
3542 break;
3543 }
3544 }
3545 env->implver = implver;
3546 env->amask = amask;
3547
4c9649a9 3548#if defined (CONFIG_USER_ONLY)
ea879fc7 3549 env->ps = PS_USER_MODE;
2edd07ef 3550 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
7b745053
RH
3551 | FPCR_UNFD | FPCR_INED | FPCR_DNOD
3552 | FPCR_DYN_NORMAL));
6049f4f8 3553#endif
6910b8f6 3554 env->lock_addr = -1;
26b46094 3555 env->fen = 1;
dad081ee 3556
0bf46a40 3557 qemu_init_vcpu(env);
4c9649a9
JM
3558 return env;
3559}
aaed909a 3560
4d5712f1 3561void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
3562{
3563 env->pc = gen_opc_pc[pc_pos];
3564}