]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
target-mips: Remove unused inline function
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19
#include "cpu.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

/* Define ALPHA_DEBUG_DISAS to get per-insn disassembly via LOG_DISAS. */
#undef ALPHA_DEBUG_DISAS
/* When set, poke softfloat status fields with direct TCG stores instead
   of calling the out-of-line set_float_* helpers (see gen_qual_*). */
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
37
4c9649a9
JM
typedef struct DisasContext DisasContext;
/* Per-translation-block disassembly state. */
struct DisasContext {
    struct TranslationBlock *tb;    /* TB currently being translated */
    CPUAlphaState *env;
    uint64_t pc;                    /* virtual PC of the next instruction */
    int mem_idx;                    /* MMU index for guest memory accesses */

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};
50
4af70374
RH
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
73
/* global register indexes: TCG globals mapping the Alpha architectural
   state (integer/fp registers, pc, ll/sc state, PALcode scratch) onto
   fields of CPUAlphaState. */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];      /* integer registers; $31 reads as zero */
static TCGv cpu_fir[31];     /* fp registers; $f31 reads as zero */
static TCGv cpu_pc;
static TCGv cpu_lock_addr;   /* LDx_L/STx_C linked address (-1 = none) */
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;      /* PALcode per-process unique value */
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names: "ir0".."ir30" (4 or 5 bytes each incl. NUL) followed
   by "fir0".."fir30" (5 or 6 bytes each) */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef
PB
90
91#include "gen-icount.h"
92
a5f1b965 93static void alpha_translate_init(void)
2e70f6ef 94{
496cb5b9
AJ
95 int i;
96 char *p;
2e70f6ef 97 static int done_init = 0;
496cb5b9 98
2e70f6ef
PB
99 if (done_init)
100 return;
496cb5b9 101
a7812ae4 102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
496cb5b9
AJ
103
104 p = cpu_reg_names;
105 for (i = 0; i < 31; i++) {
106 sprintf(p, "ir%d", i);
a7812ae4 107 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 108 offsetof(CPUAlphaState, ir[i]), p);
6ba8dcd7 109 p += (i < 10) ? 4 : 5;
f18cd223
AJ
110
111 sprintf(p, "fir%d", i);
a7812ae4 112 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 113 offsetof(CPUAlphaState, fir[i]), p);
f18cd223 114 p += (i < 10) ? 5 : 6;
496cb5b9
AJ
115 }
116
a7812ae4 117 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 118 offsetof(CPUAlphaState, pc), "pc");
496cb5b9 119
6910b8f6 120 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 121 offsetof(CPUAlphaState, lock_addr),
6910b8f6
RH
122 "lock_addr");
123 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 124 offsetof(CPUAlphaState, lock_st_addr),
6910b8f6
RH
125 "lock_st_addr");
126 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 127 offsetof(CPUAlphaState, lock_value),
6910b8f6 128 "lock_value");
f4ed8679 129
2ace7e55 130 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 131 offsetof(CPUAlphaState, unique), "unique");
2ace7e55
RH
132#ifndef CONFIG_USER_ONLY
133 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 134 offsetof(CPUAlphaState, sysval), "sysval");
2ace7e55 135 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 136 offsetof(CPUAlphaState, usp), "usp");
ab471ade
RH
137#endif
138
496cb5b9 139 /* register helpers */
a7812ae4 140#define GEN_HELPER 2
496cb5b9
AJ
141#include "helper.h"
142
2e70f6ef
PB
143 done_init = 1;
144}
145
bf1b03fe 146static void gen_excp_1(int exception, int error_code)
4c9649a9 147{
a7812ae4 148 TCGv_i32 tmp1, tmp2;
6ad02592 149
6ad02592
AJ
150 tmp1 = tcg_const_i32(exception);
151 tmp2 = tcg_const_i32(error_code);
b9f0923e 152 gen_helper_excp(cpu_env, tmp1, tmp2);
a7812ae4
PB
153 tcg_temp_free_i32(tmp2);
154 tcg_temp_free_i32(tmp1);
bf1b03fe 155}
8aa3fa20 156
bf1b03fe
RH
/* Raise EXCEPTION with ERROR_CODE at the current instruction: commit
   the PC first so the exception sees the right restart address. */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
163
8aa3fa20 164static inline ExitStatus gen_invalid(DisasContext *ctx)
4c9649a9 165{
8aa3fa20 166 return gen_excp(ctx, EXCP_OPCDEC, 0);
4c9649a9
JM
167}
168
636aa200 169static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 170{
a7812ae4
PB
171 TCGv tmp = tcg_temp_new();
172 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 173 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
174 tcg_gen_trunc_i64_i32(tmp32, tmp);
175 gen_helper_memory_to_f(t0, tmp32);
176 tcg_temp_free_i32(tmp32);
f18cd223
AJ
177 tcg_temp_free(tmp);
178}
179
636aa200 180static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 181{
a7812ae4 182 TCGv tmp = tcg_temp_new();
f18cd223 183 tcg_gen_qemu_ld64(tmp, t1, flags);
a7812ae4 184 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
185 tcg_temp_free(tmp);
186}
187
636aa200 188static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 189{
a7812ae4
PB
190 TCGv tmp = tcg_temp_new();
191 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 192 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
193 tcg_gen_trunc_i64_i32(tmp32, tmp);
194 gen_helper_memory_to_s(t0, tmp32);
195 tcg_temp_free_i32(tmp32);
f18cd223
AJ
196 tcg_temp_free(tmp);
197}
198
636aa200 199static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679 200{
f4ed8679 201 tcg_gen_qemu_ld32s(t0, t1, flags);
6910b8f6
RH
202 tcg_gen_mov_i64(cpu_lock_addr, t1);
203 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
204}
205
636aa200 206static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679 207{
f4ed8679 208 tcg_gen_qemu_ld64(t0, t1, flags);
6910b8f6
RH
209 tcg_gen_mov_i64(cpu_lock_addr, t1);
210 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
211}
212
636aa200
BS
/* Emit a register load reg[RA] = mem[reg[RB] + DISP16].  FP selects the
   floating-point register file; CLEAR masks the low 3 address bits
   (LDQ_U).  The callback supplies the access width and signedness. */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* Base register is $31: the address is just the displacement,
           so the clear can be folded at translate time. */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
246
636aa200 247static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 248{
a7812ae4
PB
249 TCGv_i32 tmp32 = tcg_temp_new_i32();
250 TCGv tmp = tcg_temp_new();
251 gen_helper_f_to_memory(tmp32, t0);
252 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
253 tcg_gen_qemu_st32(tmp, t1, flags);
254 tcg_temp_free(tmp);
a7812ae4 255 tcg_temp_free_i32(tmp32);
f18cd223
AJ
256}
257
636aa200 258static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 259{
a7812ae4
PB
260 TCGv tmp = tcg_temp_new();
261 gen_helper_g_to_memory(tmp, t0);
f18cd223
AJ
262 tcg_gen_qemu_st64(tmp, t1, flags);
263 tcg_temp_free(tmp);
264}
265
636aa200 266static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 267{
a7812ae4
PB
268 TCGv_i32 tmp32 = tcg_temp_new_i32();
269 TCGv tmp = tcg_temp_new();
270 gen_helper_s_to_memory(tmp32, t0);
271 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
272 tcg_gen_qemu_st32(tmp, t1, flags);
273 tcg_temp_free(tmp);
a7812ae4 274 tcg_temp_free_i32(tmp32);
f18cd223
AJ
275}
276
636aa200
BS
/* Emit a register store mem[reg[RB] + DISP16] = reg[RA].  FP selects
   the floating-point register file; CLEAR masks the low 3 address bits
   (STQ_U).  RA == 31 stores zero. */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* Base register is $31: fold the clear at translate time. */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    /* Only the RA == 31 path allocated a temporary. */
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
310
/* Emit STL_C/STQ_C: store reg[RA] to reg[RB]+DISP16 iff the lock from a
   prior LDx_L is still valid; reg[RA] receives 1 on success, 0 on
   failure.  QUAD selects quadword vs longword. */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    /* Local temp: must survive the brcond branches below. */
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if the address differs from the one that was locked. */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        /* Fail if memory no longer holds the value seen by the LDx_L. */
        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* Any STx_C, pass or fail, clears the lock. */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
377
4af70374 378static int use_goto_tb(DisasContext *ctx, uint64_t dest)
4c9649a9 379{
4af70374
RH
380 /* Check for the dest on the same page as the start of the TB. We
381 also want to suppress goto_tb in the case of single-steping and IO. */
382 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
383 && !ctx->env->singlestep_enabled
384 && !(ctx->tb->cflags & CF_LAST_IO));
385}
dbb30fe6 386
4af70374
RH
387static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
388{
389 uint64_t dest = ctx->pc + (disp << 2);
390
391 if (ra != 31) {
392 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
393 }
394
395 /* Notice branch-to-next; used to initialize RA with the PC. */
396 if (disp == 0) {
397 return 0;
398 } else if (use_goto_tb(ctx, dest)) {
399 tcg_gen_goto_tb(0);
400 tcg_gen_movi_i64(cpu_pc, dest);
4b4a72e5 401 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
4af70374
RH
402 return EXIT_GOTO_TB;
403 } else {
404 tcg_gen_movi_i64(cpu_pc, dest);
405 return EXIT_PC_UPDATED;
406 }
dbb30fe6
RH
407}
408
4af70374
RH
/* Emit a conditional branch on (CMP <cond> 0) to PC + DISP*4.  CMP is
   consumed by reference only; the caller owns and frees it. */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        /* Both arms can be chained: goto_tb slot 0 falls through,
           slot 1 takes the branch. */
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
453
/* Emit an integer conditional branch testing reg[RA] (or its low bit
   when MASK is set, for BLBC/BLBS) against zero. */
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* $31 always reads zero. */
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
472
4af70374 473/* Fold -0.0 for comparison with COND. */
dbb30fe6 474
4af70374 475static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 476{
dbb30fe6 477 uint64_t mzero = 1ull << 63;
f18cd223 478
dbb30fe6
RH
479 switch (cond) {
480 case TCG_COND_LE:
481 case TCG_COND_GT:
482 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 483 tcg_gen_mov_i64(dest, src);
a7812ae4 484 break;
dbb30fe6
RH
485
486 case TCG_COND_EQ:
487 case TCG_COND_NE:
488 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 489 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 490 break;
dbb30fe6
RH
491
492 case TCG_COND_GE:
dbb30fe6 493 case TCG_COND_LT:
4af70374
RH
494 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
495 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
496 tcg_gen_neg_i64(dest, dest);
497 tcg_gen_and_i64(dest, dest, src);
a7812ae4 498 break;
dbb30fe6 499
a7812ae4
PB
500 default:
501 abort();
f18cd223 502 }
dbb30fe6
RH
503}
504
4af70374
RH
/* Emit a floating-point conditional branch: test FIR[RA] against zero
   under COND (with -0.0 folded to compare equal to +0.0). */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
520
bbe1dab4 521static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 522 int islit, uint8_t lit, int mask)
4c9649a9 523{
bbe1dab4 524 TCGCond inv_cond = tcg_invert_cond(cond);
9c29504e
AJ
525 int l1;
526
527 if (unlikely(rc == 31))
528 return;
529
530 l1 = gen_new_label();
531
532 if (ra != 31) {
533 if (mask) {
a7812ae4 534 TCGv tmp = tcg_temp_new();
9c29504e
AJ
535 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
536 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
537 tcg_temp_free(tmp);
538 } else
539 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
540 } else {
541 /* Very uncommon case - Do not bother to optimize. */
542 TCGv tmp = tcg_const_i64(0);
543 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
544 tcg_temp_free(tmp);
545 }
546
4c9649a9 547 if (islit)
9c29504e 548 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 549 else
dfaa8583 550 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 551 gen_set_label(l1);
4c9649a9
JM
552}
553
bbe1dab4 554static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 555{
4af70374 556 TCGv cmp_tmp;
dbb30fe6
RH
557 int l1;
558
4af70374 559 if (unlikely(rc == 31)) {
dbb30fe6 560 return;
4af70374
RH
561 }
562
563 cmp_tmp = tcg_temp_new();
dbb30fe6 564 if (unlikely(ra == 31)) {
4af70374
RH
565 tcg_gen_movi_i64(cmp_tmp, 0);
566 } else {
567 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
dbb30fe6
RH
568 }
569
570 l1 = gen_new_label();
4af70374
RH
571 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
572 tcg_temp_free(cmp_tmp);
dbb30fe6
RH
573
574 if (rb != 31)
575 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
576 else
577 tcg_gen_movi_i64(cpu_fir[rc], 0);
578 gen_set_label(l1);
579}
580
f24518b5
RH
/* IEEE instruction qualifier bits, as encoded in the fn11 field. */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
591
/* Ensure the softfloat rounding mode matches the /RM qualifier in FN11,
   emitting code only when it differs from the mode already set earlier
   in this TB (tracked in ctx->tb_rm). */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic: take the mode from the FPCR at run time. */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
631
/* Ensure the softfloat flush-to-zero setting matches the /U qualifier
   in FN11, emitting code only when it differs from what is already in
   effect for this TB (tracked in ctx->tb_ftz). */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
661
/* Return a freshly-allocated temp holding FIR[REG] (zero for $f31) as
   an IEEE operand.  Without /S (software completion), first validate
   the input via the ieee_input helpers, which may raise.  The caller
   frees the returned temp. */
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
680
/* Clear the accumulated softfloat exception flags before an IEEE op,
   so gen_fp_exc_raise sees only the flags this op produced. */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
692
/* Raise any softfloat exception flags accumulated by the preceding op,
   minus the bits in IGNORE.  RC identifies the destination register so
   the helper can record it in EXC_MASK for precise arithmetic traps. */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
730
/* As gen_fp_exc_raise_ignore, suppressing inexact unless /I was given. */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
f24518b5 735
593f17e5
RH
/* CVTLQ: unpack the longword stored in FP-register format (sign and
   high bits split around the exponent field) into a canonical
   sign-extended 64-bit integer. */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
757
735cf45f
RH
/* CVTQL: pack the low 32 bits of a quadword into the FP-register
   longword format (inverse of gen_fcvtlq); no overflow check. */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
777
/* CVTQL/V: as gen_fcvtql, but raise an integer-overflow arithmetic
   trap when the source does not fit in 32 signed bits. */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        /* Overflow iff the value changes under 32-bit sign extension. */
        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
792
4a58aedf
RH
/* Emit a one-operand VAX floating-point op via its out-of-line helper:
   FIR[rc] = helper(FIR[rb]).  $f31 reads as zero; writes to $f31 are
   discarded. */
#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
f24518b5 815
4a58aedf
RH
/* Emit a one-operand IEEE op honoring the fn11 qualifiers: set rounding
   and flush-to-zero, clear the fp flags, run the helper, then raise
   any accumulated exceptions against destination RC. */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
838
/* Instantiate gen_fsqrts etc. as thin wrappers over gen_ieee_arith2. */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
849
/* CVTTQ: convert T_floating to quadword integer, dispatching on the
   common qualifier combinations to specialized helpers. */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
889
4a58aedf
RH
/* Emit an integer-to-IEEE conversion (CVTQS/CVTQT).  Exception
   bookkeeping is emitted only under /I, since inexact is the only
   exception these conversions can raise. */
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    /* Only the RB == 31 path allocated a temporary. */
    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
925
/* Instantiate gen_fcvtqs/gen_fcvtqt as wrappers over gen_ieee_intcvt. */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
934
dc96be4b
RH
/* Common implementation of CPYS/CPYSN/CPYSE: combine the MASK bits of
   FIR[ra] (complemented when INV_A) with the non-MASK bits of FIR[rb].
   ZA/ZB track operands statically known to be zero ($f31) so the OR
   can be simplified at translate time. */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask; reuse the constant directly. */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
994
/* CPYS: copy the sign bit of FIR[ra] onto FIR[rb]. */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN: copy the complemented sign bit of FIR[ra] onto FIR[rb]. */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE: copy sign and exponent of FIR[ra] onto FIR[rb]. */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1009
4a58aedf
RH
/* Emit a two-operand VAX floating-point op via its helper:
   FIR[rc] = helper(FIR[ra], FIR[rb]), with $f31 reading as zero and
   writes to $f31 discarded. */
#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
1051
/* Emit a two-operand IEEE arithmetic op honoring the fn11 qualifiers:
   rounding, flush-to-zero, input validation, and exception raising. */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1076
/* Instantiate gen_fadds etc. as thin wrappers over gen_ieee_arith3. */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1091
/* Emit an IEEE comparison (CMPTxx).  No rounding/FTZ setup is needed;
   inputs are validated in compare mode (is_cmp = 1). */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1114
/* Expand a gen_f<op> wrapper that forwards to gen_ieee_compare with the
   matching helper.  Instantiated for the T-format IEEE compares.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
a7812ae4 1125
248c42f3
RH
/* Build the 64-bit byte mask selected by LIT: for each bit i set in
   LIT, byte i of the result is 0xff, otherwise 0x00.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    /* Walk from the high byte down, shifting completed bytes up.  */
    for (byte = 7; byte >= 0; --byte) {
        mask <<= 8;
        if (lit & (1 << byte)) {
            mask |= 0xff;
        }
    }
    return mask;
}
1137
87d98f95
RH
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        /* No bytes kept: result is zero.  */
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        /* Low byte only: 8-bit zero extension.  */
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        /* Low two bytes: 16-bit zero extension.  */
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        /* Low four bytes: 32-bit zero extension.  */
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        /* All bytes kept: plain move.  */
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        /* General case: AND with the expanded byte mask.  */
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
1164
1165static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1166{
1167 if (unlikely(rc == 31))
1168 return;
1169 else if (unlikely(ra == 31))
1170 tcg_gen_movi_i64(cpu_ir[rc], 0);
1171 else if (islit)
248c42f3 1172 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1173 else
1174 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1175}
1176
1177static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1178{
1179 if (unlikely(rc == 31))
1180 return;
1181 else if (unlikely(ra == 31))
1182 tcg_gen_movi_i64(cpu_ir[rc], 0);
1183 else if (islit)
248c42f3 1184 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1185 else
1186 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1187}
1188
1189
248c42f3 1190/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1191static void gen_ext_h(int ra, int rb, int rc, int islit,
1192 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1193{
1194 if (unlikely(rc == 31))
1195 return;
377a43b6
RH
1196 else if (unlikely(ra == 31))
1197 tcg_gen_movi_i64(cpu_ir[rc], 0);
1198 else {
dfaa8583 1199 if (islit) {
377a43b6
RH
1200 lit = (64 - (lit & 7) * 8) & 0x3f;
1201 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1202 } else {
377a43b6 1203 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1204 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1205 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1206 tcg_gen_neg_i64(tmp1, tmp1);
1207 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1208 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1209 tcg_temp_free(tmp1);
dfaa8583 1210 }
248c42f3 1211 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1212 }
b3249f63
AJ
1213}
1214
248c42f3 1215/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1216static void gen_ext_l(int ra, int rb, int rc, int islit,
1217 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1218{
1219 if (unlikely(rc == 31))
1220 return;
377a43b6
RH
1221 else if (unlikely(ra == 31))
1222 tcg_gen_movi_i64(cpu_ir[rc], 0);
1223 else {
dfaa8583 1224 if (islit) {
377a43b6 1225 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1226 } else {
a7812ae4 1227 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1228 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1229 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1230 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1231 tcg_temp_free(tmp);
fe2b269a 1232 }
248c42f3
RH
1233 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1234 }
1235}
1236
50eb6e5c
RH
/* INSWH, INSLH, INSQH */
/* Insert high: zap RA with BYTE_MASK, then shift the result right by
   64 - 8*(RB & 7) bits into RC.  */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1278
248c42f3 1279/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1280static void gen_ins_l(int ra, int rb, int rc, int islit,
1281 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1282{
1283 if (unlikely(rc == 31))
1284 return;
1285 else if (unlikely(ra == 31))
1286 tcg_gen_movi_i64(cpu_ir[rc], 0);
1287 else {
1288 TCGv tmp = tcg_temp_new();
1289
1290 /* The instruction description has us left-shift the byte mask
1291 the same number of byte slots as the data and apply the zap
1292 at the end. This is equivalent to simply performing the zap
1293 first and shifting afterward. */
1294 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1295
1296 if (islit) {
1297 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1298 } else {
1299 TCGv shift = tcg_temp_new();
1300 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1301 tcg_gen_shli_i64(shift, shift, 3);
1302 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1303 tcg_temp_free(shift);
1304 }
1305 tcg_temp_free(tmp);
377a43b6 1306 }
b3249f63
AJ
1307}
1308
ffec44f1
RH
/* MSKWH, MSKLH, MSKQH */
/* Mask high: clear in RA the bytes that BYTE_MASK, shifted left by
   (RB & 7) byte slots, selects in the high part.  */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Immediate form: compute the complemented mask at translation
           time and reuse the zapnot expansion.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1346
14ab1634 1347/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1348static void gen_msk_l(int ra, int rb, int rc, int islit,
1349 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1350{
1351 if (unlikely(rc == 31))
1352 return;
1353 else if (unlikely(ra == 31))
1354 tcg_gen_movi_i64(cpu_ir[rc], 0);
1355 else if (islit) {
1356 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1357 } else {
1358 TCGv shift = tcg_temp_new();
1359 TCGv mask = tcg_temp_new();
1360
1361 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1362 tcg_gen_shli_i64(shift, shift, 3);
1363 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1364 tcg_gen_shl_i64(mask, mask, shift);
1365
1366 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1367
1368 tcg_temp_free(mask);
1369 tcg_temp_free(shift);
1370 }
1371}
1372
04acd307 1373/* Code to call arith3 helpers */
a7812ae4 1374#define ARITH3(name) \
636aa200
BS
1375static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1376 uint8_t lit) \
a7812ae4
PB
1377{ \
1378 if (unlikely(rc == 31)) \
1379 return; \
1380 \
1381 if (ra != 31) { \
1382 if (islit) { \
1383 TCGv tmp = tcg_const_i64(lit); \
1384 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1385 tcg_temp_free(tmp); \
1386 } else \
1387 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1388 } else { \
1389 TCGv tmp1 = tcg_const_i64(0); \
1390 if (islit) { \
1391 TCGv tmp2 = tcg_const_i64(lit); \
1392 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1393 tcg_temp_free(tmp2); \
1394 } else \
1395 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1396 tcg_temp_free(tmp1); \
1397 } \
b3249f63 1398}
a7812ae4 1399ARITH3(umulh)
2958620f 1400ARITH3(cmpbge)
13e4df99
RH
1401ARITH3(minub8)
1402ARITH3(minsb8)
1403ARITH3(minuw4)
1404ARITH3(minsw4)
1405ARITH3(maxub8)
1406ARITH3(maxsb8)
1407ARITH3(maxuw4)
1408ARITH3(maxsw4)
1409ARITH3(perr)
1410
2958620f
RH
/* Code to call arith3 helpers */
/* As ARITH3, but for helpers that additionally take cpu_env as their
   first argument (the /V variants instantiated below).  */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
1447
13e4df99
RH
/* Expand gen_<op> for a two-operand (RB -> RC) instruction implemented
   entirely by a helper.  RB == 31 yields a zero result.  */
#define MVIOP2(name)                                            \
static inline void glue(gen_, name)(int rb, int rc)             \
{                                                               \
    if (unlikely(rc == 31))                                     \
        return;                                                 \
    if (unlikely(rb == 31))                                     \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                        \
    else                                                        \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);           \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
b3249f63 1462
9e05960f
RH
1463static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1464 int islit, uint8_t lit)
01ff9cc8 1465{
9e05960f 1466 TCGv va, vb;
01ff9cc8 1467
9e05960f 1468 if (unlikely(rc == 31)) {
13e4df99 1469 return;
9e05960f 1470 }
01ff9cc8 1471
9e05960f
RH
1472 if (ra == 31) {
1473 va = tcg_const_i64(0);
1474 } else {
1475 va = cpu_ir[ra];
1476 }
1477 if (islit) {
1478 vb = tcg_const_i64(lit);
1479 } else {
1480 vb = cpu_ir[rb];
1481 }
01ff9cc8 1482
9e05960f 1483 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1484
9e05960f
RH
1485 if (ra == 31) {
1486 tcg_temp_free(va);
1487 }
1488 if (islit) {
1489 tcg_temp_free(vb);
1490 }
01ff9cc8
AJ
1491}
1492
ac316ca4
RH
/* RS/RC: read the interrupt flag into RA (unless RA is R31) and then
   store SET (0 or 1) back into it.  */
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
1505
2ace7e55
RH
/* Emit code for CALL_PAL.  Trivial entry points are handled inline;
   anything unrecognized (or privileged code in user mode) raises the
   CALL_PAL exception or an invalid-opcode fault.  */
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            /* Punt to the emulated PALcode; only the low bits of the
               entry number are significant here.  */
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUAlphaState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
1600
26b46094
RH
1601#ifndef CONFIG_USER_ONLY
1602
/* Flags OR'd into a cpu_pr_data result to indicate the access width of
   the underlying CPUAlphaState field (default is 64-bit).  */
#define PR_BYTE         0x100000
#define PR_LONG         0x200000

/* Map a processor-register number to the offsetof its backing field in
   CPUAlphaState, optionally tagged with PR_BYTE/PR_LONG.  Returns 0 for
   unknown registers (read-zero, write-ignore).  */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
1633
c781cf96 1634static ExitStatus gen_mfpr(int ra, int regno)
26b46094
RH
1635{
1636 int data = cpu_pr_data(regno);
1637
1638 /* In our emulated PALcode, these processor registers have no
1639 side effects from reading. */
1640 if (ra == 31) {
c781cf96
RH
1641 return NO_EXIT;
1642 }
1643
1644 if (regno == 250) {
1645 /* WALL_TIME */
1646 if (use_icount) {
1647 gen_io_start();
1648 gen_helper_get_time(cpu_ir[ra]);
1649 gen_io_end();
1650 return EXIT_PC_STALE;
1651 } else {
1652 gen_helper_get_time(cpu_ir[ra]);
1653 return NO_EXIT;
1654 }
26b46094
RH
1655 }
1656
1657 /* The basic registers are data only, and unknown registers
1658 are read-zero, write-ignore. */
1659 if (data == 0) {
1660 tcg_gen_movi_i64(cpu_ir[ra], 0);
1661 } else if (data & PR_BYTE) {
1662 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1663 } else if (data & PR_LONG) {
1664 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1665 } else {
1666 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1667 }
c781cf96 1668 return NO_EXIT;
26b46094
RH
1669}
1670
bc24270e 1671static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1672{
1673 TCGv tmp;
bc24270e 1674 int data;
26b46094
RH
1675
1676 if (rb == 31) {
1677 tmp = tcg_const_i64(0);
1678 } else {
1679 tmp = cpu_ir[rb];
1680 }
1681
bc24270e
RH
1682 switch (regno) {
1683 case 255:
3b4fefd6 1684 /* TBIA */
69163fbb 1685 gen_helper_tbia(cpu_env);
bc24270e
RH
1686 break;
1687
1688 case 254:
3b4fefd6 1689 /* TBIS */
69163fbb 1690 gen_helper_tbis(cpu_env, tmp);
bc24270e
RH
1691 break;
1692
1693 case 253:
1694 /* WAIT */
1695 tmp = tcg_const_i64(1);
4d5712f1 1696 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
bc24270e
RH
1697 return gen_excp(ctx, EXCP_HLT, 0);
1698
034ebc27
RH
1699 case 252:
1700 /* HALT */
1701 gen_helper_halt(tmp);
1702 return EXIT_PC_STALE;
1703
c781cf96
RH
1704 case 251:
1705 /* ALARM */
69163fbb 1706 gen_helper_set_alarm(cpu_env, tmp);
c781cf96
RH
1707 break;
1708
bc24270e 1709 default:
3b4fefd6
RH
1710 /* The basic registers are data only, and unknown registers
1711 are read-zero, write-ignore. */
bc24270e 1712 data = cpu_pr_data(regno);
3b4fefd6
RH
1713 if (data != 0) {
1714 if (data & PR_BYTE) {
1715 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1716 } else if (data & PR_LONG) {
1717 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1718 } else {
1719 tcg_gen_st_i64(tmp, cpu_env, data);
1720 }
26b46094 1721 }
bc24270e 1722 break;
26b46094
RH
1723 }
1724
1725 if (rb == 31) {
1726 tcg_temp_free(tmp);
1727 }
bc24270e
RH
1728
1729 return NO_EXIT;
26b46094
RH
1730}
1731#endif /* !USER_ONLY*/
1732
4af70374 1733static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1734{
1735 uint32_t palcode;
efa64351
MT
1736 int32_t disp21, disp16;
1737#ifndef CONFIG_USER_ONLY
1738 int32_t disp12;
1739#endif
f88fe4e3 1740 uint16_t fn11;
b6fb147c 1741 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1742 uint8_t lit;
4af70374 1743 ExitStatus ret;
4c9649a9
JM
1744
1745 /* Decode all instruction fields */
1746 opc = insn >> 26;
1747 ra = (insn >> 21) & 0x1F;
1748 rb = (insn >> 16) & 0x1F;
1749 rc = insn & 0x1F;
13e4df99 1750 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1751 if (rb == 31 && !islit) {
1752 islit = 1;
1753 lit = 0;
1754 } else
1755 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1756 palcode = insn & 0x03FFFFFF;
1757 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1758 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1759#ifndef CONFIG_USER_ONLY
4c9649a9 1760 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1761#endif
4c9649a9
JM
1762 fn11 = (insn >> 5) & 0x000007FF;
1763 fpfn = fn11 & 0x3F;
1764 fn7 = (insn >> 5) & 0x0000007F;
806991da 1765 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1766 opc, ra, rb, rc, disp16);
806991da 1767
4af70374 1768 ret = NO_EXIT;
4c9649a9
JM
1769 switch (opc) {
1770 case 0x00:
1771 /* CALL_PAL */
2ace7e55
RH
1772 ret = gen_call_pal(ctx, palcode);
1773 break;
4c9649a9
JM
1774 case 0x01:
1775 /* OPC01 */
1776 goto invalid_opc;
1777 case 0x02:
1778 /* OPC02 */
1779 goto invalid_opc;
1780 case 0x03:
1781 /* OPC03 */
1782 goto invalid_opc;
1783 case 0x04:
1784 /* OPC04 */
1785 goto invalid_opc;
1786 case 0x05:
1787 /* OPC05 */
1788 goto invalid_opc;
1789 case 0x06:
1790 /* OPC06 */
1791 goto invalid_opc;
1792 case 0x07:
1793 /* OPC07 */
1794 goto invalid_opc;
1795 case 0x08:
1796 /* LDA */
1ef4ef4e 1797 if (likely(ra != 31)) {
496cb5b9 1798 if (rb != 31)
3761035f
AJ
1799 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1800 else
1801 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1802 }
4c9649a9
JM
1803 break;
1804 case 0x09:
1805 /* LDAH */
1ef4ef4e 1806 if (likely(ra != 31)) {
496cb5b9 1807 if (rb != 31)
3761035f
AJ
1808 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1809 else
1810 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1811 }
4c9649a9
JM
1812 break;
1813 case 0x0A:
1814 /* LDBU */
a18ad893
RH
1815 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1816 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1817 break;
1818 }
1819 goto invalid_opc;
4c9649a9
JM
1820 case 0x0B:
1821 /* LDQ_U */
f18cd223 1822 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1823 break;
1824 case 0x0C:
1825 /* LDWU */
a18ad893
RH
1826 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1827 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1828 break;
1829 }
1830 goto invalid_opc;
4c9649a9
JM
1831 case 0x0D:
1832 /* STW */
6910b8f6 1833 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1834 break;
1835 case 0x0E:
1836 /* STB */
6910b8f6 1837 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1838 break;
1839 case 0x0F:
1840 /* STQ_U */
6910b8f6 1841 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1842 break;
1843 case 0x10:
1844 switch (fn7) {
1845 case 0x00:
1846 /* ADDL */
30c7183b
AJ
1847 if (likely(rc != 31)) {
1848 if (ra != 31) {
1849 if (islit) {
1850 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1851 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1852 } else {
30c7183b
AJ
1853 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1854 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1855 }
30c7183b
AJ
1856 } else {
1857 if (islit)
dfaa8583 1858 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1859 else
dfaa8583 1860 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1861 }
1862 }
4c9649a9
JM
1863 break;
1864 case 0x02:
1865 /* S4ADDL */
30c7183b
AJ
1866 if (likely(rc != 31)) {
1867 if (ra != 31) {
a7812ae4 1868 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1869 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1870 if (islit)
1871 tcg_gen_addi_i64(tmp, tmp, lit);
1872 else
1873 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1874 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1875 tcg_temp_free(tmp);
30c7183b
AJ
1876 } else {
1877 if (islit)
1878 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1879 else
dfaa8583 1880 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1881 }
1882 }
4c9649a9
JM
1883 break;
1884 case 0x09:
1885 /* SUBL */
30c7183b
AJ
1886 if (likely(rc != 31)) {
1887 if (ra != 31) {
dfaa8583 1888 if (islit)
30c7183b 1889 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1890 else
30c7183b 1891 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1892 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1893 } else {
1894 if (islit)
1895 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1896 else {
30c7183b
AJ
1897 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1898 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1899 }
1900 }
4c9649a9
JM
1901 break;
1902 case 0x0B:
1903 /* S4SUBL */
30c7183b
AJ
1904 if (likely(rc != 31)) {
1905 if (ra != 31) {
a7812ae4 1906 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1907 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1908 if (islit)
1909 tcg_gen_subi_i64(tmp, tmp, lit);
1910 else
1911 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1912 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1913 tcg_temp_free(tmp);
30c7183b
AJ
1914 } else {
1915 if (islit)
1916 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1917 else {
30c7183b
AJ
1918 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1919 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1920 }
30c7183b
AJ
1921 }
1922 }
4c9649a9
JM
1923 break;
1924 case 0x0F:
1925 /* CMPBGE */
a7812ae4 1926 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1927 break;
1928 case 0x12:
1929 /* S8ADDL */
30c7183b
AJ
1930 if (likely(rc != 31)) {
1931 if (ra != 31) {
a7812ae4 1932 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1933 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1934 if (islit)
1935 tcg_gen_addi_i64(tmp, tmp, lit);
1936 else
1937 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1938 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1939 tcg_temp_free(tmp);
30c7183b
AJ
1940 } else {
1941 if (islit)
1942 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1943 else
dfaa8583 1944 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1945 }
1946 }
4c9649a9
JM
1947 break;
1948 case 0x1B:
1949 /* S8SUBL */
30c7183b
AJ
1950 if (likely(rc != 31)) {
1951 if (ra != 31) {
a7812ae4 1952 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1953 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1954 if (islit)
1955 tcg_gen_subi_i64(tmp, tmp, lit);
1956 else
1957 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1958 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1959 tcg_temp_free(tmp);
30c7183b
AJ
1960 } else {
1961 if (islit)
1962 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1963 else
30c7183b
AJ
1964 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1965 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1966 }
30c7183b
AJ
1967 }
1968 }
4c9649a9
JM
1969 break;
1970 case 0x1D:
1971 /* CMPULT */
01ff9cc8 1972 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1973 break;
1974 case 0x20:
1975 /* ADDQ */
30c7183b
AJ
1976 if (likely(rc != 31)) {
1977 if (ra != 31) {
1978 if (islit)
1979 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1980 else
dfaa8583 1981 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1982 } else {
1983 if (islit)
1984 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1985 else
dfaa8583 1986 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1987 }
1988 }
4c9649a9
JM
1989 break;
1990 case 0x22:
1991 /* S4ADDQ */
30c7183b
AJ
1992 if (likely(rc != 31)) {
1993 if (ra != 31) {
a7812ae4 1994 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1995 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1996 if (islit)
1997 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1998 else
1999 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2000 tcg_temp_free(tmp);
30c7183b
AJ
2001 } else {
2002 if (islit)
2003 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2004 else
dfaa8583 2005 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2006 }
2007 }
4c9649a9
JM
2008 break;
2009 case 0x29:
2010 /* SUBQ */
30c7183b
AJ
2011 if (likely(rc != 31)) {
2012 if (ra != 31) {
2013 if (islit)
2014 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2015 else
dfaa8583 2016 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2017 } else {
2018 if (islit)
2019 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2020 else
dfaa8583 2021 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2022 }
2023 }
4c9649a9
JM
2024 break;
2025 case 0x2B:
2026 /* S4SUBQ */
30c7183b
AJ
2027 if (likely(rc != 31)) {
2028 if (ra != 31) {
a7812ae4 2029 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2030 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2031 if (islit)
2032 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2033 else
2034 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2035 tcg_temp_free(tmp);
30c7183b
AJ
2036 } else {
2037 if (islit)
2038 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2039 else
dfaa8583 2040 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2041 }
2042 }
4c9649a9
JM
2043 break;
2044 case 0x2D:
2045 /* CMPEQ */
01ff9cc8 2046 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
2047 break;
2048 case 0x32:
2049 /* S8ADDQ */
30c7183b
AJ
2050 if (likely(rc != 31)) {
2051 if (ra != 31) {
a7812ae4 2052 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2053 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2054 if (islit)
2055 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2056 else
2057 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2058 tcg_temp_free(tmp);
30c7183b
AJ
2059 } else {
2060 if (islit)
2061 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2062 else
dfaa8583 2063 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2064 }
2065 }
4c9649a9
JM
2066 break;
2067 case 0x3B:
2068 /* S8SUBQ */
30c7183b
AJ
2069 if (likely(rc != 31)) {
2070 if (ra != 31) {
a7812ae4 2071 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2072 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2073 if (islit)
2074 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2075 else
2076 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2077 tcg_temp_free(tmp);
30c7183b
AJ
2078 } else {
2079 if (islit)
2080 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2081 else
dfaa8583 2082 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2083 }
2084 }
4c9649a9
JM
2085 break;
2086 case 0x3D:
2087 /* CMPULE */
01ff9cc8 2088 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2089 break;
2090 case 0x40:
2091 /* ADDL/V */
a7812ae4 2092 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2093 break;
2094 case 0x49:
2095 /* SUBL/V */
a7812ae4 2096 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2097 break;
2098 case 0x4D:
2099 /* CMPLT */
01ff9cc8 2100 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2101 break;
2102 case 0x60:
2103 /* ADDQ/V */
a7812ae4 2104 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2105 break;
2106 case 0x69:
2107 /* SUBQ/V */
a7812ae4 2108 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2109 break;
2110 case 0x6D:
2111 /* CMPLE */
01ff9cc8 2112 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2113 break;
2114 default:
2115 goto invalid_opc;
2116 }
2117 break;
2118 case 0x11:
2119 switch (fn7) {
2120 case 0x00:
2121 /* AND */
30c7183b 2122 if (likely(rc != 31)) {
dfaa8583 2123 if (ra == 31)
30c7183b
AJ
2124 tcg_gen_movi_i64(cpu_ir[rc], 0);
2125 else if (islit)
2126 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2127 else
2128 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2129 }
4c9649a9
JM
2130 break;
2131 case 0x08:
2132 /* BIC */
30c7183b
AJ
2133 if (likely(rc != 31)) {
2134 if (ra != 31) {
2135 if (islit)
2136 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2137 else
2138 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2139 } else
2140 tcg_gen_movi_i64(cpu_ir[rc], 0);
2141 }
4c9649a9
JM
2142 break;
2143 case 0x14:
2144 /* CMOVLBS */
bbe1dab4 2145 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2146 break;
2147 case 0x16:
2148 /* CMOVLBC */
bbe1dab4 2149 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2150 break;
2151 case 0x20:
2152 /* BIS */
30c7183b
AJ
2153 if (likely(rc != 31)) {
2154 if (ra != 31) {
2155 if (islit)
2156 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2157 else
30c7183b 2158 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2159 } else {
30c7183b
AJ
2160 if (islit)
2161 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2162 else
dfaa8583 2163 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2164 }
4c9649a9
JM
2165 }
2166 break;
2167 case 0x24:
2168 /* CMOVEQ */
bbe1dab4 2169 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2170 break;
2171 case 0x26:
2172 /* CMOVNE */
bbe1dab4 2173 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2174 break;
2175 case 0x28:
2176 /* ORNOT */
30c7183b 2177 if (likely(rc != 31)) {
dfaa8583 2178 if (ra != 31) {
30c7183b
AJ
2179 if (islit)
2180 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2181 else
2182 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2183 } else {
2184 if (islit)
2185 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2186 else
2187 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2188 }
2189 }
4c9649a9
JM
2190 break;
2191 case 0x40:
2192 /* XOR */
30c7183b
AJ
2193 if (likely(rc != 31)) {
2194 if (ra != 31) {
2195 if (islit)
2196 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2197 else
dfaa8583 2198 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2199 } else {
2200 if (islit)
2201 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2202 else
dfaa8583 2203 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2204 }
2205 }
4c9649a9
JM
2206 break;
2207 case 0x44:
2208 /* CMOVLT */
bbe1dab4 2209 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2210 break;
2211 case 0x46:
2212 /* CMOVGE */
bbe1dab4 2213 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2214 break;
2215 case 0x48:
2216 /* EQV */
30c7183b
AJ
2217 if (likely(rc != 31)) {
2218 if (ra != 31) {
2219 if (islit)
2220 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2221 else
2222 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2223 } else {
2224 if (islit)
2225 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2226 else
dfaa8583 2227 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2228 }
2229 }
4c9649a9
JM
2230 break;
2231 case 0x61:
2232 /* AMASK */
ae8ecd42 2233 if (likely(rc != 31)) {
a18ad893
RH
2234 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2235
2236 if (islit) {
2237 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2238 } else {
2239 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2240 }
ae8ecd42 2241 }
4c9649a9
JM
2242 break;
2243 case 0x64:
2244 /* CMOVLE */
bbe1dab4 2245 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2246 break;
2247 case 0x66:
2248 /* CMOVGT */
bbe1dab4 2249 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2250 break;
2251 case 0x6C:
2252 /* IMPLVER */
3761035f 2253 if (rc != 31)
8579095b 2254 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
2255 break;
2256 default:
2257 goto invalid_opc;
2258 }
2259 break;
2260 case 0x12:
2261 switch (fn7) {
2262 case 0x02:
2263 /* MSKBL */
14ab1634 2264 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2265 break;
2266 case 0x06:
2267 /* EXTBL */
377a43b6 2268 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2269 break;
2270 case 0x0B:
2271 /* INSBL */
248c42f3 2272 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2273 break;
2274 case 0x12:
2275 /* MSKWL */
14ab1634 2276 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2277 break;
2278 case 0x16:
2279 /* EXTWL */
377a43b6 2280 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2281 break;
2282 case 0x1B:
2283 /* INSWL */
248c42f3 2284 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2285 break;
2286 case 0x22:
2287 /* MSKLL */
14ab1634 2288 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2289 break;
2290 case 0x26:
2291 /* EXTLL */
377a43b6 2292 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2293 break;
2294 case 0x2B:
2295 /* INSLL */
248c42f3 2296 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2297 break;
2298 case 0x30:
2299 /* ZAP */
a7812ae4 2300 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2301 break;
2302 case 0x31:
2303 /* ZAPNOT */
a7812ae4 2304 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2305 break;
2306 case 0x32:
2307 /* MSKQL */
14ab1634 2308 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2309 break;
2310 case 0x34:
2311 /* SRL */
30c7183b
AJ
2312 if (likely(rc != 31)) {
2313 if (ra != 31) {
2314 if (islit)
2315 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2316 else {
a7812ae4 2317 TCGv shift = tcg_temp_new();
30c7183b
AJ
2318 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2319 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2320 tcg_temp_free(shift);
dfaa8583 2321 }
30c7183b
AJ
2322 } else
2323 tcg_gen_movi_i64(cpu_ir[rc], 0);
2324 }
4c9649a9
JM
2325 break;
2326 case 0x36:
2327 /* EXTQL */
377a43b6 2328 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2329 break;
2330 case 0x39:
2331 /* SLL */
30c7183b
AJ
2332 if (likely(rc != 31)) {
2333 if (ra != 31) {
2334 if (islit)
2335 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2336 else {
a7812ae4 2337 TCGv shift = tcg_temp_new();
30c7183b
AJ
2338 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2339 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2340 tcg_temp_free(shift);
dfaa8583 2341 }
30c7183b
AJ
2342 } else
2343 tcg_gen_movi_i64(cpu_ir[rc], 0);
2344 }
4c9649a9
JM
2345 break;
2346 case 0x3B:
2347 /* INSQL */
248c42f3 2348 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2349 break;
2350 case 0x3C:
2351 /* SRA */
30c7183b
AJ
2352 if (likely(rc != 31)) {
2353 if (ra != 31) {
2354 if (islit)
2355 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2356 else {
a7812ae4 2357 TCGv shift = tcg_temp_new();
30c7183b
AJ
2358 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2359 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2360 tcg_temp_free(shift);
dfaa8583 2361 }
30c7183b
AJ
2362 } else
2363 tcg_gen_movi_i64(cpu_ir[rc], 0);
2364 }
4c9649a9
JM
2365 break;
2366 case 0x52:
2367 /* MSKWH */
ffec44f1 2368 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2369 break;
2370 case 0x57:
2371 /* INSWH */
50eb6e5c 2372 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2373 break;
2374 case 0x5A:
2375 /* EXTWH */
377a43b6 2376 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2377 break;
2378 case 0x62:
2379 /* MSKLH */
ffec44f1 2380 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2381 break;
2382 case 0x67:
2383 /* INSLH */
50eb6e5c 2384 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2385 break;
2386 case 0x6A:
2387 /* EXTLH */
377a43b6 2388 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2389 break;
2390 case 0x72:
2391 /* MSKQH */
ffec44f1 2392 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2393 break;
2394 case 0x77:
2395 /* INSQH */
50eb6e5c 2396 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2397 break;
2398 case 0x7A:
2399 /* EXTQH */
377a43b6 2400 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2401 break;
2402 default:
2403 goto invalid_opc;
2404 }
2405 break;
2406 case 0x13:
2407 switch (fn7) {
2408 case 0x00:
2409 /* MULL */
30c7183b 2410 if (likely(rc != 31)) {
dfaa8583 2411 if (ra == 31)
30c7183b
AJ
2412 tcg_gen_movi_i64(cpu_ir[rc], 0);
2413 else {
2414 if (islit)
2415 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2416 else
2417 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2418 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2419 }
2420 }
4c9649a9
JM
2421 break;
2422 case 0x20:
2423 /* MULQ */
30c7183b 2424 if (likely(rc != 31)) {
dfaa8583 2425 if (ra == 31)
30c7183b
AJ
2426 tcg_gen_movi_i64(cpu_ir[rc], 0);
2427 else if (islit)
2428 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2429 else
2430 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2431 }
4c9649a9
JM
2432 break;
2433 case 0x30:
2434 /* UMULH */
a7812ae4 2435 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2436 break;
2437 case 0x40:
2438 /* MULL/V */
a7812ae4 2439 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2440 break;
2441 case 0x60:
2442 /* MULQ/V */
a7812ae4 2443 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2444 break;
2445 default:
2446 goto invalid_opc;
2447 }
2448 break;
2449 case 0x14:
f24518b5 2450 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2451 case 0x04:
2452 /* ITOFS */
a18ad893 2453 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2454 goto invalid_opc;
a18ad893 2455 }
f18cd223
AJ
2456 if (likely(rc != 31)) {
2457 if (ra != 31) {
a7812ae4 2458 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2459 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2460 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2461 tcg_temp_free_i32(tmp);
f18cd223
AJ
2462 } else
2463 tcg_gen_movi_i64(cpu_fir[rc], 0);
2464 }
4c9649a9
JM
2465 break;
2466 case 0x0A:
2467 /* SQRTF */
a18ad893
RH
2468 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2469 gen_fsqrtf(rb, rc);
2470 break;
2471 }
2472 goto invalid_opc;
4c9649a9
JM
2473 case 0x0B:
2474 /* SQRTS */
a18ad893
RH
2475 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2476 gen_fsqrts(ctx, rb, rc, fn11);
2477 break;
2478 }
2479 goto invalid_opc;
4c9649a9
JM
2480 case 0x14:
2481 /* ITOFF */
a18ad893 2482 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2483 goto invalid_opc;
a18ad893 2484 }
f18cd223
AJ
2485 if (likely(rc != 31)) {
2486 if (ra != 31) {
a7812ae4 2487 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2488 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2489 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2490 tcg_temp_free_i32(tmp);
f18cd223
AJ
2491 } else
2492 tcg_gen_movi_i64(cpu_fir[rc], 0);
2493 }
4c9649a9
JM
2494 break;
2495 case 0x24:
2496 /* ITOFT */
a18ad893 2497 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2498 goto invalid_opc;
a18ad893 2499 }
f18cd223
AJ
2500 if (likely(rc != 31)) {
2501 if (ra != 31)
2502 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2503 else
2504 tcg_gen_movi_i64(cpu_fir[rc], 0);
2505 }
4c9649a9
JM
2506 break;
2507 case 0x2A:
2508 /* SQRTG */
a18ad893
RH
2509 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2510 gen_fsqrtg(rb, rc);
2511 break;
2512 }
2513 goto invalid_opc;
4c9649a9
JM
2514 case 0x02B:
2515 /* SQRTT */
a18ad893
RH
2516 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2517 gen_fsqrtt(ctx, rb, rc, fn11);
2518 break;
2519 }
2520 goto invalid_opc;
4c9649a9
JM
2521 default:
2522 goto invalid_opc;
2523 }
2524 break;
2525 case 0x15:
2526 /* VAX floating point */
2527 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2528 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2529 case 0x00:
2530 /* ADDF */
a7812ae4 2531 gen_faddf(ra, rb, rc);
4c9649a9
JM
2532 break;
2533 case 0x01:
2534 /* SUBF */
a7812ae4 2535 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2536 break;
2537 case 0x02:
2538 /* MULF */
a7812ae4 2539 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2540 break;
2541 case 0x03:
2542 /* DIVF */
a7812ae4 2543 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2544 break;
2545 case 0x1E:
2546 /* CVTDG */
2547#if 0 // TODO
a7812ae4 2548 gen_fcvtdg(rb, rc);
4c9649a9
JM
2549#else
2550 goto invalid_opc;
2551#endif
2552 break;
2553 case 0x20:
2554 /* ADDG */
a7812ae4 2555 gen_faddg(ra, rb, rc);
4c9649a9
JM
2556 break;
2557 case 0x21:
2558 /* SUBG */
a7812ae4 2559 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2560 break;
2561 case 0x22:
2562 /* MULG */
a7812ae4 2563 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2564 break;
2565 case 0x23:
2566 /* DIVG */
a7812ae4 2567 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2568 break;
2569 case 0x25:
2570 /* CMPGEQ */
a7812ae4 2571 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2572 break;
2573 case 0x26:
2574 /* CMPGLT */
a7812ae4 2575 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2576 break;
2577 case 0x27:
2578 /* CMPGLE */
a7812ae4 2579 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2580 break;
2581 case 0x2C:
2582 /* CVTGF */
a7812ae4 2583 gen_fcvtgf(rb, rc);
4c9649a9
JM
2584 break;
2585 case 0x2D:
2586 /* CVTGD */
2587#if 0 // TODO
a7812ae4 2588 gen_fcvtgd(rb, rc);
4c9649a9
JM
2589#else
2590 goto invalid_opc;
2591#endif
2592 break;
2593 case 0x2F:
2594 /* CVTGQ */
a7812ae4 2595 gen_fcvtgq(rb, rc);
4c9649a9
JM
2596 break;
2597 case 0x3C:
2598 /* CVTQF */
a7812ae4 2599 gen_fcvtqf(rb, rc);
4c9649a9
JM
2600 break;
2601 case 0x3E:
2602 /* CVTQG */
a7812ae4 2603 gen_fcvtqg(rb, rc);
4c9649a9
JM
2604 break;
2605 default:
2606 goto invalid_opc;
2607 }
2608 break;
2609 case 0x16:
2610 /* IEEE floating-point */
f24518b5 2611 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2612 case 0x00:
2613 /* ADDS */
f24518b5 2614 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2615 break;
2616 case 0x01:
2617 /* SUBS */
f24518b5 2618 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2619 break;
2620 case 0x02:
2621 /* MULS */
f24518b5 2622 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2623 break;
2624 case 0x03:
2625 /* DIVS */
f24518b5 2626 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2627 break;
2628 case 0x20:
2629 /* ADDT */
f24518b5 2630 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2631 break;
2632 case 0x21:
2633 /* SUBT */
f24518b5 2634 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2635 break;
2636 case 0x22:
2637 /* MULT */
f24518b5 2638 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2639 break;
2640 case 0x23:
2641 /* DIVT */
f24518b5 2642 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2643 break;
2644 case 0x24:
2645 /* CMPTUN */
f24518b5 2646 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2647 break;
2648 case 0x25:
2649 /* CMPTEQ */
f24518b5 2650 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2651 break;
2652 case 0x26:
2653 /* CMPTLT */
f24518b5 2654 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2655 break;
2656 case 0x27:
2657 /* CMPTLE */
f24518b5 2658 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2659 break;
2660 case 0x2C:
a74b4d2c 2661 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2662 /* CVTST */
f24518b5 2663 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2664 } else {
2665 /* CVTTS */
f24518b5 2666 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2667 }
2668 break;
2669 case 0x2F:
2670 /* CVTTQ */
f24518b5 2671 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2672 break;
2673 case 0x3C:
2674 /* CVTQS */
f24518b5 2675 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2676 break;
2677 case 0x3E:
2678 /* CVTQT */
f24518b5 2679 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2680 break;
2681 default:
2682 goto invalid_opc;
2683 }
2684 break;
2685 case 0x17:
2686 switch (fn11) {
2687 case 0x010:
2688 /* CVTLQ */
a7812ae4 2689 gen_fcvtlq(rb, rc);
4c9649a9
JM
2690 break;
2691 case 0x020:
f18cd223 2692 if (likely(rc != 31)) {
a06d48d9 2693 if (ra == rb) {
4c9649a9 2694 /* FMOV */
a06d48d9
RH
2695 if (ra == 31)
2696 tcg_gen_movi_i64(cpu_fir[rc], 0);
2697 else
2698 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2699 } else {
f18cd223 2700 /* CPYS */
a7812ae4 2701 gen_fcpys(ra, rb, rc);
a06d48d9 2702 }
4c9649a9
JM
2703 }
2704 break;
2705 case 0x021:
2706 /* CPYSN */
a7812ae4 2707 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2708 break;
2709 case 0x022:
2710 /* CPYSE */
a7812ae4 2711 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2712 break;
2713 case 0x024:
2714 /* MT_FPCR */
f18cd223 2715 if (likely(ra != 31))
a44a2777 2716 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
f18cd223
AJ
2717 else {
2718 TCGv tmp = tcg_const_i64(0);
a44a2777 2719 gen_helper_store_fpcr(cpu_env, tmp);
f18cd223
AJ
2720 tcg_temp_free(tmp);
2721 }
4c9649a9
JM
2722 break;
2723 case 0x025:
2724 /* MF_FPCR */
f18cd223 2725 if (likely(ra != 31))
a44a2777 2726 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
4c9649a9
JM
2727 break;
2728 case 0x02A:
2729 /* FCMOVEQ */
bbe1dab4 2730 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2731 break;
2732 case 0x02B:
2733 /* FCMOVNE */
bbe1dab4 2734 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2735 break;
2736 case 0x02C:
2737 /* FCMOVLT */
bbe1dab4 2738 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2739 break;
2740 case 0x02D:
2741 /* FCMOVGE */
bbe1dab4 2742 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2743 break;
2744 case 0x02E:
2745 /* FCMOVLE */
bbe1dab4 2746 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2747 break;
2748 case 0x02F:
2749 /* FCMOVGT */
bbe1dab4 2750 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2751 break;
2752 case 0x030:
2753 /* CVTQL */
a7812ae4 2754 gen_fcvtql(rb, rc);
4c9649a9
JM
2755 break;
2756 case 0x130:
2757 /* CVTQL/V */
4c9649a9
JM
2758 case 0x530:
2759 /* CVTQL/SV */
735cf45f
RH
2760 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2761 /v doesn't do. The only thing I can think is that /sv is a
2762 valid instruction merely for completeness in the ISA. */
2763 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2764 break;
2765 default:
2766 goto invalid_opc;
2767 }
2768 break;
2769 case 0x18:
2770 switch ((uint16_t)disp16) {
2771 case 0x0000:
2772 /* TRAPB */
4af70374 2773 /* No-op. */
4c9649a9
JM
2774 break;
2775 case 0x0400:
2776 /* EXCB */
4af70374 2777 /* No-op. */
4c9649a9
JM
2778 break;
2779 case 0x4000:
2780 /* MB */
2781 /* No-op */
2782 break;
2783 case 0x4400:
2784 /* WMB */
2785 /* No-op */
2786 break;
2787 case 0x8000:
2788 /* FETCH */
2789 /* No-op */
2790 break;
2791 case 0xA000:
2792 /* FETCH_M */
2793 /* No-op */
2794 break;
2795 case 0xC000:
2796 /* RPCC */
a9406ea1
RH
2797 if (ra != 31) {
2798 if (use_icount) {
2799 gen_io_start();
69163fbb 2800 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2801 gen_io_end();
2802 ret = EXIT_PC_STALE;
2803 } else {
69163fbb 2804 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2805 }
2806 }
4c9649a9
JM
2807 break;
2808 case 0xE000:
2809 /* RC */
ac316ca4 2810 gen_rx(ra, 0);
4c9649a9
JM
2811 break;
2812 case 0xE800:
2813 /* ECB */
4c9649a9
JM
2814 break;
2815 case 0xF000:
2816 /* RS */
ac316ca4 2817 gen_rx(ra, 1);
4c9649a9
JM
2818 break;
2819 case 0xF800:
2820 /* WH64 */
2821 /* No-op */
2822 break;
2823 default:
2824 goto invalid_opc;
2825 }
2826 break;
2827 case 0x19:
2828 /* HW_MFPR (PALcode) */
26b46094 2829#ifndef CONFIG_USER_ONLY
a18ad893 2830 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
c781cf96 2831 return gen_mfpr(ra, insn & 0xffff);
26b46094
RH
2832 }
2833#endif
4c9649a9 2834 goto invalid_opc;
4c9649a9 2835 case 0x1A:
49563a72
RH
2836 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2837 prediction stack action, which of course we don't implement. */
2838 if (rb != 31) {
3761035f 2839 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2840 } else {
3761035f 2841 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2842 }
2843 if (ra != 31) {
1304ca87 2844 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2845 }
4af70374 2846 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2847 break;
2848 case 0x1B:
2849 /* HW_LD (PALcode) */
a18ad893
RH
2850#ifndef CONFIG_USER_ONLY
2851 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2852 TCGv addr;
2853
2854 if (ra == 31) {
2855 break;
2856 }
2857
2858 addr = tcg_temp_new();
8bb6e981
AJ
2859 if (rb != 31)
2860 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2861 else
2862 tcg_gen_movi_i64(addr, disp12);
2863 switch ((insn >> 12) & 0xF) {
2864 case 0x0:
b5d51029 2865 /* Longword physical access (hw_ldl/p) */
2374e73e 2866 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2867 break;
2868 case 0x1:
b5d51029 2869 /* Quadword physical access (hw_ldq/p) */
2374e73e 2870 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2871 break;
2872 case 0x2:
b5d51029 2873 /* Longword physical access with lock (hw_ldl_l/p) */
c3082755 2874 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2875 break;
2876 case 0x3:
b5d51029 2877 /* Quadword physical access with lock (hw_ldq_l/p) */
c3082755 2878 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2879 break;
2880 case 0x4:
b5d51029 2881 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2882 goto invalid_opc;
8bb6e981 2883 case 0x5:
b5d51029 2884 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2885 goto invalid_opc;
8bb6e981
AJ
2886 break;
2887 case 0x6:
2888 /* Incpu_ir[ra]id */
b5d51029 2889 goto invalid_opc;
8bb6e981
AJ
2890 case 0x7:
2891 /* Incpu_ir[ra]id */
b5d51029 2892 goto invalid_opc;
8bb6e981 2893 case 0x8:
b5d51029 2894 /* Longword virtual access (hw_ldl) */
2374e73e 2895 goto invalid_opc;
8bb6e981 2896 case 0x9:
b5d51029 2897 /* Quadword virtual access (hw_ldq) */
2374e73e 2898 goto invalid_opc;
8bb6e981 2899 case 0xA:
b5d51029 2900 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2901 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2902 break;
2903 case 0xB:
b5d51029 2904 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2905 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2906 break;
2907 case 0xC:
b5d51029 2908 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2909 goto invalid_opc;
8bb6e981 2910 case 0xD:
b5d51029 2911 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2912 goto invalid_opc;
8bb6e981
AJ
2913 case 0xE:
2914 /* Longword virtual access with alternate access mode and
2374e73e
RH
2915 protection checks (hw_ldl/wa) */
2916 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2917 break;
2918 case 0xF:
2919 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2920 protection checks (hw_ldq/wa) */
2921 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2922 break;
2923 }
2924 tcg_temp_free(addr);
a18ad893 2925 break;
4c9649a9 2926 }
4c9649a9 2927#endif
a18ad893 2928 goto invalid_opc;
4c9649a9
JM
2929 case 0x1C:
2930 switch (fn7) {
2931 case 0x00:
2932 /* SEXTB */
a18ad893 2933 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 2934 goto invalid_opc;
a18ad893 2935 }
ae8ecd42
AJ
2936 if (likely(rc != 31)) {
2937 if (islit)
2938 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2939 else
dfaa8583 2940 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2941 }
4c9649a9
JM
2942 break;
2943 case 0x01:
2944 /* SEXTW */
a18ad893
RH
2945 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2946 if (likely(rc != 31)) {
2947 if (islit) {
2948 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2949 } else {
2950 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2951 }
2952 }
2953 break;
ae8ecd42 2954 }
a18ad893 2955 goto invalid_opc;
4c9649a9
JM
2956 case 0x30:
2957 /* CTPOP */
a18ad893
RH
2958 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2959 if (likely(rc != 31)) {
2960 if (islit) {
2961 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2962 } else {
2963 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2964 }
2965 }
2966 break;
ae8ecd42 2967 }
a18ad893 2968 goto invalid_opc;
4c9649a9
JM
2969 case 0x31:
2970 /* PERR */
a18ad893
RH
2971 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2972 gen_perr(ra, rb, rc, islit, lit);
2973 break;
2974 }
2975 goto invalid_opc;
4c9649a9
JM
2976 case 0x32:
2977 /* CTLZ */
a18ad893
RH
2978 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2979 if (likely(rc != 31)) {
2980 if (islit) {
2981 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2982 } else {
2983 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2984 }
2985 }
2986 break;
ae8ecd42 2987 }
a18ad893 2988 goto invalid_opc;
4c9649a9
JM
2989 case 0x33:
2990 /* CTTZ */
a18ad893
RH
2991 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2992 if (likely(rc != 31)) {
2993 if (islit) {
2994 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2995 } else {
2996 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2997 }
2998 }
2999 break;
ae8ecd42 3000 }
a18ad893 3001 goto invalid_opc;
4c9649a9
JM
3002 case 0x34:
3003 /* UNPKBW */
a18ad893
RH
3004 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3005 if (real_islit || ra != 31) {
3006 goto invalid_opc;
3007 }
3008 gen_unpkbw(rb, rc);
3009 break;
3010 }
3011 goto invalid_opc;
4c9649a9 3012 case 0x35:
13e4df99 3013 /* UNPKBL */
a18ad893
RH
3014 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3015 if (real_islit || ra != 31) {
3016 goto invalid_opc;
3017 }
3018 gen_unpkbl(rb, rc);
3019 break;
3020 }
3021 goto invalid_opc;
4c9649a9
JM
3022 case 0x36:
3023 /* PKWB */
a18ad893
RH
3024 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3025 if (real_islit || ra != 31) {
3026 goto invalid_opc;
3027 }
3028 gen_pkwb(rb, rc);
3029 break;
3030 }
3031 goto invalid_opc;
4c9649a9
JM
3032 case 0x37:
3033 /* PKLB */
a18ad893
RH
3034 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3035 if (real_islit || ra != 31) {
3036 goto invalid_opc;
3037 }
3038 gen_pklb(rb, rc);
3039 break;
3040 }
3041 goto invalid_opc;
4c9649a9
JM
3042 case 0x38:
3043 /* MINSB8 */
a18ad893
RH
3044 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3045 gen_minsb8(ra, rb, rc, islit, lit);
3046 break;
3047 }
3048 goto invalid_opc;
4c9649a9
JM
3049 case 0x39:
3050 /* MINSW4 */
a18ad893
RH
3051 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3052 gen_minsw4(ra, rb, rc, islit, lit);
3053 break;
3054 }
3055 goto invalid_opc;
4c9649a9
JM
3056 case 0x3A:
3057 /* MINUB8 */
a18ad893
RH
3058 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3059 gen_minub8(ra, rb, rc, islit, lit);
3060 break;
3061 }
3062 goto invalid_opc;
4c9649a9
JM
3063 case 0x3B:
3064 /* MINUW4 */
a18ad893
RH
3065 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3066 gen_minuw4(ra, rb, rc, islit, lit);
3067 break;
3068 }
3069 goto invalid_opc;
4c9649a9
JM
3070 case 0x3C:
3071 /* MAXUB8 */
a18ad893
RH
3072 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3073 gen_maxub8(ra, rb, rc, islit, lit);
3074 break;
3075 }
3076 goto invalid_opc;
4c9649a9
JM
3077 case 0x3D:
3078 /* MAXUW4 */
a18ad893
RH
3079 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3080 gen_maxuw4(ra, rb, rc, islit, lit);
3081 break;
3082 }
3083 goto invalid_opc;
4c9649a9
JM
3084 case 0x3E:
3085 /* MAXSB8 */
a18ad893
RH
3086 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3087 gen_maxsb8(ra, rb, rc, islit, lit);
3088 break;
3089 }
3090 goto invalid_opc;
4c9649a9
JM
3091 case 0x3F:
3092 /* MAXSW4 */
a18ad893
RH
3093 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3094 gen_maxsw4(ra, rb, rc, islit, lit);
3095 break;
3096 }
3097 goto invalid_opc;
4c9649a9
JM
3098 case 0x70:
3099 /* FTOIT */
a18ad893 3100 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3101 goto invalid_opc;
a18ad893 3102 }
f18cd223
AJ
3103 if (likely(rc != 31)) {
3104 if (ra != 31)
3105 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3106 else
3107 tcg_gen_movi_i64(cpu_ir[rc], 0);
3108 }
4c9649a9
JM
3109 break;
3110 case 0x78:
3111 /* FTOIS */
a18ad893 3112 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3113 goto invalid_opc;
a18ad893 3114 }
f18cd223 3115 if (rc != 31) {
a7812ae4 3116 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3117 if (ra != 31)
a7812ae4 3118 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3119 else {
3120 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3121 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3122 tcg_temp_free(tmp2);
3123 }
3124 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3125 tcg_temp_free_i32(tmp1);
f18cd223 3126 }
4c9649a9
JM
3127 break;
3128 default:
3129 goto invalid_opc;
3130 }
3131 break;
3132 case 0x1D:
3133 /* HW_MTPR (PALcode) */
26b46094 3134#ifndef CONFIG_USER_ONLY
a18ad893 3135 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
bc24270e 3136 return gen_mtpr(ctx, rb, insn & 0xffff);
26b46094
RH
3137 }
3138#endif
4c9649a9 3139 goto invalid_opc;
4c9649a9 3140 case 0x1E:
508b43ea 3141 /* HW_RET (PALcode) */
a18ad893
RH
3142#ifndef CONFIG_USER_ONLY
3143 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3144 if (rb == 31) {
3145 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3146 address from EXC_ADDR. This turns out to be useful for our
3147 emulation PALcode, so continue to accept it. */
3148 TCGv tmp = tcg_temp_new();
4d5712f1 3149 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
69163fbb 3150 gen_helper_hw_ret(cpu_env, tmp);
a18ad893
RH
3151 tcg_temp_free(tmp);
3152 } else {
69163fbb 3153 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
a18ad893
RH
3154 }
3155 ret = EXIT_PC_UPDATED;
3156 break;
4c9649a9 3157 }
4c9649a9 3158#endif
a18ad893 3159 goto invalid_opc;
4c9649a9
JM
3160 case 0x1F:
3161 /* HW_ST (PALcode) */
a18ad893
RH
3162#ifndef CONFIG_USER_ONLY
3163 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3164 TCGv addr, val;
a7812ae4 3165 addr = tcg_temp_new();
8bb6e981
AJ
3166 if (rb != 31)
3167 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3168 else
3169 tcg_gen_movi_i64(addr, disp12);
3170 if (ra != 31)
3171 val = cpu_ir[ra];
3172 else {
a7812ae4 3173 val = tcg_temp_new();
8bb6e981
AJ
3174 tcg_gen_movi_i64(val, 0);
3175 }
3176 switch ((insn >> 12) & 0xF) {
3177 case 0x0:
3178 /* Longword physical access */
2374e73e 3179 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3180 break;
3181 case 0x1:
3182 /* Quadword physical access */
2374e73e 3183 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3184 break;
3185 case 0x2:
3186 /* Longword physical access with lock */
c3082755 3187 gen_helper_stl_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3188 break;
3189 case 0x3:
3190 /* Quadword physical access with lock */
c3082755 3191 gen_helper_stq_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3192 break;
3193 case 0x4:
3194 /* Longword virtual access */
2374e73e 3195 goto invalid_opc;
8bb6e981
AJ
3196 case 0x5:
3197 /* Quadword virtual access */
2374e73e 3198 goto invalid_opc;
8bb6e981
AJ
3199 case 0x6:
3200 /* Invalid */
3201 goto invalid_opc;
3202 case 0x7:
3203 /* Invalid */
3204 goto invalid_opc;
3205 case 0x8:
3206 /* Invalid */
3207 goto invalid_opc;
3208 case 0x9:
3209 /* Invalid */
3210 goto invalid_opc;
3211 case 0xA:
3212 /* Invalid */
3213 goto invalid_opc;
3214 case 0xB:
3215 /* Invalid */
3216 goto invalid_opc;
3217 case 0xC:
3218 /* Longword virtual access with alternate access mode */
2374e73e 3219 goto invalid_opc;
8bb6e981
AJ
3220 case 0xD:
3221 /* Quadword virtual access with alternate access mode */
2374e73e 3222 goto invalid_opc;
8bb6e981
AJ
3223 case 0xE:
3224 /* Invalid */
3225 goto invalid_opc;
3226 case 0xF:
3227 /* Invalid */
3228 goto invalid_opc;
3229 }
45d46ce8 3230 if (ra == 31)
8bb6e981
AJ
3231 tcg_temp_free(val);
3232 tcg_temp_free(addr);
a18ad893 3233 break;
4c9649a9 3234 }
4c9649a9 3235#endif
a18ad893 3236 goto invalid_opc;
4c9649a9
JM
3237 case 0x20:
3238 /* LDF */
f18cd223 3239 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3240 break;
3241 case 0x21:
3242 /* LDG */
f18cd223 3243 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3244 break;
3245 case 0x22:
3246 /* LDS */
f18cd223 3247 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3248 break;
3249 case 0x23:
3250 /* LDT */
f18cd223 3251 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3252 break;
3253 case 0x24:
3254 /* STF */
6910b8f6 3255 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3256 break;
3257 case 0x25:
3258 /* STG */
6910b8f6 3259 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3260 break;
3261 case 0x26:
3262 /* STS */
6910b8f6 3263 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3264 break;
3265 case 0x27:
3266 /* STT */
6910b8f6 3267 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3268 break;
3269 case 0x28:
3270 /* LDL */
f18cd223 3271 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3272 break;
3273 case 0x29:
3274 /* LDQ */
f18cd223 3275 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3276 break;
3277 case 0x2A:
3278 /* LDL_L */
f4ed8679 3279 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3280 break;
3281 case 0x2B:
3282 /* LDQ_L */
f4ed8679 3283 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3284 break;
3285 case 0x2C:
3286 /* STL */
6910b8f6 3287 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3288 break;
3289 case 0x2D:
3290 /* STQ */
6910b8f6 3291 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3292 break;
3293 case 0x2E:
3294 /* STL_C */
6910b8f6 3295 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3296 break;
3297 case 0x2F:
3298 /* STQ_C */
6910b8f6 3299 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3300 break;
3301 case 0x30:
3302 /* BR */
4af70374 3303 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3304 break;
a7812ae4 3305 case 0x31: /* FBEQ */
4af70374 3306 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3307 break;
a7812ae4 3308 case 0x32: /* FBLT */
4af70374 3309 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3310 break;
a7812ae4 3311 case 0x33: /* FBLE */
4af70374 3312 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3313 break;
3314 case 0x34:
3315 /* BSR */
4af70374 3316 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3317 break;
a7812ae4 3318 case 0x35: /* FBNE */
4af70374 3319 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3320 break;
a7812ae4 3321 case 0x36: /* FBGE */
4af70374 3322 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3323 break;
a7812ae4 3324 case 0x37: /* FBGT */
4af70374 3325 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3326 break;
3327 case 0x38:
3328 /* BLBC */
4af70374 3329 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3330 break;
3331 case 0x39:
3332 /* BEQ */
4af70374 3333 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3334 break;
3335 case 0x3A:
3336 /* BLT */
4af70374 3337 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3338 break;
3339 case 0x3B:
3340 /* BLE */
4af70374 3341 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3342 break;
3343 case 0x3C:
3344 /* BLBS */
4af70374 3345 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3346 break;
3347 case 0x3D:
3348 /* BNE */
4af70374 3349 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3350 break;
3351 case 0x3E:
3352 /* BGE */
4af70374 3353 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3354 break;
3355 case 0x3F:
3356 /* BGT */
4af70374 3357 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3358 break;
3359 invalid_opc:
8aa3fa20 3360 ret = gen_invalid(ctx);
4c9649a9
JM
3361 break;
3362 }
3363
3364 return ret;
3365}
3366
/* Translate a block of guest Alpha code starting at tb->pc into TCG ops.
   When SEARCH_PC is nonzero, additionally record per-op guest PC /
   icount data in the gen_opc_* side tables so that a host PC inside
   the generated code can later be mapped back to a guest PC
   (used by restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(CPUAlphaState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;                 /* lj: last gen_opc_* slot filled */
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamiclly figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        /* Emit a debug exception if a breakpoint is set on this PC.
           NOTE(review): translation continues after gen_excp here; the
           EXCP_DEBUG op raised at runtime is what actually stops
           execution — confirm against gen_excp's exit semantics.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            /* Pad any gen_opc_* slots skipped since the last insn,
               then record this insn's guest PC and icount.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        /* For icount mode, the last insn of an I/O-ending TB must run
           with I/O enabled.  */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation ended.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* Exit already generated by translate_one.  */
        break;
    case EXIT_PC_STALE:
        /* cpu_pc does not reflect ctx.pc yet; synchronize it.  */
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the gen_opc_* tables.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
3491
/* Translate a TB for execution: no PC-search side tables are built.  */
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
3496
/* Re-translate a TB while recording gen_opc_* data, so a faulting host
   PC can be mapped back to the guest PC (see restore_state_to_opc).  */
void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
3501
a964acc6
RH
/* Table entry describing a supported Alpha CPU model: the IMPLVER
   (implementation version) reported to the guest and the AMASK bits
   advertising optional architecture extensions.  */
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

/* Known CPU models, selectable by -cpu name.  Codename aliases (ev*)
   precede the part-number spellings (21x64*); the first strcmp match
   in cpu_alpha_init wins.  */
static const struct cpu_def_t cpu_defs[] = {
    { "ev4", IMPLVER_2106x, 0 },
    { "ev5", IMPLVER_21164, 0 },
    { "ev56", IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
3525
aaed909a 3526CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9 3527{
25ebd80f 3528 AlphaCPU *cpu;
4c9649a9 3529 CPUAlphaState *env;
a964acc6 3530 int implver, amask, i, max;
4c9649a9 3531
25ebd80f
AF
3532 cpu = ALPHA_CPU(object_new(TYPE_ALPHA_CPU));
3533 env = &cpu->env;
9444006f 3534
2e70f6ef 3535 alpha_translate_init();
a964acc6
RH
3536
3537 /* Default to ev67; no reason not to emulate insns by default. */
3538 implver = IMPLVER_21264;
3539 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3540 | AMASK_TRAP | AMASK_PREFETCH);
3541
3542 max = ARRAY_SIZE(cpu_defs);
3543 for (i = 0; i < max; i++) {
3544 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3545 implver = cpu_defs[i].implver;
3546 amask = cpu_defs[i].amask;
3547 break;
3548 }
3549 }
3550 env->implver = implver;
3551 env->amask = amask;
3552
0bf46a40 3553 qemu_init_vcpu(env);
4c9649a9
JM
3554 return env;
3555}
aaed909a 3556
/* Restore guest CPU state from the gen_opc_* tables built during a
   search_pc re-translation: for Alpha only the PC needs restoring.  */
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}