git.proxmox.com Git - mirror_qemu.git/blame - target-alpha/translate.c
target-alpha: Convert ARITH3 to source/sink
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
4c9649a9 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
1de7afc9 22#include "qemu/host-utils.h"
57fec1fe 23#include "tcg-op.h"
4c9649a9 24
a7812ae4
PB
25#include "helper.h"
26#define GEN_HELPER 1
27#include "helper.h"
28
19188121 29#undef ALPHA_DEBUG_DISAS
f24518b5 30#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
31
32#ifdef ALPHA_DEBUG_DISAS
806991da 33# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
34#else
35# define LOG_DISAS(...) do { } while (0)
36#endif
37
4c9649a9
JM
38typedef struct DisasContext DisasContext;
39struct DisasContext {
4af70374 40 struct TranslationBlock *tb;
4c9649a9
JM
41 uint64_t pc;
42 int mem_idx;
f24518b5
RH
43
44 /* Current rounding mode for this TB. */
45 int tb_rm;
46 /* Current flush-to-zero setting for this TB. */
47 int tb_ftz;
ca6862a6 48
801c4c28
RH
49 /* implver value for this CPU. */
50 int implver;
51
194cfb43
RH
52 /* Temporaries for $31 and $f31 as source and destination. */
53 TCGv zero;
54 TCGv sink;
55 /* Temporary for immediate constants. */
56 TCGv lit;
57
ca6862a6 58 bool singlestep_enabled;
4c9649a9
JM
59};
60
4af70374
RH
61/* Return values from translate_one, indicating the state of the TB.
62 Note that zero indicates that we are not exiting the TB. */
63
64typedef enum {
65 NO_EXIT,
66
67 /* We have emitted one or more goto_tb. No fixup required. */
68 EXIT_GOTO_TB,
69
70 /* We are not using a goto_tb (for whatever reason), but have updated
71 the PC (for whatever reason), so there's no need to do it again on
72 exiting the TB. */
73 EXIT_PC_UPDATED,
74
75 /* We are exiting the TB, but have neither emitted a goto_tb, nor
76 updated the PC for the next instruction to be executed. */
8aa3fa20
RH
77 EXIT_PC_STALE,
78
79 /* We are ending the TB with a noreturn function call, e.g. longjmp.
80 No following code will be executed. */
81 EXIT_NORETURN,
4af70374
RH
82} ExitStatus;
83
3761035f 84/* global register indexes */
a7812ae4 85static TCGv_ptr cpu_env;
496cb5b9 86static TCGv cpu_ir[31];
f18cd223 87static TCGv cpu_fir[31];
496cb5b9 88static TCGv cpu_pc;
6910b8f6
RH
89static TCGv cpu_lock_addr;
90static TCGv cpu_lock_st_addr;
91static TCGv cpu_lock_value;
2ace7e55
RH
92static TCGv cpu_unique;
93#ifndef CONFIG_USER_ONLY
94static TCGv cpu_sysval;
95static TCGv cpu_usp;
ab471ade 96#endif
496cb5b9 97
3761035f 98/* register names */
f18cd223 99static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef 100
022c62cb 101#include "exec/gen-icount.h"
2e70f6ef 102
0c28246f 103void alpha_translate_init(void)
2e70f6ef 104{
496cb5b9
AJ
105 int i;
106 char *p;
2e70f6ef 107 static int done_init = 0;
496cb5b9 108
67debe3a 109 if (done_init) {
2e70f6ef 110 return;
67debe3a 111 }
496cb5b9 112
a7812ae4 113 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
496cb5b9
AJ
114
115 p = cpu_reg_names;
116 for (i = 0; i < 31; i++) {
117 sprintf(p, "ir%d", i);
a7812ae4 118 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 119 offsetof(CPUAlphaState, ir[i]), p);
6ba8dcd7 120 p += (i < 10) ? 4 : 5;
f18cd223
AJ
121
122 sprintf(p, "fir%d", i);
a7812ae4 123 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 124 offsetof(CPUAlphaState, fir[i]), p);
f18cd223 125 p += (i < 10) ? 5 : 6;
496cb5b9
AJ
126 }
127
a7812ae4 128 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 129 offsetof(CPUAlphaState, pc), "pc");
496cb5b9 130
6910b8f6 131 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 132 offsetof(CPUAlphaState, lock_addr),
6910b8f6
RH
133 "lock_addr");
134 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 135 offsetof(CPUAlphaState, lock_st_addr),
6910b8f6
RH
136 "lock_st_addr");
137 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 138 offsetof(CPUAlphaState, lock_value),
6910b8f6 139 "lock_value");
f4ed8679 140
2ace7e55 141 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 142 offsetof(CPUAlphaState, unique), "unique");
2ace7e55
RH
143#ifndef CONFIG_USER_ONLY
144 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 145 offsetof(CPUAlphaState, sysval), "sysval");
2ace7e55 146 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 147 offsetof(CPUAlphaState, usp), "usp");
ab471ade
RH
148#endif
149
2e70f6ef
PB
150 done_init = 1;
151}
152
194cfb43
RH
153static TCGv load_zero(DisasContext *ctx)
154{
155 if (TCGV_IS_UNUSED_I64(ctx->zero)) {
156 ctx->zero = tcg_const_local_i64(0);
157 }
158 return ctx->zero;
159}
160
161static TCGv dest_sink(DisasContext *ctx)
162{
163 if (TCGV_IS_UNUSED_I64(ctx->sink)) {
164 ctx->sink = tcg_temp_local_new();
165 }
166 return ctx->sink;
167}
168
169static TCGv load_gpr(DisasContext *ctx, unsigned reg)
170{
171 if (likely(reg < 31)) {
172 return cpu_ir[reg];
173 } else {
174 return load_zero(ctx);
175 }
176}
177
178static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
179 uint8_t lit, bool islit)
180{
181 if (islit) {
182 ctx->lit = tcg_const_i64(lit);
183 return ctx->lit;
184 } else if (likely(reg < 31)) {
185 return cpu_ir[reg];
186 } else {
187 return load_zero(ctx);
188 }
189}
190
191static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
192{
193 if (likely(reg < 31)) {
194 return cpu_ir[reg];
195 } else {
196 return dest_sink(ctx);
197 }
198}
199
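/* Minimal usage sketch (hypothetical opcode body, not taken from this
   file) for the source/sink helpers above: reads of $31/$f31 resolve to
   a shared zero temporary and writes to them land in a discarded sink,
   so an emitter needs no per-register special casing:

       TCGv vc = dest_gpr(ctx, rc);                    // sink if rc == 31
       tcg_gen_add_i64(vc, load_gpr(ctx, ra),          // zero if ra == 31
                       load_gpr_lit(ctx, rb, lit, islit));
*/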
6b88b37c 200static TCGv load_fpr(DisasContext *ctx, unsigned reg)
194cfb43
RH
201{
202 if (likely(reg < 31)) {
203 return cpu_fir[reg];
204 } else {
205 return load_zero(ctx);
206 }
207}
208
075b8ddb 209static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
194cfb43
RH
210{
211 if (likely(reg < 31)) {
212 return cpu_fir[reg];
213 } else {
214 return dest_sink(ctx);
215 }
216}
217
bf1b03fe 218static void gen_excp_1(int exception, int error_code)
4c9649a9 219{
a7812ae4 220 TCGv_i32 tmp1, tmp2;
6ad02592 221
6ad02592
AJ
222 tmp1 = tcg_const_i32(exception);
223 tmp2 = tcg_const_i32(error_code);
b9f0923e 224 gen_helper_excp(cpu_env, tmp1, tmp2);
a7812ae4
PB
225 tcg_temp_free_i32(tmp2);
226 tcg_temp_free_i32(tmp1);
bf1b03fe 227}
8aa3fa20 228
bf1b03fe
RH
229static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
230{
231 tcg_gen_movi_i64(cpu_pc, ctx->pc);
232 gen_excp_1(exception, error_code);
8aa3fa20 233 return EXIT_NORETURN;
4c9649a9
JM
234}
235
8aa3fa20 236static inline ExitStatus gen_invalid(DisasContext *ctx)
4c9649a9 237{
8aa3fa20 238 return gen_excp(ctx, EXCP_OPCDEC, 0);
4c9649a9
JM
239}
240
636aa200 241static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 242{
a7812ae4 243 TCGv_i32 tmp32 = tcg_temp_new_i32();
f8da40ae 244 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4
PB
245 gen_helper_memory_to_f(t0, tmp32);
246 tcg_temp_free_i32(tmp32);
f18cd223
AJ
247}
248
636aa200 249static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 250{
a7812ae4 251 TCGv tmp = tcg_temp_new();
f8da40ae 252 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
a7812ae4 253 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
254 tcg_temp_free(tmp);
255}
256
636aa200 257static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 258{
a7812ae4 259 TCGv_i32 tmp32 = tcg_temp_new_i32();
f8da40ae 260 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4
PB
261 gen_helper_memory_to_s(t0, tmp32);
262 tcg_temp_free_i32(tmp32);
f18cd223
AJ
263}
264
636aa200 265static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679 266{
f8da40ae 267 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
6910b8f6
RH
268 tcg_gen_mov_i64(cpu_lock_addr, t1);
269 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
270}
271
636aa200 272static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679 273{
f8da40ae 274 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
6910b8f6
RH
275 tcg_gen_mov_i64(cpu_lock_addr, t1);
276 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
277}
278
636aa200
BS
279static inline void gen_load_mem(DisasContext *ctx,
280 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
281 int flags),
595b8fdd
RH
282 int ra, int rb, int32_t disp16, bool fp,
283 bool clear)
023d8ca2 284{
595b8fdd 285 TCGv tmp, addr, va;
023d8ca2 286
6910b8f6
RH
 287 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
288 prefetches, which we can treat as nops. No worries about
289 missed exceptions here. */
290 if (unlikely(ra == 31)) {
023d8ca2 291 return;
6910b8f6 292 }
023d8ca2 293
595b8fdd
RH
294 tmp = tcg_temp_new();
295 addr = load_gpr(ctx, rb);
296
297 if (disp16) {
298 tcg_gen_addi_i64(tmp, addr, disp16);
299 addr = tmp;
300 }
301 if (clear) {
302 tcg_gen_andi_i64(tmp, addr, ~0x7);
303 addr = tmp;
023d8ca2 304 }
6910b8f6
RH
305
306 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
307 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
308
595b8fdd 309 tcg_temp_free(tmp);
023d8ca2
AJ
310}
311
636aa200 312static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 313{
a7812ae4 314 TCGv_i32 tmp32 = tcg_temp_new_i32();
a7812ae4 315 gen_helper_f_to_memory(tmp32, t0);
f8da40ae 316 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4 317 tcg_temp_free_i32(tmp32);
f18cd223
AJ
318}
319
636aa200 320static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 321{
a7812ae4
PB
322 TCGv tmp = tcg_temp_new();
323 gen_helper_g_to_memory(tmp, t0);
f8da40ae 324 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
f18cd223
AJ
325 tcg_temp_free(tmp);
326}
327
636aa200 328static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 329{
a7812ae4 330 TCGv_i32 tmp32 = tcg_temp_new_i32();
a7812ae4 331 gen_helper_s_to_memory(tmp32, t0);
f8da40ae 332 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4 333 tcg_temp_free_i32(tmp32);
f18cd223
AJ
334}
335
636aa200
BS
336static inline void gen_store_mem(DisasContext *ctx,
337 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
338 int flags),
595b8fdd
RH
339 int ra, int rb, int32_t disp16, bool fp,
340 bool clear)
023d8ca2 341{
595b8fdd 342 TCGv tmp, addr, va;
6910b8f6 343
595b8fdd
RH
344 tmp = tcg_temp_new();
345 addr = load_gpr(ctx, rb);
6910b8f6 346
595b8fdd
RH
347 if (disp16) {
348 tcg_gen_addi_i64(tmp, addr, disp16);
349 addr = tmp;
350 }
351 if (clear) {
352 tcg_gen_andi_i64(tmp, addr, ~0x7);
353 addr = tmp;
023d8ca2 354 }
595b8fdd
RH
355
356 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
6910b8f6
RH
357 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
358
595b8fdd 359 tcg_temp_free(tmp);
6910b8f6
RH
360}
361
362static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
363 int32_t disp16, int quad)
364{
365 TCGv addr;
366
367 if (ra == 31) {
368 /* ??? Don't bother storing anything. The user can't tell
369 the difference, since the zero register always reads zero. */
370 return NO_EXIT;
371 }
372
373#if defined(CONFIG_USER_ONLY)
374 addr = cpu_lock_st_addr;
375#else
e52458fe 376 addr = tcg_temp_local_new();
6910b8f6
RH
377#endif
378
cd2d46fd 379 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
6910b8f6
RH
380
381#if defined(CONFIG_USER_ONLY)
382 /* ??? This is handled via a complicated version of compare-and-swap
383 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
384 in TCG so that this isn't necessary. */
385 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
386#else
387 /* ??? In system mode we are never multi-threaded, so CAS can be
388 implemented via a non-atomic load-compare-store sequence. */
389 {
390 int lab_fail, lab_done;
391 TCGv val;
392
393 lab_fail = gen_new_label();
394 lab_done = gen_new_label();
e52458fe 395 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
6910b8f6
RH
396
397 val = tcg_temp_new();
f8da40ae 398 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
e52458fe 399 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
6910b8f6 400
f8da40ae
RH
401 tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
402 quad ? MO_LEQ : MO_LEUL);
6910b8f6
RH
403 tcg_gen_movi_i64(cpu_ir[ra], 1);
404 tcg_gen_br(lab_done);
405
406 gen_set_label(lab_fail);
407 tcg_gen_movi_i64(cpu_ir[ra], 0);
408
409 gen_set_label(lab_done);
410 tcg_gen_movi_i64(cpu_lock_addr, -1);
411
412 tcg_temp_free(addr);
413 return NO_EXIT;
414 }
415#endif
023d8ca2
AJ
416}
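/* Sketch of the guest-visible LL/SC pairing being emulated above
   (assuming the single-threaded system-mode case noted in the comment):

       LDQ_L  Rv, 0(Rb)      # lock_addr = Rb, lock_value = mem[Rb]
       ...
       STQ_C  Ra, 0(Rb)      # if (Rb == lock_addr && mem[Rb] == lock_value)
                             #     mem[Rb] = Ra, Ra = 1
                             # else Ra = 0
                             # lock_addr = -1 on both paths
*/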
417
b114b68a 418static bool in_superpage(DisasContext *ctx, int64_t addr)
4c9649a9 419{
b114b68a
RH
420 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
421 && addr < 0
422 && ((addr >> 41) & 3) == 2
423 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
424}
425
426static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
427{
 428 /* Suppress goto_tb in the case of single-stepping and IO. */
429 if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
430 return false;
431 }
432 /* If the destination is in the superpage, the page perms can't change. */
433 if (in_superpage(ctx, dest)) {
434 return true;
435 }
436 /* Check for the dest on the same page as the start of the TB. */
437 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
4af70374 438}
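/* Sketch of the addresses accepted by in_superpage() above (assuming the
   OSF/1-style KSEG layout this translator targets): kernel-mode, negative
   addresses whose bits <42:41> are 0b10 and whose higher bits are a plain
   sign extension, e.g. 0xfffffc0000000000.  Such mappings are fixed, so a
   goto_tb chain into them can never be invalidated by a page-permission
   change. */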
dbb30fe6 439
4af70374
RH
440static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
441{
442 uint64_t dest = ctx->pc + (disp << 2);
443
444 if (ra != 31) {
445 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
446 }
447
448 /* Notice branch-to-next; used to initialize RA with the PC. */
449 if (disp == 0) {
450 return 0;
451 } else if (use_goto_tb(ctx, dest)) {
452 tcg_gen_goto_tb(0);
453 tcg_gen_movi_i64(cpu_pc, dest);
8cfd0495 454 tcg_gen_exit_tb((uintptr_t)ctx->tb);
4af70374
RH
455 return EXIT_GOTO_TB;
456 } else {
457 tcg_gen_movi_i64(cpu_pc, dest);
458 return EXIT_PC_UPDATED;
459 }
dbb30fe6
RH
460}
461
4af70374
RH
462static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
463 TCGv cmp, int32_t disp)
dbb30fe6 464{
4af70374 465 uint64_t dest = ctx->pc + (disp << 2);
dbb30fe6 466 int lab_true = gen_new_label();
9c29504e 467
4af70374
RH
468 if (use_goto_tb(ctx, dest)) {
469 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
470
471 tcg_gen_goto_tb(0);
472 tcg_gen_movi_i64(cpu_pc, ctx->pc);
8cfd0495 473 tcg_gen_exit_tb((uintptr_t)ctx->tb);
4af70374
RH
474
475 gen_set_label(lab_true);
476 tcg_gen_goto_tb(1);
477 tcg_gen_movi_i64(cpu_pc, dest);
8cfd0495 478 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
4af70374
RH
479
480 return EXIT_GOTO_TB;
481 } else {
57e289de
RH
482 TCGv_i64 z = tcg_const_i64(0);
483 TCGv_i64 d = tcg_const_i64(dest);
484 TCGv_i64 p = tcg_const_i64(ctx->pc);
4af70374 485
57e289de 486 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
4af70374 487
57e289de
RH
488 tcg_temp_free_i64(z);
489 tcg_temp_free_i64(d);
490 tcg_temp_free_i64(p);
4af70374
RH
491 return EXIT_PC_UPDATED;
492 }
493}
494
495static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
496 int32_t disp, int mask)
497{
498 TCGv cmp_tmp;
499
500 if (unlikely(ra == 31)) {
501 cmp_tmp = tcg_const_i64(0);
502 } else {
503 cmp_tmp = tcg_temp_new();
9c29504e 504 if (mask) {
4af70374 505 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
dbb30fe6 506 } else {
4af70374 507 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
dbb30fe6 508 }
9c29504e 509 }
4af70374
RH
510
511 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
512}
513
4af70374 514/* Fold -0.0 for comparison with COND. */
dbb30fe6 515
4af70374 516static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 517{
dbb30fe6 518 uint64_t mzero = 1ull << 63;
f18cd223 519
dbb30fe6
RH
520 switch (cond) {
521 case TCG_COND_LE:
522 case TCG_COND_GT:
523 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 524 tcg_gen_mov_i64(dest, src);
a7812ae4 525 break;
dbb30fe6
RH
526
527 case TCG_COND_EQ:
528 case TCG_COND_NE:
529 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 530 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 531 break;
dbb30fe6
RH
532
533 case TCG_COND_GE:
dbb30fe6 534 case TCG_COND_LT:
4af70374
RH
535 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
536 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
537 tcg_gen_neg_i64(dest, dest);
538 tcg_gen_and_i64(dest, dest, src);
a7812ae4 539 break;
dbb30fe6 540
a7812ae4
PB
541 default:
542 abort();
f18cd223 543 }
dbb30fe6
RH
544}
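/* Worked example for gen_fold_mzero() above with src = -0.0
   (bit pattern 0x8000000000000000), so that the later integer compare
   against zero matches the IEEE result:

       EQ/NE : dest = src & ~(1ull << 63)        -> 0   (compares equal to +0.0)
       GE/LT : dest = (src != -0.0 ? src : 0)    -> 0   (treated as +0.0)
       LE/GT : dest = src                        -> sign bit already orders it
*/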
545
4af70374
RH
546static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
547 int32_t disp)
dbb30fe6 548{
4af70374 549 TCGv cmp_tmp;
dbb30fe6
RH
550
551 if (unlikely(ra == 31)) {
552 /* Very uncommon case, but easier to optimize it to an integer
553 comparison than continuing with the floating point comparison. */
4af70374 554 return gen_bcond(ctx, cond, ra, disp, 0);
dbb30fe6
RH
555 }
556
4af70374
RH
557 cmp_tmp = tcg_temp_new();
558 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
559 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
560}
561
bbe1dab4 562static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 563{
57e289de 564 TCGv_i64 c1, z, v1;
dbb30fe6 565
4af70374 566 if (unlikely(rc == 31)) {
dbb30fe6 567 return;
4af70374
RH
568 }
569
57e289de 570 c1 = tcg_temp_new_i64();
dbb30fe6 571 if (unlikely(ra == 31)) {
57e289de 572 tcg_gen_movi_i64(c1, 0);
4af70374 573 } else {
57e289de 574 gen_fold_mzero(cond, c1, cpu_fir[ra]);
dbb30fe6 575 }
57e289de
RH
576 if (rb == 31) {
577 v1 = tcg_const_i64(0);
578 } else {
579 v1 = cpu_fir[rb];
580 }
581 z = tcg_const_i64(0);
dbb30fe6 582
57e289de 583 tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
dbb30fe6 584
57e289de
RH
585 tcg_temp_free_i64(z);
586 tcg_temp_free_i64(c1);
587 if (rb == 31) {
588 tcg_temp_free_i64(v1);
589 }
dbb30fe6
RH
590}
591
f24518b5
RH
592#define QUAL_RM_N 0x080 /* Round mode nearest even */
593#define QUAL_RM_C 0x000 /* Round mode chopped */
594#define QUAL_RM_M 0x040 /* Round mode minus infinity */
595#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
596#define QUAL_RM_MASK 0x0c0
597
598#define QUAL_U 0x100 /* Underflow enable (fp output) */
599#define QUAL_V 0x100 /* Overflow enable (int output) */
600#define QUAL_S 0x400 /* Software completion enable */
601#define QUAL_I 0x200 /* Inexact detection enable */
602
603static void gen_qual_roundmode(DisasContext *ctx, int fn11)
604{
605 TCGv_i32 tmp;
606
607 fn11 &= QUAL_RM_MASK;
608 if (fn11 == ctx->tb_rm) {
609 return;
610 }
611 ctx->tb_rm = fn11;
612
613 tmp = tcg_temp_new_i32();
614 switch (fn11) {
615 case QUAL_RM_N:
616 tcg_gen_movi_i32(tmp, float_round_nearest_even);
617 break;
618 case QUAL_RM_C:
619 tcg_gen_movi_i32(tmp, float_round_to_zero);
620 break;
621 case QUAL_RM_M:
622 tcg_gen_movi_i32(tmp, float_round_down);
623 break;
624 case QUAL_RM_D:
4a58aedf
RH
625 tcg_gen_ld8u_i32(tmp, cpu_env,
626 offsetof(CPUAlphaState, fpcr_dyn_round));
f24518b5
RH
627 break;
628 }
629
630#if defined(CONFIG_SOFTFLOAT_INLINE)
6b4c305c 631 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
f24518b5
RH
632 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
633 sets the one field. */
634 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 635 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
f24518b5
RH
636#else
637 gen_helper_setroundmode(tmp);
638#endif
639
640 tcg_temp_free_i32(tmp);
641}
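/* Example of the qualifier decoding above: with QUAL_RM_MASK = 0x0c0 the
   two rounding-mode bits of fn11 select

       0x000 -> /C  chopped         (float_round_to_zero)
       0x040 -> /M  minus infinity  (float_round_down)
       0x080 ->     nearest even    (float_round_nearest_even, the unqualified default)
       0x0c0 -> /D  dynamic         (taken from the FPCR at run time)

   and the mode is only re-emitted when it differs from ctx->tb_rm, so a
   run of same-mode FP operations within one TB sets it once. */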
642
643static void gen_qual_flushzero(DisasContext *ctx, int fn11)
644{
645 TCGv_i32 tmp;
646
647 fn11 &= QUAL_U;
648 if (fn11 == ctx->tb_ftz) {
649 return;
650 }
651 ctx->tb_ftz = fn11;
652
653 tmp = tcg_temp_new_i32();
654 if (fn11) {
655 /* Underflow is enabled, use the FPCR setting. */
4a58aedf
RH
656 tcg_gen_ld8u_i32(tmp, cpu_env,
657 offsetof(CPUAlphaState, fpcr_flush_to_zero));
f24518b5
RH
658 } else {
659 /* Underflow is disabled, force flush-to-zero. */
660 tcg_gen_movi_i32(tmp, 1);
661 }
662
663#if defined(CONFIG_SOFTFLOAT_INLINE)
664 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 665 offsetof(CPUAlphaState, fp_status.flush_to_zero));
f24518b5
RH
666#else
667 gen_helper_setflushzero(tmp);
668#endif
669
670 tcg_temp_free_i32(tmp);
671}
672
673static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
674{
74343409 675 TCGv val;
f24518b5 676 if (reg == 31) {
74343409 677 val = tcg_const_i64(0);
f24518b5 678 } else {
74343409
RH
679 if ((fn11 & QUAL_S) == 0) {
680 if (is_cmp) {
681 gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
682 } else {
683 gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
684 }
685 }
686 val = tcg_temp_new();
687 tcg_gen_mov_i64(val, cpu_fir[reg]);
f24518b5
RH
688 }
689 return val;
690}
691
692static void gen_fp_exc_clear(void)
693{
694#if defined(CONFIG_SOFTFLOAT_INLINE)
695 TCGv_i32 zero = tcg_const_i32(0);
696 tcg_gen_st8_i32(zero, cpu_env,
4d5712f1 697 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5
RH
698 tcg_temp_free_i32(zero);
699#else
4a58aedf 700 gen_helper_fp_exc_clear(cpu_env);
f24518b5
RH
701#endif
702}
703
704static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
705{
706 /* ??? We ought to be able to do something with imprecise exceptions.
707 E.g. notice we're still in the trap shadow of something within the
708 TB and do not generate the code to signal the exception; end the TB
709 when an exception is forced to arrive, either by consumption of a
710 register value or TRAPB or EXCB. */
711 TCGv_i32 exc = tcg_temp_new_i32();
712 TCGv_i32 reg;
713
714#if defined(CONFIG_SOFTFLOAT_INLINE)
715 tcg_gen_ld8u_i32(exc, cpu_env,
4d5712f1 716 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5 717#else
4a58aedf 718 gen_helper_fp_exc_get(exc, cpu_env);
f24518b5
RH
719#endif
720
721 if (ignore) {
722 tcg_gen_andi_i32(exc, exc, ~ignore);
723 }
724
725 /* ??? Pass in the regno of the destination so that the helper can
726 set EXC_MASK, which contains a bitmask of destination registers
727 that have caused arithmetic traps. A simple userspace emulation
728 does not require this. We do need it for a guest kernel's entArith,
729 or if we were to do something clever with imprecise exceptions. */
730 reg = tcg_const_i32(rc + 32);
731
732 if (fn11 & QUAL_S) {
4a58aedf 733 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
f24518b5 734 } else {
4a58aedf 735 gen_helper_fp_exc_raise(cpu_env, exc, reg);
f24518b5
RH
736 }
737
738 tcg_temp_free_i32(reg);
739 tcg_temp_free_i32(exc);
740}
741
742static inline void gen_fp_exc_raise(int rc, int fn11)
743{
744 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 745}
f24518b5 746
593f17e5
RH
747static void gen_fcvtlq(int rb, int rc)
748{
749 if (unlikely(rc == 31)) {
750 return;
751 }
752 if (unlikely(rb == 31)) {
753 tcg_gen_movi_i64(cpu_fir[rc], 0);
754 } else {
755 TCGv tmp = tcg_temp_new();
756
757 /* The arithmetic right shift here, plus the sign-extended mask below
758 yields a sign-extended result without an explicit ext32s_i64. */
759 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
760 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
761 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
762 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
763 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
764
765 tcg_temp_free(tmp);
766 }
767}
768
735cf45f
RH
769static void gen_fcvtql(int rb, int rc)
770{
771 if (unlikely(rc == 31)) {
772 return;
773 }
774 if (unlikely(rb == 31)) {
775 tcg_gen_movi_i64(cpu_fir[rc], 0);
776 } else {
777 TCGv tmp = tcg_temp_new();
778
779 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
780 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
781 tcg_gen_shli_i64(tmp, tmp, 32);
782 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
783 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
784
785 tcg_temp_free(tmp);
786 }
787}
788
789static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
790{
791 if (rb != 31) {
792 int lab = gen_new_label();
793 TCGv tmp = tcg_temp_new();
794
795 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
796 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
797 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
798
799 gen_set_label(lab);
800 }
801 gen_fcvtql(rb, rc);
802}
803
4a58aedf
RH
804static void gen_ieee_arith2(DisasContext *ctx,
805 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
806 int rb, int rc, int fn11)
807{
808 TCGv vb;
809
810 /* ??? This is wrong: the instruction is not a nop, it still may
811 raise exceptions. */
812 if (unlikely(rc == 31)) {
813 return;
814 }
815
816 gen_qual_roundmode(ctx, fn11);
817 gen_qual_flushzero(ctx, fn11);
818 gen_fp_exc_clear();
819
820 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 821 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
822 tcg_temp_free(vb);
823
824 gen_fp_exc_raise(rc, fn11);
825}
826
827#define IEEE_ARITH2(name) \
828static inline void glue(gen_f, name)(DisasContext *ctx, \
829 int rb, int rc, int fn11) \
830{ \
831 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
832}
833IEEE_ARITH2(sqrts)
834IEEE_ARITH2(sqrtt)
835IEEE_ARITH2(cvtst)
836IEEE_ARITH2(cvtts)
837
838static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
839{
840 TCGv vb;
841 int ignore = 0;
842
843 /* ??? This is wrong: the instruction is not a nop, it still may
844 raise exceptions. */
845 if (unlikely(rc == 31)) {
846 return;
847 }
848
849 /* No need to set flushzero, since we have an integer output. */
850 gen_fp_exc_clear();
851 vb = gen_ieee_input(rb, fn11, 0);
852
853 /* Almost all integer conversions use cropped rounding, and most
854 also do not have integer overflow enabled. Special case that. */
855 switch (fn11) {
856 case QUAL_RM_C:
4a58aedf 857 gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
858 break;
859 case QUAL_V | QUAL_RM_C:
860 case QUAL_S | QUAL_V | QUAL_RM_C:
861 ignore = float_flag_inexact;
862 /* FALLTHRU */
863 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
4a58aedf 864 gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
865 break;
866 default:
867 gen_qual_roundmode(ctx, fn11);
4a58aedf 868 gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
869 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
870 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
871 break;
872 }
873 tcg_temp_free(vb);
874
875 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
876}
877
4a58aedf
RH
878static void gen_ieee_intcvt(DisasContext *ctx,
879 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
880 int rb, int rc, int fn11)
881{
882 TCGv vb;
883
884 /* ??? This is wrong: the instruction is not a nop, it still may
885 raise exceptions. */
886 if (unlikely(rc == 31)) {
887 return;
888 }
889
890 gen_qual_roundmode(ctx, fn11);
891
892 if (rb == 31) {
893 vb = tcg_const_i64(0);
894 } else {
895 vb = cpu_fir[rb];
896 }
897
898 /* The only exception that can be raised by integer conversion
899 is inexact. Thus we only need to worry about exceptions when
900 inexact handling is requested. */
901 if (fn11 & QUAL_I) {
902 gen_fp_exc_clear();
4a58aedf 903 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
904 gen_fp_exc_raise(rc, fn11);
905 } else {
4a58aedf 906 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
907 }
908
909 if (rb == 31) {
910 tcg_temp_free(vb);
911 }
912}
913
914#define IEEE_INTCVT(name) \
915static inline void glue(gen_f, name)(DisasContext *ctx, \
916 int rb, int rc, int fn11) \
917{ \
918 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
919}
920IEEE_INTCVT(cvtqs)
921IEEE_INTCVT(cvtqt)
922
dc96be4b
RH
923static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
924{
925 TCGv va, vb, vmask;
926 int za = 0, zb = 0;
927
928 if (unlikely(rc == 31)) {
929 return;
930 }
931
932 vmask = tcg_const_i64(mask);
933
934 TCGV_UNUSED_I64(va);
935 if (ra == 31) {
936 if (inv_a) {
937 va = vmask;
938 } else {
939 za = 1;
940 }
941 } else {
942 va = tcg_temp_new_i64();
943 tcg_gen_mov_i64(va, cpu_fir[ra]);
944 if (inv_a) {
945 tcg_gen_andc_i64(va, vmask, va);
946 } else {
947 tcg_gen_and_i64(va, va, vmask);
948 }
949 }
950
951 TCGV_UNUSED_I64(vb);
952 if (rb == 31) {
953 zb = 1;
954 } else {
955 vb = tcg_temp_new_i64();
956 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
957 }
958
959 switch (za << 1 | zb) {
960 case 0 | 0:
961 tcg_gen_or_i64(cpu_fir[rc], va, vb);
962 break;
963 case 0 | 1:
964 tcg_gen_mov_i64(cpu_fir[rc], va);
965 break;
966 case 2 | 0:
967 tcg_gen_mov_i64(cpu_fir[rc], vb);
968 break;
969 case 2 | 1:
970 tcg_gen_movi_i64(cpu_fir[rc], 0);
971 break;
972 }
973
974 tcg_temp_free(vmask);
975 if (ra != 31) {
976 tcg_temp_free(va);
977 }
978 if (rb != 31) {
979 tcg_temp_free(vb);
980 }
981}
982
983static inline void gen_fcpys(int ra, int rb, int rc)
984{
985 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
986}
987
988static inline void gen_fcpysn(int ra, int rb, int rc)
989{
990 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
991}
992
993static inline void gen_fcpyse(int ra, int rb, int rc)
994{
995 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
996}
997
f24518b5 998static void gen_ieee_arith3(DisasContext *ctx,
4a58aedf 999 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
f24518b5
RH
1000 int ra, int rb, int rc, int fn11)
1001{
1002 TCGv va, vb;
1003
1004 /* ??? This is wrong: the instruction is not a nop, it still may
1005 raise exceptions. */
1006 if (unlikely(rc == 31)) {
1007 return;
1008 }
1009
1010 gen_qual_roundmode(ctx, fn11);
1011 gen_qual_flushzero(ctx, fn11);
1012 gen_fp_exc_clear();
1013
1014 va = gen_ieee_input(ra, fn11, 0);
1015 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 1016 helper(cpu_fir[rc], cpu_env, va, vb);
f24518b5
RH
1017 tcg_temp_free(va);
1018 tcg_temp_free(vb);
1019
1020 gen_fp_exc_raise(rc, fn11);
1021}
1022
1023#define IEEE_ARITH3(name) \
1024static inline void glue(gen_f, name)(DisasContext *ctx, \
1025 int ra, int rb, int rc, int fn11) \
1026{ \
1027 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1028}
1029IEEE_ARITH3(adds)
1030IEEE_ARITH3(subs)
1031IEEE_ARITH3(muls)
1032IEEE_ARITH3(divs)
1033IEEE_ARITH3(addt)
1034IEEE_ARITH3(subt)
1035IEEE_ARITH3(mult)
1036IEEE_ARITH3(divt)
1037
1038static void gen_ieee_compare(DisasContext *ctx,
4a58aedf 1039 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
f24518b5
RH
1040 int ra, int rb, int rc, int fn11)
1041{
1042 TCGv va, vb;
1043
1044 /* ??? This is wrong: the instruction is not a nop, it still may
1045 raise exceptions. */
1046 if (unlikely(rc == 31)) {
1047 return;
1048 }
1049
1050 gen_fp_exc_clear();
1051
1052 va = gen_ieee_input(ra, fn11, 1);
1053 vb = gen_ieee_input(rb, fn11, 1);
4a58aedf 1054 helper(cpu_fir[rc], cpu_env, va, vb);
f24518b5
RH
1055 tcg_temp_free(va);
1056 tcg_temp_free(vb);
1057
1058 gen_fp_exc_raise(rc, fn11);
1059}
1060
1061#define IEEE_CMP3(name) \
1062static inline void glue(gen_f, name)(DisasContext *ctx, \
1063 int ra, int rb, int rc, int fn11) \
1064{ \
1065 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1066}
1067IEEE_CMP3(cmptun)
1068IEEE_CMP3(cmpteq)
1069IEEE_CMP3(cmptlt)
1070IEEE_CMP3(cmptle)
a7812ae4 1071
248c42f3
RH
1072static inline uint64_t zapnot_mask(uint8_t lit)
1073{
1074 uint64_t mask = 0;
1075 int i;
1076
1077 for (i = 0; i < 8; ++i) {
67debe3a 1078 if ((lit >> i) & 1) {
248c42f3 1079 mask |= 0xffull << (i * 8);
67debe3a 1080 }
248c42f3
RH
1081 }
1082 return mask;
1083}
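/* Worked examples for zapnot_mask() above: each set bit i in the 8-bit
   literal selects byte lane i of the 64-bit result, so

       zapnot_mask(0x01) == 0x00000000000000ffull
       zapnot_mask(0x0f) == 0x00000000ffffffffull
       zapnot_mask(0x81) == 0xff000000000000ffull
*/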
1084
87d98f95
RH
1085/* Implement zapnot with an immediate operand, which expands to some
1086 form of immediate AND. This is a basic building block in the
1087 definition of many of the other byte manipulation instructions. */
248c42f3 1088static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1089{
87d98f95
RH
1090 switch (lit) {
1091 case 0x00:
248c42f3 1092 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1093 break;
1094 case 0x01:
248c42f3 1095 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1096 break;
1097 case 0x03:
248c42f3 1098 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1099 break;
1100 case 0x0f:
248c42f3 1101 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1102 break;
1103 case 0xff:
248c42f3 1104 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1105 break;
1106 default:
b144be9e 1107 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
87d98f95
RH
1108 break;
1109 }
1110}
1111
248c42f3 1112/* EXTWH, EXTLH, EXTQH */
9a734d64 1113static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
ffec44f1 1114 uint8_t lit, uint8_t byte_mask)
b3249f63 1115{
9a734d64
RH
1116 if (islit) {
1117 tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
67debe3a 1118 } else {
9a734d64
RH
1119 TCGv tmp = tcg_temp_new();
1120 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
1121 tcg_gen_neg_i64(tmp, tmp);
1122 tcg_gen_andi_i64(tmp, tmp, 0x3f);
1123 tcg_gen_shl_i64(vc, va, tmp);
1124 tcg_temp_free(tmp);
377a43b6 1125 }
9a734d64 1126 gen_zapnoti(vc, vc, byte_mask);
b3249f63
AJ
1127}
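/* Example for the literal form of gen_ext_h() above: EXTLH with lit = 5
   shifts va left by (64 - 5*8) & 0x3f = 24 and then zaps to the low four
   bytes (byte_mask 0x0f); with lit = 0 the masked count is 0 rather than
   an out-of-range 64, matching the architected shift-modulo-64
   behaviour. */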
1128
248c42f3 1129/* EXTBL, EXTWL, EXTLL, EXTQL */
9a734d64 1130static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
ffec44f1 1131 uint8_t lit, uint8_t byte_mask)
b3249f63 1132{
9a734d64
RH
1133 if (islit) {
1134 tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
67debe3a 1135 } else {
9a734d64
RH
1136 TCGv tmp = tcg_temp_new();
1137 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
1138 tcg_gen_shli_i64(tmp, tmp, 3);
1139 tcg_gen_shr_i64(vc, va, tmp);
1140 tcg_temp_free(tmp);
248c42f3 1141 }
9a734d64 1142 gen_zapnoti(vc, vc, byte_mask);
248c42f3
RH
1143}
1144
50eb6e5c 1145/* INSWH, INSLH, INSQH */
5e5863ec 1146static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
50eb6e5c
RH
1147 uint8_t lit, uint8_t byte_mask)
1148{
5e5863ec 1149 TCGv tmp = tcg_temp_new();
50eb6e5c 1150
5e5863ec
RH
1151 /* The instruction description has us left-shift the byte mask and extract
1152 bits <15:8> and apply that zap at the end. This is equivalent to simply
1153 performing the zap first and shifting afterward. */
1154 gen_zapnoti(tmp, va, byte_mask);
50eb6e5c 1155
5e5863ec
RH
1156 if (islit) {
1157 lit &= 7;
1158 if (unlikely(lit == 0)) {
1159 tcg_gen_movi_i64(vc, 0);
50eb6e5c 1160 } else {
5e5863ec 1161 tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
50eb6e5c 1162 }
5e5863ec
RH
1163 } else {
1164 TCGv shift = tcg_temp_new();
1165
1166 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1167 portably by splitting the shift into two parts: shift_count-1 and 1.
1168 Arrange for the -1 by using ones-complement instead of
1169 twos-complement in the negation: ~(B * 8) & 63. */
1170
1171 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1172 tcg_gen_not_i64(shift, shift);
1173 tcg_gen_andi_i64(shift, shift, 0x3f);
1174
1175 tcg_gen_shr_i64(vc, tmp, shift);
1176 tcg_gen_shri_i64(vc, vc, 1);
1177 tcg_temp_free(shift);
50eb6e5c 1178 }
5e5863ec 1179 tcg_temp_free(tmp);
50eb6e5c
RH
1180}
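/* Worked example of the split shift in gen_ins_h() above: for a register
   byte offset (B & 7) == 1, ~(1 * 8) & 63 = 55, so the zapped value is
   shifted right by 55 and then by 1 more, 56 = 64 - 8 in total; for
   (B & 7) == 0 the two shifts sum to 63 + 1 = 64, portably producing
   zero where a single 64-bit shift count would be undefined. */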
1181
248c42f3 1182/* INSBL, INSWL, INSLL, INSQL */
5e5863ec 1183static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
ffec44f1 1184 uint8_t lit, uint8_t byte_mask)
248c42f3 1185{
5e5863ec 1186 TCGv tmp = tcg_temp_new();
248c42f3 1187
5e5863ec
RH
1188 /* The instruction description has us left-shift the byte mask
1189 the same number of byte slots as the data and apply the zap
1190 at the end. This is equivalent to simply performing the zap
1191 first and shifting afterward. */
1192 gen_zapnoti(tmp, va, byte_mask);
248c42f3 1193
5e5863ec
RH
1194 if (islit) {
1195 tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
1196 } else {
1197 TCGv shift = tcg_temp_new();
1198 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1199 tcg_gen_shli_i64(shift, shift, 3);
1200 tcg_gen_shl_i64(vc, tmp, shift);
1201 tcg_temp_free(shift);
377a43b6 1202 }
5e5863ec 1203 tcg_temp_free(tmp);
b3249f63
AJ
1204}
1205
ffec44f1 1206/* MSKWH, MSKLH, MSKQH */
9a8fa1bd 1207static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
ffec44f1
RH
1208 uint8_t lit, uint8_t byte_mask)
1209{
9a8fa1bd
RH
1210 if (islit) {
1211 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
ffec44f1
RH
1212 } else {
1213 TCGv shift = tcg_temp_new();
1214 TCGv mask = tcg_temp_new();
1215
1216 /* The instruction description is as above, where the byte_mask
1217 is shifted left, and then we extract bits <15:8>. This can be
1218 emulated with a right-shift on the expanded byte mask. This
1219 requires extra care because for an input <2:0> == 0 we need a
1220 shift of 64 bits in order to generate a zero. This is done by
1221 splitting the shift into two parts, the variable shift - 1
1222 followed by a constant 1 shift. The code we expand below is
9a8fa1bd 1223 equivalent to ~(B * 8) & 63. */
ffec44f1 1224
9a8fa1bd 1225 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
ffec44f1
RH
1226 tcg_gen_not_i64(shift, shift);
1227 tcg_gen_andi_i64(shift, shift, 0x3f);
1228 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1229 tcg_gen_shr_i64(mask, mask, shift);
1230 tcg_gen_shri_i64(mask, mask, 1);
1231
9a8fa1bd 1232 tcg_gen_andc_i64(vc, va, mask);
ffec44f1
RH
1233
1234 tcg_temp_free(mask);
1235 tcg_temp_free(shift);
1236 }
1237}
1238
14ab1634 1239/* MSKBL, MSKWL, MSKLL, MSKQL */
9a8fa1bd 1240static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
ffec44f1 1241 uint8_t lit, uint8_t byte_mask)
14ab1634 1242{
9a8fa1bd
RH
1243 if (islit) {
1244 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
14ab1634
RH
1245 } else {
1246 TCGv shift = tcg_temp_new();
1247 TCGv mask = tcg_temp_new();
1248
9a8fa1bd 1249 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
14ab1634 1250 tcg_gen_shli_i64(shift, shift, 3);
9a8fa1bd 1251 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
14ab1634
RH
1252 tcg_gen_shl_i64(mask, mask, shift);
1253
9a8fa1bd 1254 tcg_gen_andc_i64(vc, va, mask);
14ab1634
RH
1255
1256 tcg_temp_free(mask);
1257 tcg_temp_free(shift);
1258 }
1259}
1260
13e4df99
RH
1261#define MVIOP2(name) \
1262static inline void glue(gen_, name)(int rb, int rc) \
1263{ \
1264 if (unlikely(rc == 31)) \
1265 return; \
1266 if (unlikely(rb == 31)) \
1267 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1268 else \
1269 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1270}
1271MVIOP2(pklb)
1272MVIOP2(pkwb)
1273MVIOP2(unpkbl)
1274MVIOP2(unpkbw)
b3249f63 1275
ac316ca4
RH
1276static void gen_rx(int ra, int set)
1277{
1278 TCGv_i32 tmp;
1279
1280 if (ra != 31) {
4d5712f1 1281 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1282 }
1283
1284 tmp = tcg_const_i32(set);
4d5712f1 1285 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1286 tcg_temp_free_i32(tmp);
1287}
1288
2ace7e55
RH
1289static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1290{
1291 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1292 to internal cpu registers. */
1293
1294 /* Unprivileged PAL call */
1295 if (palcode >= 0x80 && palcode < 0xC0) {
1296 switch (palcode) {
1297 case 0x86:
1298 /* IMB */
1299 /* No-op inside QEMU. */
1300 break;
1301 case 0x9E:
1302 /* RDUNIQUE */
1303 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1304 break;
1305 case 0x9F:
1306 /* WRUNIQUE */
1307 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1308 break;
1309 default:
ba96394e
RH
1310 palcode &= 0xbf;
1311 goto do_call_pal;
2ace7e55
RH
1312 }
1313 return NO_EXIT;
1314 }
1315
1316#ifndef CONFIG_USER_ONLY
1317 /* Privileged PAL code */
1318 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1319 switch (palcode) {
1320 case 0x01:
1321 /* CFLUSH */
1322 /* No-op inside QEMU. */
1323 break;
1324 case 0x02:
1325 /* DRAINA */
1326 /* No-op inside QEMU. */
1327 break;
1328 case 0x2D:
1329 /* WRVPTPTR */
4d5712f1 1330 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
2ace7e55
RH
1331 break;
1332 case 0x31:
1333 /* WRVAL */
1334 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1335 break;
1336 case 0x32:
1337 /* RDVAL */
1338 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1339 break;
1340
1341 case 0x35: {
1342 /* SWPIPL */
1343 TCGv tmp;
1344
1345 /* Note that we already know we're in kernel mode, so we know
1346 that PS only contains the 3 IPL bits. */
4d5712f1 1347 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1348
1349 /* But make sure to store only the 3 IPL bits from the user. */
1350 tmp = tcg_temp_new();
1351 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
4d5712f1 1352 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1353 tcg_temp_free(tmp);
1354 break;
1355 }
1356
1357 case 0x36:
1358 /* RDPS */
4d5712f1 1359 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1360 break;
1361 case 0x38:
1362 /* WRUSP */
1363 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1364 break;
1365 case 0x3A:
1366 /* RDUSP */
1367 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1368 break;
1369 case 0x3C:
1370 /* WHAMI */
1371 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
55e5c285 1372 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
2ace7e55
RH
1373 break;
1374
1375 default:
ba96394e
RH
1376 palcode &= 0x3f;
1377 goto do_call_pal;
2ace7e55
RH
1378 }
1379 return NO_EXIT;
1380 }
1381#endif
2ace7e55 1382 return gen_invalid(ctx);
ba96394e
RH
1383
1384 do_call_pal:
1385#ifdef CONFIG_USER_ONLY
1386 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1387#else
1388 {
1389 TCGv pc = tcg_const_i64(ctx->pc);
1390 TCGv entry = tcg_const_i64(palcode & 0x80
1391 ? 0x2000 + (palcode - 0x80) * 64
1392 : 0x1000 + palcode * 64);
1393
1394 gen_helper_call_pal(cpu_env, pc, entry);
1395
1396 tcg_temp_free(entry);
1397 tcg_temp_free(pc);
a9ead832
RH
1398
1399 /* Since the destination is running in PALmode, we don't really
73f395fa 1400 need the page permissions check. We'll see the existence of
a9ead832
RH
1401 the page when we create the TB, and we'll flush all TBs if
1402 we change the PAL base register. */
1403 if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
1404 tcg_gen_goto_tb(0);
8cfd0495 1405 tcg_gen_exit_tb((uintptr_t)ctx->tb);
a9ead832
RH
1406 return EXIT_GOTO_TB;
1407 }
1408
ba96394e
RH
1409 return EXIT_PC_UPDATED;
1410 }
1411#endif
2ace7e55
RH
1412}
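/* Example of the entry-point computation above (OSF/1-style PALcode
   layout assumed): an unprivileged CALL_PAL 0x83 (callsys), which is not
   special-cased, ends up with entry = 0x2000 + (0x83 - 0x80) * 64 = 0x20c0,
   while a privileged code c < 0x40 uses entry = 0x1000 + c * 64; the
   call_pal helper then dispatches to that offset within the loaded
   PALcode image. */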
1413
26b46094
RH
1414#ifndef CONFIG_USER_ONLY
1415
1416#define PR_BYTE 0x100000
1417#define PR_LONG 0x200000
1418
1419static int cpu_pr_data(int pr)
1420{
1421 switch (pr) {
1422 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1423 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1424 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1425 case 3: return offsetof(CPUAlphaState, trap_arg0);
1426 case 4: return offsetof(CPUAlphaState, trap_arg1);
1427 case 5: return offsetof(CPUAlphaState, trap_arg2);
1428 case 6: return offsetof(CPUAlphaState, exc_addr);
1429 case 7: return offsetof(CPUAlphaState, palbr);
1430 case 8: return offsetof(CPUAlphaState, ptbr);
1431 case 9: return offsetof(CPUAlphaState, vptptr);
1432 case 10: return offsetof(CPUAlphaState, unique);
1433 case 11: return offsetof(CPUAlphaState, sysval);
1434 case 12: return offsetof(CPUAlphaState, usp);
1435
1436 case 32 ... 39:
1437 return offsetof(CPUAlphaState, shadow[pr - 32]);
1438 case 40 ... 63:
1439 return offsetof(CPUAlphaState, scratch[pr - 40]);
c781cf96
RH
1440
1441 case 251:
1442 return offsetof(CPUAlphaState, alarm_expire);
26b46094
RH
1443 }
1444 return 0;
1445}
1446
c781cf96 1447static ExitStatus gen_mfpr(int ra, int regno)
26b46094
RH
1448{
1449 int data = cpu_pr_data(regno);
1450
1451 /* In our emulated PALcode, these processor registers have no
1452 side effects from reading. */
1453 if (ra == 31) {
c781cf96
RH
1454 return NO_EXIT;
1455 }
1456
19e0cbb8
RH
1457 /* Special help for VMTIME and WALLTIME. */
1458 if (regno == 250 || regno == 249) {
1459 void (*helper)(TCGv) = gen_helper_get_walltime;
1460 if (regno == 249) {
1461 helper = gen_helper_get_vmtime;
1462 }
c781cf96
RH
1463 if (use_icount) {
1464 gen_io_start();
19e0cbb8 1465 helper(cpu_ir[ra]);
c781cf96
RH
1466 gen_io_end();
1467 return EXIT_PC_STALE;
1468 } else {
19e0cbb8 1469 helper(cpu_ir[ra]);
c781cf96
RH
1470 return NO_EXIT;
1471 }
26b46094
RH
1472 }
1473
1474 /* The basic registers are data only, and unknown registers
1475 are read-zero, write-ignore. */
1476 if (data == 0) {
1477 tcg_gen_movi_i64(cpu_ir[ra], 0);
1478 } else if (data & PR_BYTE) {
1479 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1480 } else if (data & PR_LONG) {
1481 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1482 } else {
1483 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1484 }
c781cf96 1485 return NO_EXIT;
26b46094
RH
1486}
1487
bc24270e 1488static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1489{
1490 TCGv tmp;
bc24270e 1491 int data;
26b46094
RH
1492
1493 if (rb == 31) {
1494 tmp = tcg_const_i64(0);
1495 } else {
1496 tmp = cpu_ir[rb];
1497 }
1498
bc24270e
RH
1499 switch (regno) {
1500 case 255:
3b4fefd6 1501 /* TBIA */
69163fbb 1502 gen_helper_tbia(cpu_env);
bc24270e
RH
1503 break;
1504
1505 case 254:
3b4fefd6 1506 /* TBIS */
69163fbb 1507 gen_helper_tbis(cpu_env, tmp);
bc24270e
RH
1508 break;
1509
1510 case 253:
1511 /* WAIT */
1512 tmp = tcg_const_i64(1);
259186a7
AF
1513 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1514 offsetof(CPUState, halted));
bc24270e
RH
1515 return gen_excp(ctx, EXCP_HLT, 0);
1516
034ebc27
RH
1517 case 252:
1518 /* HALT */
1519 gen_helper_halt(tmp);
1520 return EXIT_PC_STALE;
1521
c781cf96
RH
1522 case 251:
1523 /* ALARM */
69163fbb 1524 gen_helper_set_alarm(cpu_env, tmp);
c781cf96
RH
1525 break;
1526
a9ead832
RH
1527 case 7:
1528 /* PALBR */
1529 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
1530 /* Changing the PAL base register implies un-chaining all of the TBs
1531 that ended with a CALL_PAL. Since the base register usually only
1532 changes during boot, flushing everything works well. */
1533 gen_helper_tb_flush(cpu_env);
1534 return EXIT_PC_STALE;
1535
bc24270e 1536 default:
3b4fefd6
RH
1537 /* The basic registers are data only, and unknown registers
1538 are read-zero, write-ignore. */
bc24270e 1539 data = cpu_pr_data(regno);
3b4fefd6
RH
1540 if (data != 0) {
1541 if (data & PR_BYTE) {
1542 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1543 } else if (data & PR_LONG) {
1544 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1545 } else {
1546 tcg_gen_st_i64(tmp, cpu_env, data);
1547 }
26b46094 1548 }
bc24270e 1549 break;
26b46094
RH
1550 }
1551
1552 if (rb == 31) {
1553 tcg_temp_free(tmp);
1554 }
bc24270e
RH
1555
1556 return NO_EXIT;
26b46094
RH
1557}
1558#endif /* !CONFIG_USER_ONLY */
1559
5238c886
RH
1560#define REQUIRE_TB_FLAG(FLAG) \
1561 do { \
1562 if ((ctx->tb->flags & (FLAG)) == 0) { \
1563 goto invalid_opc; \
1564 } \
1565 } while (0)
1566
64f45e49
RH
1567#define REQUIRE_REG_31(WHICH) \
1568 do { \
1569 if (WHICH != 31) { \
1570 goto invalid_opc; \
1571 } \
1572 } while (0)
1573
4af70374 1574static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1575{
1576 uint32_t palcode;
efa64351
MT
1577 int32_t disp21, disp16;
1578#ifndef CONFIG_USER_ONLY
1579 int32_t disp12;
1580#endif
f88fe4e3 1581 uint16_t fn11;
194cfb43
RH
1582 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1583 bool islit;
1584 TCGv va, vb, vc, tmp;
075b8ddb 1585 TCGv_i32 t32;
4af70374 1586 ExitStatus ret;
4c9649a9
JM
1587
1588 /* Decode all instruction fields */
1589 opc = insn >> 26;
1590 ra = (insn >> 21) & 0x1F;
1591 rb = (insn >> 16) & 0x1F;
1592 rc = insn & 0x1F;
64f45e49 1593 islit = (insn >> 12) & 1;
dfaa8583
AJ
1594 if (rb == 31 && !islit) {
1595 islit = 1;
1596 lit = 0;
194cfb43 1597 } else {
dfaa8583 1598 lit = (insn >> 13) & 0xFF;
194cfb43 1599 }
4c9649a9
JM
1600 palcode = insn & 0x03FFFFFF;
1601 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1602 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1603#ifndef CONFIG_USER_ONLY
4c9649a9 1604 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1605#endif
4c9649a9
JM
1606 fn11 = (insn >> 5) & 0x000007FF;
1607 fpfn = fn11 & 0x3F;
1608 fn7 = (insn >> 5) & 0x0000007F;
806991da 1609 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1610 opc, ra, rb, rc, disp16);
806991da 1611
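    /* Worked decode example for the field extraction above, using the
       (hypothetical) instruction word insn = 0x40220523, i.e. SUBQ $1,$2,$3:

           opc   = insn >> 26          = 0x10   integer arithmetic group
           ra    = (insn >> 21) & 0x1f = 1
           rb    = (insn >> 16) & 0x1f = 2
           islit = (insn >> 12) & 1    = 0      register (not literal) operand
           rc    = insn & 0x1f         = 3
           fn7   = (insn >> 5) & 0x7f  = 0x29   SUBQ

       The shift pairs for disp21 and disp12 exist purely to sign-extend
       those displacements. */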
4af70374 1612 ret = NO_EXIT;
4c9649a9
JM
1613 switch (opc) {
1614 case 0x00:
1615 /* CALL_PAL */
2ace7e55
RH
1616 ret = gen_call_pal(ctx, palcode);
1617 break;
4c9649a9
JM
1618 case 0x01:
1619 /* OPC01 */
1620 goto invalid_opc;
1621 case 0x02:
1622 /* OPC02 */
1623 goto invalid_opc;
1624 case 0x03:
1625 /* OPC03 */
1626 goto invalid_opc;
1627 case 0x04:
1628 /* OPC04 */
1629 goto invalid_opc;
1630 case 0x05:
1631 /* OPC05 */
1632 goto invalid_opc;
1633 case 0x06:
1634 /* OPC06 */
1635 goto invalid_opc;
1636 case 0x07:
1637 /* OPC07 */
1638 goto invalid_opc;
194cfb43 1639
4c9649a9
JM
1640 case 0x09:
1641 /* LDAH */
194cfb43
RH
1642 disp16 = (uint32_t)disp16 << 16;
1643 /* fall through */
1644 case 0x08:
1645 /* LDA */
1646 va = dest_gpr(ctx, ra);
1647 /* It's worth special-casing immediate loads. */
1648 if (rb == 31) {
1649 tcg_gen_movi_i64(va, disp16);
1650 } else {
1651 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
496cb5b9 1652 }
4c9649a9 1653 break;
194cfb43 1654
4c9649a9
JM
1655 case 0x0A:
1656 /* LDBU */
5238c886
RH
1657 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1658 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1659 break;
4c9649a9
JM
1660 case 0x0B:
1661 /* LDQ_U */
f18cd223 1662 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1663 break;
1664 case 0x0C:
1665 /* LDWU */
5238c886
RH
1666 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1667 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1668 break;
4c9649a9
JM
1669 case 0x0D:
1670 /* STW */
5238c886 1671 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
6910b8f6 1672 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1673 break;
1674 case 0x0E:
1675 /* STB */
5238c886 1676 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
6910b8f6 1677 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1678 break;
1679 case 0x0F:
1680 /* STQ_U */
6910b8f6 1681 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9 1682 break;
194cfb43 1683
4c9649a9 1684 case 0x10:
194cfb43
RH
1685 vc = dest_gpr(ctx, rc);
1686 vb = load_gpr_lit(ctx, rb, lit, islit);
1687
1688 if (ra == 31) {
1689 if (fn7 == 0x00) {
1690 /* Special case ADDL as SEXTL. */
1691 tcg_gen_ext32s_i64(vc, vb);
1692 break;
1693 }
1694 if (fn7 == 0x29) {
1695 /* Special case SUBQ as NEGQ. */
1696 tcg_gen_neg_i64(vc, vb);
1697 break;
1698 }
1699 }
1700
1701 va = load_gpr(ctx, ra);
4c9649a9
JM
1702 switch (fn7) {
1703 case 0x00:
1704 /* ADDL */
194cfb43
RH
1705 tcg_gen_add_i64(vc, va, vb);
1706 tcg_gen_ext32s_i64(vc, vc);
4c9649a9
JM
1707 break;
1708 case 0x02:
1709 /* S4ADDL */
194cfb43
RH
1710 tmp = tcg_temp_new();
1711 tcg_gen_shli_i64(tmp, va, 2);
1712 tcg_gen_add_i64(tmp, tmp, vb);
1713 tcg_gen_ext32s_i64(vc, tmp);
1714 tcg_temp_free(tmp);
4c9649a9
JM
1715 break;
1716 case 0x09:
1717 /* SUBL */
194cfb43
RH
1718 tcg_gen_sub_i64(vc, va, vb);
1719 tcg_gen_ext32s_i64(vc, vc);
4c9649a9
JM
1720 break;
1721 case 0x0B:
1722 /* S4SUBL */
194cfb43
RH
1723 tmp = tcg_temp_new();
1724 tcg_gen_shli_i64(tmp, va, 2);
1725 tcg_gen_sub_i64(tmp, tmp, vb);
1726 tcg_gen_ext32s_i64(vc, tmp);
1727 tcg_temp_free(tmp);
4c9649a9
JM
1728 break;
1729 case 0x0F:
1730 /* CMPBGE */
cd2754ad 1731 gen_helper_cmpbge(vc, va, vb);
4c9649a9
JM
1732 break;
1733 case 0x12:
1734 /* S8ADDL */
194cfb43
RH
1735 tmp = tcg_temp_new();
1736 tcg_gen_shli_i64(tmp, va, 3);
1737 tcg_gen_add_i64(tmp, tmp, vb);
1738 tcg_gen_ext32s_i64(vc, tmp);
1739 tcg_temp_free(tmp);
4c9649a9
JM
1740 break;
1741 case 0x1B:
1742 /* S8SUBL */
194cfb43
RH
1743 tmp = tcg_temp_new();
1744 tcg_gen_shli_i64(tmp, va, 3);
1745 tcg_gen_sub_i64(tmp, tmp, vb);
1746 tcg_gen_ext32s_i64(vc, tmp);
1747 tcg_temp_free(tmp);
4c9649a9
JM
1748 break;
1749 case 0x1D:
1750 /* CMPULT */
95868348 1751 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
4c9649a9
JM
1752 break;
1753 case 0x20:
1754 /* ADDQ */
194cfb43 1755 tcg_gen_add_i64(vc, va, vb);
4c9649a9
JM
1756 break;
1757 case 0x22:
1758 /* S4ADDQ */
194cfb43
RH
1759 tmp = tcg_temp_new();
1760 tcg_gen_shli_i64(tmp, va, 2);
1761 tcg_gen_add_i64(vc, tmp, vb);
1762 tcg_temp_free(tmp);
4c9649a9
JM
1763 break;
1764 case 0x29:
1765 /* SUBQ */
194cfb43 1766 tcg_gen_sub_i64(vc, va, vb);
4c9649a9
JM
1767 break;
1768 case 0x2B:
1769 /* S4SUBQ */
194cfb43
RH
1770 tmp = tcg_temp_new();
1771 tcg_gen_shli_i64(tmp, va, 2);
1772 tcg_gen_sub_i64(vc, tmp, vb);
1773 tcg_temp_free(tmp);
4c9649a9
JM
1774 break;
1775 case 0x2D:
1776 /* CMPEQ */
95868348 1777 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
4c9649a9
JM
1778 break;
1779 case 0x32:
1780 /* S8ADDQ */
194cfb43
RH
1781 tmp = tcg_temp_new();
1782 tcg_gen_shli_i64(tmp, va, 3);
1783 tcg_gen_add_i64(vc, tmp, vb);
1784 tcg_temp_free(tmp);
4c9649a9
JM
1785 break;
1786 case 0x3B:
1787 /* S8SUBQ */
194cfb43
RH
1788 tmp = tcg_temp_new();
1789 tcg_gen_shli_i64(tmp, va, 3);
1790 tcg_gen_sub_i64(vc, tmp, vb);
1791 tcg_temp_free(tmp);
4c9649a9
JM
1792 break;
1793 case 0x3D:
1794 /* CMPULE */
95868348 1795 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
4c9649a9
JM
1796 break;
1797 case 0x40:
1798 /* ADDL/V */
42774a56 1799 gen_helper_addlv(vc, cpu_env, va, vb);
4c9649a9
JM
1800 break;
1801 case 0x49:
1802 /* SUBL/V */
42774a56 1803 gen_helper_sublv(vc, cpu_env, va, vb);
4c9649a9
JM
1804 break;
1805 case 0x4D:
1806 /* CMPLT */
95868348 1807 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
4c9649a9
JM
1808 break;
1809 case 0x60:
1810 /* ADDQ/V */
42774a56 1811 gen_helper_addqv(vc, cpu_env, va, vb);
4c9649a9
JM
1812 break;
1813 case 0x69:
1814 /* SUBQ/V */
42774a56 1815 gen_helper_subqv(vc, cpu_env, va, vb);
4c9649a9
JM
1816 break;
1817 case 0x6D:
1818 /* CMPLE */
95868348 1819 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
4c9649a9
JM
1820 break;
1821 default:
1822 goto invalid_opc;
1823 }
1824 break;
db4a1645 1825
4c9649a9 1826 case 0x11:
db4a1645
RH
1827 if (fn7 == 0x20) {
1828 if (rc == 31) {
1829 /* Special case BIS as NOP. */
1830 break;
1831 }
1832 if (ra == 31) {
1833 /* Special case BIS as MOV. */
1834 vc = dest_gpr(ctx, rc);
1835 if (islit) {
1836 tcg_gen_movi_i64(vc, lit);
67debe3a 1837 } else {
db4a1645 1838 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
67debe3a 1839 }
db4a1645 1840 break;
30c7183b 1841 }
db4a1645
RH
1842 }
1843
1844 vc = dest_gpr(ctx, rc);
1845 vb = load_gpr_lit(ctx, rb, lit, islit);
1846
1847 if (fn7 == 0x28 && ra == 31) {
1848 /* Special case ORNOT as NOT. */
1849 tcg_gen_not_i64(vc, vb);
1850 break;
1851 }
1852
1853 va = load_gpr(ctx, ra);
1854 switch (fn7) {
1855 case 0x00:
1856 /* AND */
1857 tcg_gen_and_i64(vc, va, vb);
4c9649a9
JM
1858 break;
1859 case 0x08:
1860 /* BIC */
db4a1645 1861 tcg_gen_andc_i64(vc, va, vb);
4c9649a9
JM
1862 break;
1863 case 0x14:
1864 /* CMOVLBS */
83ebb7cd
RH
1865 tmp = tcg_temp_new();
1866 tcg_gen_andi_i64(tmp, va, 1);
1867 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1868 vb, load_gpr(ctx, rc));
1869 tcg_temp_free(tmp);
4c9649a9
JM
1870 break;
1871 case 0x16:
1872 /* CMOVLBC */
83ebb7cd
RH
1873 tmp = tcg_temp_new();
1874 tcg_gen_andi_i64(tmp, va, 1);
1875 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1876 vb, load_gpr(ctx, rc));
1877 tcg_temp_free(tmp);
4c9649a9
JM
1878 break;
1879 case 0x20:
1880 /* BIS */
db4a1645 1881 tcg_gen_or_i64(vc, va, vb);
4c9649a9
JM
1882 break;
1883 case 0x24:
1884 /* CMOVEQ */
83ebb7cd
RH
1885 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1886 vb, load_gpr(ctx, rc));
4c9649a9
JM
1887 break;
1888 case 0x26:
1889 /* CMOVNE */
83ebb7cd
RH
1890 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1891 vb, load_gpr(ctx, rc));
4c9649a9
JM
1892 break;
1893 case 0x28:
1894 /* ORNOT */
db4a1645 1895 tcg_gen_orc_i64(vc, va, vb);
4c9649a9
JM
1896 break;
1897 case 0x40:
1898 /* XOR */
db4a1645 1899 tcg_gen_xor_i64(vc, va, vb);
4c9649a9
JM
1900 break;
1901 case 0x44:
1902 /* CMOVLT */
83ebb7cd
RH
1903 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1904 vb, load_gpr(ctx, rc));
4c9649a9
JM
1905 break;
1906 case 0x46:
1907 /* CMOVGE */
83ebb7cd
RH
1908 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1909 vb, load_gpr(ctx, rc));
4c9649a9
JM
1910 break;
1911 case 0x48:
1912 /* EQV */
db4a1645 1913 tcg_gen_eqv_i64(vc, va, vb);
4c9649a9
JM
1914 break;
1915 case 0x61:
1916 /* AMASK */
64f45e49 1917 REQUIRE_REG_31(ra);
db4a1645 1918 {
a18ad893 1919 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
db4a1645 1920 tcg_gen_andi_i64(vc, vb, ~amask);
ae8ecd42 1921 }
4c9649a9
JM
1922 break;
1923 case 0x64:
1924 /* CMOVLE */
83ebb7cd
RH
1925 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1926 vb, load_gpr(ctx, rc));
4c9649a9
JM
1927 break;
1928 case 0x66:
1929 /* CMOVGT */
83ebb7cd
RH
1930 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1931 vb, load_gpr(ctx, rc));
4c9649a9
JM
1932 break;
1933 case 0x6C:
1934 /* IMPLVER */
64f45e49 1935 REQUIRE_REG_31(ra);
db4a1645 1936 tcg_gen_movi_i64(vc, ctx->implver);
4c9649a9
JM
1937 break;
1938 default:
1939 goto invalid_opc;
1940 }
1941 break;
3bd67b7d 1942
4c9649a9 1943 case 0x12:
3bd67b7d
RH
1944 vc = dest_gpr(ctx, rc);
1945 va = load_gpr(ctx, ra);
4c9649a9
JM
1946 switch (fn7) {
1947 case 0x02:
1948 /* MSKBL */
9a8fa1bd 1949 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
4c9649a9
JM
1950 break;
1951 case 0x06:
1952 /* EXTBL */
9a734d64 1953 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
4c9649a9
JM
1954 break;
1955 case 0x0B:
1956 /* INSBL */
5e5863ec 1957 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
4c9649a9
JM
1958 break;
1959 case 0x12:
1960 /* MSKWL */
9a8fa1bd 1961 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
1962 break;
1963 case 0x16:
1964 /* EXTWL */
9a734d64 1965 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
1966 break;
1967 case 0x1B:
1968 /* INSWL */
5e5863ec 1969 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
1970 break;
1971 case 0x22:
1972 /* MSKLL */
9a8fa1bd 1973 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
1974 break;
1975 case 0x26:
1976 /* EXTLL */
9a734d64 1977 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
1978 break;
1979 case 0x2B:
1980 /* INSLL */
5e5863ec 1981 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
1982 break;
1983 case 0x30:
1984 /* ZAP */
b144be9e
RH
1985 if (islit) {
1986 gen_zapnoti(vc, va, ~lit);
1987 } else {
1988 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1989 }
4c9649a9
JM
1990 break;
1991 case 0x31:
1992 /* ZAPNOT */
b144be9e
RH
1993 if (islit) {
1994 gen_zapnoti(vc, va, lit);
1995 } else {
1996 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1997 }
4c9649a9
JM
1998 break;
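/* ZAP clears the bytes of va selected by the low 8 bits of the mask;
   ZAPNOT clears the bytes that are *not* selected.  With a literal
   the 64-bit byte mask is known at translate time -- e.g. ZAPNOT with
   mask 0x0f keeps only the low four bytes, i.e. an AND with
   0x00000000ffffffff -- so ZAP is folded into gen_zapnoti(~lit).  */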
1999 case 0x32:
2000 /* MSKQL */
9a8fa1bd 2001 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2002 break;
2003 case 0x34:
2004 /* SRL */
3bd67b7d
RH
2005 if (islit) {
2006 tcg_gen_shri_i64(vc, va, lit & 0x3f);
2007 } else {
2008 tmp = tcg_temp_new();
2009 vb = load_gpr(ctx, rb);
2010 tcg_gen_andi_i64(tmp, vb, 0x3f);
2011 tcg_gen_shr_i64(vc, va, tmp);
2012 tcg_temp_free(tmp);
30c7183b 2013 }
4c9649a9
JM
2014 break;
2015 case 0x36:
2016 /* EXTQL */
9a734d64 2017 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2018 break;
2019 case 0x39:
2020 /* SLL */
3bd67b7d
RH
2021 if (islit) {
2022 tcg_gen_shli_i64(vc, va, lit & 0x3f);
2023 } else {
2024 tmp = tcg_temp_new();
2025 vb = load_gpr(ctx, rb);
2026 tcg_gen_andi_i64(tmp, vb, 0x3f);
2027 tcg_gen_shl_i64(vc, va, tmp);
2028 tcg_temp_free(tmp);
30c7183b 2029 }
4c9649a9
JM
2030 break;
2031 case 0x3B:
2032 /* INSQL */
5e5863ec 2033 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2034 break;
2035 case 0x3C:
2036 /* SRA */
3bd67b7d
RH
2037 if (islit) {
2038 tcg_gen_sari_i64(vc, va, lit & 0x3f);
2039 } else {
2040 tmp = tcg_temp_new();
2041 vb = load_gpr(ctx, rb);
2042 tcg_gen_andi_i64(tmp, vb, 0x3f);
2043 tcg_gen_sar_i64(vc, va, tmp);
2044 tcg_temp_free(tmp);
30c7183b 2045 }
4c9649a9
JM
2046 break;
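/* Alpha SLL/SRL/SRA use only the low six bits of rb as the shift
   count, hence the explicit AND with 0x3f in the register forms
   above; TCG shifts require a count strictly less than 64.  */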
2047 case 0x52:
2048 /* MSKWH */
9a8fa1bd 2049 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2050 break;
2051 case 0x57:
2052 /* INSWH */
5e5863ec 2053 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2054 break;
2055 case 0x5A:
2056 /* EXTWH */
9a734d64 2057 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2058 break;
2059 case 0x62:
2060 /* MSKLH */
9a8fa1bd 2061 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2062 break;
2063 case 0x67:
2064 /* INSLH */
5e5863ec 2065 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2066 break;
2067 case 0x6A:
2068 /* EXTLH */
9a734d64 2069 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2070 break;
2071 case 0x72:
2072 /* MSKQH */
9a8fa1bd 2073 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2074 break;
2075 case 0x77:
2076 /* INSQH */
5e5863ec 2077 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2078 break;
2079 case 0x7A:
2080 /* EXTQH */
9a734d64 2081 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2082 break;
2083 default:
2084 goto invalid_opc;
2085 }
2086 break;
de4d3555 2087
4c9649a9 2088 case 0x13:
de4d3555
RH
2089 vc = dest_gpr(ctx, rc);
2090 vb = load_gpr_lit(ctx, rb, lit, islit);
2091 va = load_gpr(ctx, ra);
4c9649a9
JM
2092 switch (fn7) {
2093 case 0x00:
2094 /* MULL */
de4d3555
RH
2095 tcg_gen_mul_i64(vc, va, vb);
2096 tcg_gen_ext32s_i64(vc, vc);
4c9649a9
JM
2097 break;
2098 case 0x20:
2099 /* MULQ */
de4d3555 2100 tcg_gen_mul_i64(vc, va, vb);
4c9649a9
JM
2101 break;
2102 case 0x30:
2103 /* UMULH */
de4d3555
RH
2104 tmp = tcg_temp_new();
2105 tcg_gen_mulu2_i64(tmp, vc, va, vb);
2106 tcg_temp_free(tmp);
4c9649a9
JM
2107 break;
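/* MULL is a 32-bit multiply whose result is sign-extended to 64 bits;
   computing the full 64-bit product and then ext32s yields the same
   value.  UMULH wants only the high half of the 128-bit unsigned
   product, so the low half produced by mulu2 goes into a scratch
   temporary and is dropped.  */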
2108 case 0x40:
2109 /* MULL/V */
42774a56 2110 gen_helper_mullv(vc, cpu_env, va, vb);
4c9649a9
JM
2111 break;
2112 case 0x60:
2113 /* MULQ/V */
42774a56 2114 gen_helper_mulqv(vc, cpu_env, va, vb);
4c9649a9
JM
2115 break;
2116 default:
2117 goto invalid_opc;
2118 }
2119 break;
075b8ddb 2120
4c9649a9 2121 case 0x14:
5238c886 2122 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
baee04ab 2123 vc = dest_fpr(ctx, rc);
f24518b5 2124 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2125 case 0x04:
2126 /* ITOFS */
64f45e49 2127 REQUIRE_REG_31(rb);
075b8ddb
RH
2128 t32 = tcg_temp_new_i32();
2129 va = load_gpr(ctx, ra);
075b8ddb
RH
2130 tcg_gen_trunc_i64_i32(t32, va);
2131 gen_helper_memory_to_s(vc, t32);
2132 tcg_temp_free_i32(t32);
4c9649a9
JM
2133 break;
2134 case 0x0A:
2135 /* SQRTF */
64f45e49 2136 REQUIRE_REG_31(ra);
baee04ab
RH
2137 vb = load_fpr(ctx, rb);
2138 gen_helper_sqrtf(vc, cpu_env, vb);
5238c886 2139 break;
4c9649a9
JM
2140 case 0x0B:
2141 /* SQRTS */
64f45e49 2142 REQUIRE_REG_31(ra);
5238c886
RH
2143 gen_fsqrts(ctx, rb, rc, fn11);
2144 break;
4c9649a9
JM
2145 case 0x14:
2146 /* ITOFF */
64f45e49 2147 REQUIRE_REG_31(rb);
075b8ddb
RH
2148 t32 = tcg_temp_new_i32();
2149 va = load_gpr(ctx, ra);
075b8ddb
RH
2150 tcg_gen_trunc_i64_i32(t32, va);
2151 gen_helper_memory_to_f(vc, t32);
2152 tcg_temp_free_i32(t32);
4c9649a9
JM
2153 break;
2154 case 0x24:
2155 /* ITOFT */
64f45e49 2156 REQUIRE_REG_31(rb);
075b8ddb 2157 va = load_gpr(ctx, ra);
075b8ddb 2158 tcg_gen_mov_i64(vc, va);
4c9649a9
JM
2159 break;
2160 case 0x2A:
2161 /* SQRTG */
64f45e49 2162 REQUIRE_REG_31(ra);
baee04ab
RH
2163 vb = load_fpr(ctx, rb);
2164 gen_helper_sqrtg(vc, cpu_env, vb);
5238c886 2165 break;
4c9649a9
JM
2166 case 0x02B:
2167 /* SQRTT */
64f45e49 2168 REQUIRE_REG_31(ra);
5238c886
RH
2169 gen_fsqrtt(ctx, rb, rc, fn11);
2170 break;
4c9649a9
JM
2171 default:
2172 goto invalid_opc;
2173 }
2174 break;
6b88b37c 2175
4c9649a9
JM
2176 case 0x15:
2177 /* VAX floating point */
2178 /* XXX: rounding mode and trap are ignored (!) */
baee04ab
RH
2179 vc = dest_fpr(ctx, rc);
2180 vb = load_fpr(ctx, rb);
3d045dbc 2181 va = load_fpr(ctx, ra);
f24518b5 2182 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2183 case 0x00:
2184 /* ADDF */
3d045dbc 2185 gen_helper_addf(vc, cpu_env, va, vb);
4c9649a9
JM
2186 break;
2187 case 0x01:
2188 /* SUBF */
3d045dbc 2189 gen_helper_subf(vc, cpu_env, va, vb);
4c9649a9
JM
2190 break;
2191 case 0x02:
2192 /* MULF */
3d045dbc 2193 gen_helper_mulf(vc, cpu_env, va, vb);
4c9649a9
JM
2194 break;
2195 case 0x03:
2196 /* DIVF */
3d045dbc 2197 gen_helper_divf(vc, cpu_env, va, vb);
4c9649a9
JM
2198 break;
2199 case 0x1E:
64f45e49
RH
2200 /* CVTDG -- TODO */
2201 REQUIRE_REG_31(ra);
4c9649a9 2202 goto invalid_opc;
4c9649a9
JM
2203 case 0x20:
2204 /* ADDG */
3d045dbc 2205 gen_helper_addg(vc, cpu_env, va, vb);
4c9649a9
JM
2206 break;
2207 case 0x21:
2208 /* SUBG */
3d045dbc 2209 gen_helper_subg(vc, cpu_env, va, vb);
4c9649a9
JM
2210 break;
2211 case 0x22:
2212 /* MULG */
3d045dbc 2213 gen_helper_mulg(vc, cpu_env, va, vb);
4c9649a9
JM
2214 break;
2215 case 0x23:
2216 /* DIVG */
3d045dbc 2217 gen_helper_divg(vc, cpu_env, va, vb);
4c9649a9
JM
2218 break;
2219 case 0x25:
2220 /* CMPGEQ */
3d045dbc 2221 gen_helper_cmpgeq(vc, cpu_env, va, vb);
4c9649a9
JM
2222 break;
2223 case 0x26:
2224 /* CMPGLT */
3d045dbc 2225 gen_helper_cmpglt(vc, cpu_env, va, vb);
4c9649a9
JM
2226 break;
2227 case 0x27:
2228 /* CMPGLE */
3d045dbc 2229 gen_helper_cmpgle(vc, cpu_env, va, vb);
4c9649a9
JM
2230 break;
2231 case 0x2C:
2232 /* CVTGF */
64f45e49 2233 REQUIRE_REG_31(ra);
baee04ab 2234 gen_helper_cvtgf(vc, cpu_env, vb);
4c9649a9
JM
2235 break;
2236 case 0x2D:
64f45e49
RH
2237 /* CVTGD -- TODO */
2238 REQUIRE_REG_31(ra);
4c9649a9 2239 goto invalid_opc;
4c9649a9
JM
2240 case 0x2F:
2241 /* CVTGQ */
64f45e49 2242 REQUIRE_REG_31(ra);
baee04ab 2243 gen_helper_cvtgq(vc, cpu_env, vb);
4c9649a9
JM
2244 break;
2245 case 0x3C:
2246 /* CVTQF */
64f45e49 2247 REQUIRE_REG_31(ra);
baee04ab 2248 gen_helper_cvtqf(vc, cpu_env, vb);
4c9649a9
JM
2249 break;
2250 case 0x3E:
2251 /* CVTQG */
64f45e49 2252 REQUIRE_REG_31(ra);
baee04ab 2253 gen_helper_cvtqg(vc, cpu_env, vb);
4c9649a9
JM
2254 break;
2255 default:
2256 goto invalid_opc;
2257 }
2258 break;
6b88b37c 2259
4c9649a9
JM
2260 case 0x16:
2261 /* IEEE floating-point */
f24518b5 2262 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2263 case 0x00:
2264 /* ADDS */
f24518b5 2265 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2266 break;
2267 case 0x01:
2268 /* SUBS */
f24518b5 2269 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2270 break;
2271 case 0x02:
2272 /* MULS */
f24518b5 2273 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2274 break;
2275 case 0x03:
2276 /* DIVS */
f24518b5 2277 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2278 break;
2279 case 0x20:
2280 /* ADDT */
f24518b5 2281 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2282 break;
2283 case 0x21:
2284 /* SUBT */
f24518b5 2285 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2286 break;
2287 case 0x22:
2288 /* MULT */
f24518b5 2289 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2290 break;
2291 case 0x23:
2292 /* DIVT */
f24518b5 2293 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2294 break;
2295 case 0x24:
2296 /* CMPTUN */
f24518b5 2297 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2298 break;
2299 case 0x25:
2300 /* CMPTEQ */
f24518b5 2301 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2302 break;
2303 case 0x26:
2304 /* CMPTLT */
f24518b5 2305 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2306 break;
2307 case 0x27:
2308 /* CMPTLE */
f24518b5 2309 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2310 break;
2311 case 0x2C:
64f45e49 2312 REQUIRE_REG_31(ra);
a74b4d2c 2313 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2314 /* CVTST */
f24518b5 2315 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2316 } else {
2317 /* CVTTS */
f24518b5 2318 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2319 }
2320 break;
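/* CVTST and CVTTS share the low six function bits; only the full
   11-bit fn11 field (0x2AC and 0x6AC encode CVTST) tells them
   apart.  */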
2321 case 0x2F:
2322 /* CVTTQ */
64f45e49 2323 REQUIRE_REG_31(ra);
f24518b5 2324 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2325 break;
2326 case 0x3C:
2327 /* CVTQS */
64f45e49 2328 REQUIRE_REG_31(ra);
f24518b5 2329 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2330 break;
2331 case 0x3E:
2332 /* CVTQT */
64f45e49 2333 REQUIRE_REG_31(ra);
f24518b5 2334 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2335 break;
2336 default:
2337 goto invalid_opc;
2338 }
2339 break;
6b88b37c 2340
4c9649a9
JM
2341 case 0x17:
2342 switch (fn11) {
2343 case 0x010:
2344 /* CVTLQ */
64f45e49 2345 REQUIRE_REG_31(ra);
a7812ae4 2346 gen_fcvtlq(rb, rc);
4c9649a9
JM
2347 break;
2348 case 0x020:
6b88b37c
RH
2349 /* CPYS */
2350 if (rc == 31) {
2351 /* Special case CPYS as FNOP. */
2352 } else if (ra == rb) {
2353 vc = dest_fpr(ctx, rc);
2354 /* Special case CPYS as FMOV. */
2355 if (ra == 31) {
2356 tcg_gen_movi_i64(vc, 0);
a06d48d9 2357 } else {
6b88b37c
RH
2358 va = load_fpr(ctx, ra);
2359 tcg_gen_mov_i64(vc, va);
a06d48d9 2360 }
6b88b37c
RH
2361 } else {
2362 gen_fcpys(ra, rb, rc);
4c9649a9
JM
2363 }
2364 break;
2365 case 0x021:
2366 /* CPYSN */
a7812ae4 2367 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2368 break;
2369 case 0x022:
2370 /* CPYSE */
a7812ae4 2371 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2372 break;
2373 case 0x024:
2374 /* MT_FPCR */
6b88b37c
RH
2375 va = load_fpr(ctx, ra);
2376 gen_helper_store_fpcr(cpu_env, va);
4c9649a9
JM
2377 break;
2378 case 0x025:
2379 /* MF_FPCR */
6b88b37c
RH
2380 va = dest_fpr(ctx, ra);
2381 gen_helper_load_fpcr(va, cpu_env);
4c9649a9
JM
2382 break;
2383 case 0x02A:
2384 /* FCMOVEQ */
bbe1dab4 2385 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2386 break;
2387 case 0x02B:
2388 /* FCMOVNE */
bbe1dab4 2389 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2390 break;
2391 case 0x02C:
2392 /* FCMOVLT */
bbe1dab4 2393 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2394 break;
2395 case 0x02D:
2396 /* FCMOVGE */
bbe1dab4 2397 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2398 break;
2399 case 0x02E:
2400 /* FCMOVLE */
bbe1dab4 2401 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2402 break;
2403 case 0x02F:
2404 /* FCMOVGT */
bbe1dab4 2405 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2406 break;
2407 case 0x030:
2408 /* CVTQL */
64f45e49 2409 REQUIRE_REG_31(ra);
a7812ae4 2410 gen_fcvtql(rb, rc);
4c9649a9
JM
2411 break;
2412 case 0x130:
2413 /* CVTQL/V */
4c9649a9
JM
2414 case 0x530:
2415 /* CVTQL/SV */
64f45e49 2416 REQUIRE_REG_31(ra);
735cf45f
RH
2417 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2418 /v doesn't do. The only thing I can think of is that /sv is a
2419 valid instruction merely for completeness in the ISA. */
2420 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2421 break;
2422 default:
2423 goto invalid_opc;
2424 }
2425 break;
89fe090b 2426
4c9649a9
JM
2427 case 0x18:
2428 switch ((uint16_t)disp16) {
2429 case 0x0000:
2430 /* TRAPB */
4af70374 2431 /* No-op. */
4c9649a9
JM
2432 break;
2433 case 0x0400:
2434 /* EXCB */
4af70374 2435 /* No-op. */
4c9649a9
JM
2436 break;
2437 case 0x4000:
2438 /* MB */
2439 /* No-op */
2440 break;
2441 case 0x4400:
2442 /* WMB */
2443 /* No-op */
2444 break;
2445 case 0x8000:
2446 /* FETCH */
2447 /* No-op */
2448 break;
2449 case 0xA000:
2450 /* FETCH_M */
2451 /* No-op */
2452 break;
2453 case 0xC000:
2454 /* RPCC */
89fe090b
RH
2455 va = dest_gpr(ctx, ra);
2456 if (use_icount) {
2457 gen_io_start();
2458 gen_helper_load_pcc(va, cpu_env);
2459 gen_io_end();
2460 ret = EXIT_PC_STALE;
2461 } else {
2462 gen_helper_load_pcc(va, cpu_env);
a9406ea1 2463 }
4c9649a9
JM
2464 break;
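/* With icount enabled, reading the cycle counter is treated like an
   I/O operation: it is bracketed with gen_io_start/gen_io_end and the
   TB is ended (EXIT_PC_STALE) so the instruction count stays
   exact.  */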
2465 case 0xE000:
2466 /* RC */
ac316ca4 2467 gen_rx(ra, 0);
4c9649a9
JM
2468 break;
2469 case 0xE800:
2470 /* ECB */
4c9649a9
JM
2471 break;
2472 case 0xF000:
2473 /* RS */
ac316ca4 2474 gen_rx(ra, 1);
4c9649a9
JM
2475 break;
2476 case 0xF800:
2477 /* WH64 */
2478 /* No-op */
2479 break;
2480 default:
2481 goto invalid_opc;
2482 }
2483 break;
8f56ced8 2484
4c9649a9
JM
2485 case 0x19:
2486 /* HW_MFPR (PALcode) */
26b46094 2487#ifndef CONFIG_USER_ONLY
5238c886
RH
2488 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2489 return gen_mfpr(ra, insn & 0xffff);
2490#else
4c9649a9 2491 goto invalid_opc;
5238c886 2492#endif
8f56ced8 2493
4c9649a9 2494 case 0x1A:
49563a72
RH
2495 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2496 prediction stack action, which of course we don't implement. */
8f56ced8
RH
2497 vb = load_gpr(ctx, rb);
2498 tcg_gen_andi_i64(cpu_pc, vb, ~3);
49563a72 2499 if (ra != 31) {
1304ca87 2500 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2501 }
4af70374 2502 ret = EXIT_PC_UPDATED;
4c9649a9 2503 break;
1eaa1da7 2504
4c9649a9
JM
2505 case 0x1B:
2506 /* HW_LD (PALcode) */
a18ad893 2507#ifndef CONFIG_USER_ONLY
5238c886
RH
2508 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2509 {
1eaa1da7
RH
2510 TCGv addr = tcg_temp_new();
2511 vb = load_gpr(ctx, rb);
2512 va = dest_gpr(ctx, ra);
a18ad893 2513
1eaa1da7 2514 tcg_gen_addi_i64(addr, vb, disp12);
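/* Bits <15:12> of the instruction select the HW_LD variant
   (physical vs. virtual, locked, alternate access mode, protection
   checks).  Only the forms needed here are implemented; the rest
   fault as invalid.  */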
8bb6e981
AJ
2515 switch ((insn >> 12) & 0xF) {
2516 case 0x0:
b5d51029 2517 /* Longword physical access (hw_ldl/p) */
1eaa1da7 2518 gen_helper_ldl_phys(va, cpu_env, addr);
8bb6e981
AJ
2519 break;
2520 case 0x1:
b5d51029 2521 /* Quadword physical access (hw_ldq/p) */
1eaa1da7 2522 gen_helper_ldq_phys(va, cpu_env, addr);
8bb6e981
AJ
2523 break;
2524 case 0x2:
b5d51029 2525 /* Longword physical access with lock (hw_ldl_l/p) */
1eaa1da7 2526 gen_helper_ldl_l_phys(va, cpu_env, addr);
8bb6e981
AJ
2527 break;
2528 case 0x3:
b5d51029 2529 /* Quadword physical access with lock (hw_ldq_l/p) */
1eaa1da7 2530 gen_helper_ldq_l_phys(va, cpu_env, addr);
8bb6e981
AJ
2531 break;
2532 case 0x4:
b5d51029 2533 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2534 goto invalid_opc;
8bb6e981 2535 case 0x5:
b5d51029 2536 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2537 goto invalid_opc;
8bb6e981
AJ
2538 break;
2539 case 0x6:
2540 /* Invalid */
b5d51029 2541 goto invalid_opc;
8bb6e981
AJ
2542 case 0x7:
2543 /* Invalid */
b5d51029 2544 goto invalid_opc;
8bb6e981 2545 case 0x8:
b5d51029 2546 /* Longword virtual access (hw_ldl) */
2374e73e 2547 goto invalid_opc;
8bb6e981 2548 case 0x9:
b5d51029 2549 /* Quadword virtual access (hw_ldq) */
2374e73e 2550 goto invalid_opc;
8bb6e981 2551 case 0xA:
b5d51029 2552 /* Longword virtual access with protection check (hw_ldl/w) */
1eaa1da7 2553 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
8bb6e981
AJ
2554 break;
2555 case 0xB:
b5d51029 2556 /* Quadword virtual access with protection check (hw_ldq/w) */
1eaa1da7 2557 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
8bb6e981
AJ
2558 break;
2559 case 0xC:
b5d51029 2560 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2561 goto invalid_opc;
8bb6e981 2562 case 0xD:
b5d51029 2563 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2564 goto invalid_opc;
8bb6e981
AJ
2565 case 0xE:
2566 /* Longword virtual access with alternate access mode and
2374e73e 2567 protection checks (hw_ldl/wa) */
1eaa1da7 2568 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
8bb6e981
AJ
2569 break;
2570 case 0xF:
2571 /* Quadword virtual access with alternate access mode and
2374e73e 2572 protection checks (hw_ldq/wa) */
1eaa1da7 2573 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
8bb6e981
AJ
2574 break;
2575 }
2576 tcg_temp_free(addr);
a18ad893 2577 break;
4c9649a9 2578 }
5238c886 2579#else
a18ad893 2580 goto invalid_opc;
5238c886 2581#endif
c67b67e5 2582
4c9649a9 2583 case 0x1C:
c67b67e5 2584 vc = dest_gpr(ctx, rc);
cd2754ad
RH
2585 if (fn7 == 0x70) {
2586 /* FTOIT */
2587 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2588 REQUIRE_REG_31(rb);
2589 va = load_fpr(ctx, ra);
2590 tcg_gen_mov_i64(vc, va);
2591 break;
2592 } else if (fn7 == 0x78) {
2593 /* FTOIS */
2594 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2595 REQUIRE_REG_31(rb);
2596 t32 = tcg_temp_new_i32();
2597 va = load_fpr(ctx, ra);
2598 gen_helper_s_to_memory(t32, va);
2599 tcg_gen_ext_i32_i64(vc, t32);
2600 tcg_temp_free_i32(t32);
2601 break;
2602 }
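/* FTOIT/FTOIS copy the raw bits of FP register ra into integer
   register rc (rb must be $31).  FTOIS additionally converts the
   register S-format value back to its 32-bit memory image and
   sign-extends it, roughly what an STS followed by LDL would
   produce.  */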
2603
2604 vb = load_gpr_lit(ctx, rb, lit, islit);
4c9649a9
JM
2605 switch (fn7) {
2606 case 0x00:
2607 /* SEXTB */
5238c886 2608 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
64f45e49 2609 REQUIRE_REG_31(ra);
c67b67e5 2610 tcg_gen_ext8s_i64(vc, vb);
4c9649a9
JM
2611 break;
2612 case 0x01:
2613 /* SEXTW */
5238c886 2614 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
64f45e49 2615 REQUIRE_REG_31(ra);
c67b67e5 2616 tcg_gen_ext16s_i64(vc, vb);
5238c886 2617 break;
4c9649a9
JM
2618 case 0x30:
2619 /* CTPOP */
5238c886 2620 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2621 REQUIRE_REG_31(ra);
c67b67e5 2622 gen_helper_ctpop(vc, vb);
5238c886 2623 break;
4c9649a9
JM
2624 case 0x31:
2625 /* PERR */
5238c886 2626 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2627 va = load_gpr(ctx, ra);
2628 gen_helper_perr(vc, va, vb);
5238c886 2629 break;
4c9649a9
JM
2630 case 0x32:
2631 /* CTLZ */
5238c886 2632 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2633 REQUIRE_REG_31(ra);
c67b67e5 2634 gen_helper_ctlz(vc, vb);
5238c886 2635 break;
4c9649a9
JM
2636 case 0x33:
2637 /* CTTZ */
5238c886 2638 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2639 REQUIRE_REG_31(ra);
c67b67e5 2640 gen_helper_cttz(vc, vb);
5238c886 2641 break;
4c9649a9
JM
2642 case 0x34:
2643 /* UNPKBW */
5238c886 2644 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2645 REQUIRE_REG_31(ra);
5238c886
RH
2646 gen_unpkbw(rb, rc);
2647 break;
4c9649a9 2648 case 0x35:
13e4df99 2649 /* UNPKBL */
5238c886 2650 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2651 REQUIRE_REG_31(ra);
5238c886
RH
2652 gen_unpkbl(rb, rc);
2653 break;
4c9649a9
JM
2654 case 0x36:
2655 /* PKWB */
5238c886 2656 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2657 REQUIRE_REG_31(ra);
5238c886
RH
2658 gen_pkwb(rb, rc);
2659 break;
4c9649a9
JM
2660 case 0x37:
2661 /* PKLB */
5238c886 2662 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2663 REQUIRE_REG_31(ra);
5238c886
RH
2664 gen_pklb(rb, rc);
2665 break;
4c9649a9
JM
2666 case 0x38:
2667 /* MINSB8 */
5238c886 2668 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2669 va = load_gpr(ctx, ra);
2670 gen_helper_minsb8(vc, va, vb);
5238c886 2671 break;
4c9649a9
JM
2672 case 0x39:
2673 /* MINSW4 */
5238c886 2674 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2675 va = load_gpr(ctx, ra);
2676 gen_helper_minsw4(vc, va, vb);
5238c886 2677 break;
4c9649a9
JM
2678 case 0x3A:
2679 /* MINUB8 */
5238c886 2680 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2681 va = load_gpr(ctx, ra);
2682 gen_helper_minub8(vc, va, vb);
5238c886 2683 break;
4c9649a9
JM
2684 case 0x3B:
2685 /* MINUW4 */
5238c886 2686 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2687 va = load_gpr(ctx, ra);
2688 gen_helper_minuw4(vc, va, vb);
5238c886 2689 break;
4c9649a9
JM
2690 case 0x3C:
2691 /* MAXUB8 */
5238c886 2692 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2693 va = load_gpr(ctx, ra);
2694 gen_helper_maxub8(vc, va, vb);
5238c886 2695 break;
4c9649a9
JM
2696 case 0x3D:
2697 /* MAXUW4 */
5238c886 2698 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2699 va = load_gpr(ctx, ra);
2700 gen_helper_maxuw4(vc, va, vb);
5238c886 2701 break;
4c9649a9
JM
2702 case 0x3E:
2703 /* MAXSB8 */
5238c886 2704 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2705 va = load_gpr(ctx, ra);
2706 gen_helper_maxsb8(vc, va, vb);
5238c886 2707 break;
4c9649a9
JM
2708 case 0x3F:
2709 /* MAXSW4 */
5238c886 2710 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2711 va = load_gpr(ctx, ra);
2712 gen_helper_maxsw4(vc, va, vb);
4c9649a9
JM
2713 break;
2714 default:
2715 goto invalid_opc;
2716 }
2717 break;
46010969 2718
4c9649a9
JM
2719 case 0x1D:
2720 /* HW_MTPR (PALcode) */
26b46094 2721#ifndef CONFIG_USER_ONLY
5238c886
RH
2722 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2723 return gen_mtpr(ctx, rb, insn & 0xffff);
2724#else
4c9649a9 2725 goto invalid_opc;
5238c886 2726#endif
46010969 2727
4c9649a9 2728 case 0x1E:
508b43ea 2729 /* HW_RET (PALcode) */
a18ad893 2730#ifndef CONFIG_USER_ONLY
5238c886
RH
2731 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2732 if (rb == 31) {
2733 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2734 address from EXC_ADDR. This turns out to be useful for our
2735 emulation PALcode, so continue to accept it. */
46010969 2736 tmp = tcg_temp_new();
5238c886
RH
2737 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2738 gen_helper_hw_ret(cpu_env, tmp);
2739 tcg_temp_free(tmp);
2740 } else {
46010969 2741 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
4c9649a9 2742 }
5238c886
RH
2743 ret = EXIT_PC_UPDATED;
2744 break;
2745#else
a18ad893 2746 goto invalid_opc;
5238c886 2747#endif
a4af3044 2748
4c9649a9
JM
2749 case 0x1F:
2750 /* HW_ST (PALcode) */
a18ad893 2751#ifndef CONFIG_USER_ONLY
5238c886
RH
2752 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2753 {
a4af3044
RH
2754 TCGv addr = tcg_temp_new();
2755 va = load_gpr(ctx, ra);
2756 vb = load_gpr(ctx, rb);
2757
2758 tcg_gen_addi_i64(addr, vb, disp12);
8bb6e981
AJ
2759 switch ((insn >> 12) & 0xF) {
2760 case 0x0:
2761 /* Longword physical access */
a4af3044 2762 gen_helper_stl_phys(cpu_env, addr, va);
8bb6e981
AJ
2763 break;
2764 case 0x1:
2765 /* Quadword physical access */
a4af3044 2766 gen_helper_stq_phys(cpu_env, addr, va);
8bb6e981
AJ
2767 break;
2768 case 0x2:
2769 /* Longword physical access with lock */
a4af3044 2770 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
8bb6e981
AJ
2771 break;
2772 case 0x3:
2773 /* Quadword physical access with lock */
a4af3044 2774 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
8bb6e981
AJ
2775 break;
2776 case 0x4:
2777 /* Longword virtual access */
2374e73e 2778 goto invalid_opc;
8bb6e981
AJ
2779 case 0x5:
2780 /* Quadword virtual access */
2374e73e 2781 goto invalid_opc;
8bb6e981
AJ
2782 case 0x6:
2783 /* Invalid */
2784 goto invalid_opc;
2785 case 0x7:
2786 /* Invalid */
2787 goto invalid_opc;
2788 case 0x8:
2789 /* Invalid */
2790 goto invalid_opc;
2791 case 0x9:
2792 /* Invalid */
2793 goto invalid_opc;
2794 case 0xA:
2795 /* Invalid */
2796 goto invalid_opc;
2797 case 0xB:
2798 /* Invalid */
2799 goto invalid_opc;
2800 case 0xC:
2801 /* Longword virtual access with alternate access mode */
2374e73e 2802 goto invalid_opc;
8bb6e981
AJ
2803 case 0xD:
2804 /* Quadword virtual access with alternate access mode */
2374e73e 2805 goto invalid_opc;
8bb6e981
AJ
2806 case 0xE:
2807 /* Invalid */
2808 goto invalid_opc;
2809 case 0xF:
2810 /* Invalid */
2811 goto invalid_opc;
2812 }
8bb6e981 2813 tcg_temp_free(addr);
a18ad893 2814 break;
4c9649a9 2815 }
5238c886 2816#else
a18ad893 2817 goto invalid_opc;
5238c886 2818#endif
4c9649a9
JM
2819 case 0x20:
2820 /* LDF */
f18cd223 2821 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2822 break;
2823 case 0x21:
2824 /* LDG */
f18cd223 2825 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2826 break;
2827 case 0x22:
2828 /* LDS */
f18cd223 2829 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2830 break;
2831 case 0x23:
2832 /* LDT */
f18cd223 2833 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2834 break;
2835 case 0x24:
2836 /* STF */
6910b8f6 2837 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2838 break;
2839 case 0x25:
2840 /* STG */
6910b8f6 2841 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2842 break;
2843 case 0x26:
2844 /* STS */
6910b8f6 2845 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
2846 break;
2847 case 0x27:
2848 /* STT */
6910b8f6 2849 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2850 break;
2851 case 0x28:
2852 /* LDL */
f18cd223 2853 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2854 break;
2855 case 0x29:
2856 /* LDQ */
f18cd223 2857 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2858 break;
2859 case 0x2A:
2860 /* LDL_L */
f4ed8679 2861 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2862 break;
2863 case 0x2B:
2864 /* LDQ_L */
f4ed8679 2865 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2866 break;
2867 case 0x2C:
2868 /* STL */
6910b8f6 2869 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
2870 break;
2871 case 0x2D:
2872 /* STQ */
6910b8f6 2873 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2874 break;
2875 case 0x2E:
2876 /* STL_C */
6910b8f6 2877 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
2878 break;
2879 case 0x2F:
2880 /* STQ_C */
6910b8f6 2881 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
2882 break;
2883 case 0x30:
2884 /* BR */
4af70374 2885 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 2886 break;
a7812ae4 2887 case 0x31: /* FBEQ */
4af70374 2888 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 2889 break;
a7812ae4 2890 case 0x32: /* FBLT */
4af70374 2891 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 2892 break;
a7812ae4 2893 case 0x33: /* FBLE */
4af70374 2894 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
2895 break;
2896 case 0x34:
2897 /* BSR */
4af70374 2898 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 2899 break;
a7812ae4 2900 case 0x35: /* FBNE */
4af70374 2901 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 2902 break;
a7812ae4 2903 case 0x36: /* FBGE */
4af70374 2904 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 2905 break;
a7812ae4 2906 case 0x37: /* FBGT */
4af70374 2907 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
2908 break;
2909 case 0x38:
2910 /* BLBC */
4af70374 2911 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
2912 break;
2913 case 0x39:
2914 /* BEQ */
4af70374 2915 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
2916 break;
2917 case 0x3A:
2918 /* BLT */
4af70374 2919 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
2920 break;
2921 case 0x3B:
2922 /* BLE */
4af70374 2923 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
2924 break;
2925 case 0x3C:
2926 /* BLBS */
4af70374 2927 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
2928 break;
2929 case 0x3D:
2930 /* BNE */
4af70374 2931 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
2932 break;
2933 case 0x3E:
2934 /* BGE */
4af70374 2935 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
2936 break;
2937 case 0x3F:
2938 /* BGT */
4af70374 2939 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
2940 break;
2941 invalid_opc:
8aa3fa20 2942 ret = gen_invalid(ctx);
4c9649a9
JM
2943 break;
2944 }
2945
2946 return ret;
2947}
2948
86a35f7c 2949static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
636aa200 2950 TranslationBlock *tb,
86a35f7c 2951 bool search_pc)
4c9649a9 2952{
ed2803da 2953 CPUState *cs = CPU(cpu);
86a35f7c 2954 CPUAlphaState *env = &cpu->env;
4c9649a9
JM
2955 DisasContext ctx, *ctxp = &ctx;
2956 target_ulong pc_start;
b114b68a 2957 target_ulong pc_mask;
4c9649a9
JM
2958 uint32_t insn;
2959 uint16_t *gen_opc_end;
a1d1bb31 2960 CPUBreakpoint *bp;
4c9649a9 2961 int j, lj = -1;
4af70374 2962 ExitStatus ret;
2e70f6ef
PB
2963 int num_insns;
2964 int max_insns;
4c9649a9
JM
2965
2966 pc_start = tb->pc;
92414b31 2967 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
2968
2969 ctx.tb = tb;
4c9649a9 2970 ctx.pc = pc_start;
bba9bdce 2971 ctx.mem_idx = cpu_mmu_index(env);
801c4c28 2972 ctx.implver = env->implver;
ed2803da 2973 ctx.singlestep_enabled = cs->singlestep_enabled;
f24518b5
RH
2974
2975 /* ??? Every TB begins with unset rounding mode, to be initialized on
2976 the first fp insn of the TB. Alternately we could define a proper
2977 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2978 to reset the FP_STATUS to that default at the end of any TB that
2979 changes the default. We could even (gasp) dynamically figure out
2980 what default would be most efficient given the running program. */
2981 ctx.tb_rm = -1;
2982 /* Similarly for flush-to-zero. */
2983 ctx.tb_ftz = -1;
2984
2e70f6ef
PB
2985 num_insns = 0;
2986 max_insns = tb->cflags & CF_COUNT_MASK;
b114b68a 2987 if (max_insns == 0) {
2e70f6ef 2988 max_insns = CF_COUNT_MASK;
b114b68a
RH
2989 }
2990
2991 if (in_superpage(&ctx, pc_start)) {
2992 pc_mask = (1ULL << 41) - 1;
2993 } else {
2994 pc_mask = ~TARGET_PAGE_MASK;
2995 }
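    /* A TB that starts in the superpage region (always mapped, so it is
       safe to cross page boundaries) is allowed to run up to the 2^41
       region boundary; otherwise translation stops at the next page
       boundary.  The mask chosen here feeds the stop condition below.  */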
2e70f6ef 2996
806f352d 2997 gen_tb_start();
4af70374 2998 do {
f0c3c505
AF
2999 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
3000 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 3001 if (bp->pc == ctx.pc) {
4c9649a9
JM
3002 gen_excp(&ctx, EXCP_DEBUG, 0);
3003 break;
3004 }
3005 }
3006 }
3007 if (search_pc) {
92414b31 3008 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3009 if (lj < j) {
3010 lj++;
3011 while (lj < j)
ab1103de 3012 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9 3013 }
25983cad 3014 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
ab1103de 3015 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 3016 tcg_ctx.gen_opc_icount[lj] = num_insns;
4c9649a9 3017 }
67debe3a 3018 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 3019 gen_io_start();
67debe3a 3020 }
c3082755 3021 insn = cpu_ldl_code(env, ctx.pc);
2e70f6ef 3022 num_insns++;
c4b3be39 3023
fdefe51c 3024 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
c4b3be39
RH
3025 tcg_gen_debug_insn_start(ctx.pc);
3026 }
3027
194cfb43
RH
3028 TCGV_UNUSED_I64(ctx.zero);
3029 TCGV_UNUSED_I64(ctx.sink);
3030 TCGV_UNUSED_I64(ctx.lit);
3031
4c9649a9
JM
3032 ctx.pc += 4;
3033 ret = translate_one(ctxp, insn);
19bf517b 3034
194cfb43
RH
3035 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
3036 tcg_gen_discard_i64(ctx.sink);
3037 tcg_temp_free(ctx.sink);
3038 }
3039 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
3040 tcg_temp_free(ctx.zero);
3041 }
3042 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
3043 tcg_temp_free(ctx.lit);
3044 }
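        /* ctx.zero, ctx.sink and ctx.lit are allocated on demand inside
           translate_one.  The sink collects writes that target $31/$f31;
           its value is explicitly discarded before the temporary is
           freed so that TCG can dead-code the stores.  */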
3045
bf1b03fe
RH
3046 /* If we reach a page boundary, are single stepping,
3047 or exhaust instruction count, stop generation. */
3048 if (ret == NO_EXIT
b114b68a 3049 && ((ctx.pc & pc_mask) == 0
efd7f486 3050 || tcg_ctx.gen_opc_ptr >= gen_opc_end
bf1b03fe
RH
3051 || num_insns >= max_insns
3052 || singlestep
ca6862a6 3053 || ctx.singlestep_enabled)) {
bf1b03fe 3054 ret = EXIT_PC_STALE;
1b530a6d 3055 }
4af70374
RH
3056 } while (ret == NO_EXIT);
3057
3058 if (tb->cflags & CF_LAST_IO) {
3059 gen_io_end();
4c9649a9 3060 }
4af70374
RH
3061
3062 switch (ret) {
3063 case EXIT_GOTO_TB:
8aa3fa20 3064 case EXIT_NORETURN:
4af70374
RH
3065 break;
3066 case EXIT_PC_STALE:
496cb5b9 3067 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3068 /* FALLTHRU */
3069 case EXIT_PC_UPDATED:
ca6862a6 3070 if (ctx.singlestep_enabled) {
bf1b03fe
RH
3071 gen_excp_1(EXCP_DEBUG, 0);
3072 } else {
3073 tcg_gen_exit_tb(0);
3074 }
4af70374
RH
3075 break;
3076 default:
3077 abort();
4c9649a9 3078 }
4af70374 3079
806f352d 3080 gen_tb_end(tb, num_insns);
efd7f486 3081 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4c9649a9 3082 if (search_pc) {
92414b31 3083 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3084 lj++;
3085 while (lj <= j)
ab1103de 3086 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3087 } else {
3088 tb->size = ctx.pc - pc_start;
2e70f6ef 3089 tb->icount = num_insns;
4c9649a9 3090 }
4af70374 3091
806991da 3092#ifdef DEBUG_DISAS
8fec2b8c 3093 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39 3094 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 3095 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
93fcfe39 3096 qemu_log("\n");
4c9649a9 3097 }
4c9649a9 3098#endif
4c9649a9
JM
3099}
3100
4d5712f1 3101void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3102{
86a35f7c 3103 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
4c9649a9
JM
3104}
3105
4d5712f1 3106void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3107{
86a35f7c 3108 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
4c9649a9
JM
3109}
3110
4d5712f1 3111void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 3112{
25983cad 3113 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
d2856f1a 3114}