/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here. */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero. */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary. */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence. */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO. */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change. */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB. */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC. */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND. */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare. */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
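
/* Added note (illustrative, not from the original source): for the EQ/NE
   case above, mzero - 1 == 0x7fffffffffffffffull clears only the sign bit,
   so the -0.0 pattern 0x8000000000000000 masks to 0 and compares equal to
   +0.0, which is what the IEEE comparison requires. */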

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec. */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either. */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64. */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
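
/* Added note (illustrative, not from the original source): the two masked
   terms above rebuild a 32-bit value whose bits <31:30> come from vb<63:62>
   and whose bits <29:0> come from vb<58:29>, i.e. the layout a longword
   occupies in an Alpha floating-point register; the arithmetic shift leaves
   the result already sign-extended to 64 bits. */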

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output. */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that. */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
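
/* Worked example (added, not from the original source): lit = 0x0f sets
   bits 0-3, selecting bytes 0-3, so zapnot_mask(0x0f) == 0x00000000ffffffffull.
   This is why gen_zapnoti() below can special-case 0x0f as a 32-bit zero
   extension. */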

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
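
/* Added note (not from the original source): both paths above reduce the
   shift count modulo 64 -- (64 - lit * 8) & 0x3f for the literal form and
   (-8 * B) & 0x3f for the register form -- so a byte offset of zero shifts
   by 0 rather than 64; the INSxH/MSKxH helpers below are the ones that need
   explicit shift-by-64 handling. */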

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward. */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
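
/* Added check (illustrative, not from the original source): with
   (B & 7) == 2 the register path shifts by (~(2 * 8) & 63) + 1 == 47 + 1
   == 48 bits, matching the literal path's 64 - 2 * 8; with (B & 7) == 0 it
   shifts by 63 + 1 == 64 in two steps, portably producing the required
   all-zero result. */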

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward. */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers. */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU. */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU. */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU. */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits. */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user. */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register. */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME. */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (ctx->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore. */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well. */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

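    /* Added note (not from the original source): in the operate formats,
       bit 12 selects between a register operand in rb and the zero-extended
       8-bit literal in bits <20:13>, which is why islit and lit are pulled
       from bits 12 and 13 above. */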
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads. */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL. */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ. */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            gen_helper_cmpbge(vc, va, vb);
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
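        /* Added note (not from the original source): for ADDQ/V and SUBQ/V
           below, signed overflow is detected purely from sign bits: overflow
           occurred iff the operand signs allow it (eqv of va and vb for add,
           xor for sub) and the result sign differs from va's, hence the
           final AND with (va ^ result) and shift by 63 before the
           check_overflow helper. */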
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP. */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV. */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT. */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

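    /* Opcode 0x13: integer multiply.  UMULH keeps only the high 64 bits of
       the unsigned 128-bit product; the /V forms additionally call the
       overflow helper so that a result which does not fit the destination
       raises an arithmetic trap. */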
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

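    /* Opcode 0x14: register moves from the integer to the FP file plus
       square root, gated below by the FIX-extension AMASK bit.  ITOFS and
       ITOFF repack the 32-bit memory format into the 64-bit register
       format on the way across; ITOFT is a plain bit copy. */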
    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

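    /* Opcode 0x15: VAX (F- and G-format) floating point.  D-format is not
       implemented, so the CVTDG/CVTGD cases below deliberately fault. */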
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

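    /* Opcode 0x16: IEEE (S- and T-format) floating point.  The gen_*
       helpers decode the rounding-mode and trap qualifiers from fn11. */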
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

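    /* Opcode 0x17: FP auxiliary operations -- CVTLQ/CVTQL, the CPYS
       sign-copy family, FPCR moves, and FP conditional moves. */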
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP. */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV. */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding. */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

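    /* Opcode 0x18: miscellaneous memory-format instructions.  The barriers
       and prefetch hints are no-ops for TCG; RPCC reads the process cycle
       counter; RC and RS read and then clear or set the per-CPU interrupt
       flag. */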
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;

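    /* Opcodes 0x19/0x1D: HW_MFPR and HW_MTPR access internal processor
       registers and are only accepted while in PALmode. */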
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement. */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;

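    /* Opcode 0x1B: HW_LD (PALcode only).  The 12-bit displacement is added
       to Rb, and bits <15:12> of the instruction select the access type;
       only the physical and "with protection check" virtual forms are
       implemented here. */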
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(va, cpu_env, addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(va, cpu_env, addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(va, cpu_env, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(va, cpu_env, addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif

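    /* Opcode 0x1C: FTOIT/FTOIS move an FP register to an integer register
       without conversion; the remaining function codes are the BWX, CIX
       and MVI extension operates, each gated by the corresponding AMASK
       flag. */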
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_ctpop(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_ctlz(vc, vb);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_cttz(vc, vb);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it. */
            tmp = tcg_temp_new();
            tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
            gen_helper_hw_ret(cpu_env, tmp);
            tcg_temp_free(tmp);
        } else {
            gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
        }
        ret = EXIT_PC_UPDATED;
        break;
#else
        goto invalid_opc;
#endif

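    /* Opcode 0x1F: HW_ST (PALcode only), the store-side mirror of HW_LD;
       only the physical access types are implemented. */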
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(cpu_env, addr, va);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(cpu_env, addr, va);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
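
    /* Opcodes 0x20-0x2F: memory-format loads and stores.  0x20-0x27 are
       the FP forms; LDL_L/LDQ_L and STL_C/STQ_C implement the
       load-locked / store-conditional pairs. */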
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
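    /* Opcodes 0x30-0x3F: branch format.  BR and BSR both record the return
       address via gen_bdirect; BLBC/BLBS test only bit 0 of Ra, and the
       FB* forms branch on a comparison of Fa against zero. */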
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

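/* Translate a block of guest instructions starting at tb->pc, stopping at
   a control-flow change, a page boundary, or the instruction budget. */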
86a35f7c 2796static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
636aa200 2797 TranslationBlock *tb,
86a35f7c 2798 bool search_pc)
4c9649a9 2799{
ed2803da 2800 CPUState *cs = CPU(cpu);
86a35f7c 2801 CPUAlphaState *env = &cpu->env;
4c9649a9
JM
2802 DisasContext ctx, *ctxp = &ctx;
2803 target_ulong pc_start;
b114b68a 2804 target_ulong pc_mask;
4c9649a9 2805 uint32_t insn;
a1d1bb31 2806 CPUBreakpoint *bp;
4c9649a9 2807 int j, lj = -1;
4af70374 2808 ExitStatus ret;
2e70f6ef
PB
2809 int num_insns;
2810 int max_insns;
4c9649a9
JM
2811
2812 pc_start = tb->pc;
4af70374
RH
2813
2814 ctx.tb = tb;
4c9649a9 2815 ctx.pc = pc_start;
bba9bdce 2816 ctx.mem_idx = cpu_mmu_index(env);
801c4c28 2817 ctx.implver = env->implver;
ed2803da 2818 ctx.singlestep_enabled = cs->singlestep_enabled;
f24518b5
RH
2819
2820 /* ??? Every TB begins with unset rounding mode, to be initialized on
2821 the first fp insn of the TB. Alternately we could define a proper
2822 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2823 to reset the FP_STATUS to that default at the end of any TB that
2824 changes the default. We could even (gasp) dynamiclly figure out
2825 what default would be most efficient given the running program. */
2826 ctx.tb_rm = -1;
2827 /* Similarly for flush-to-zero. */
2828 ctx.tb_ftz = -1;
2829
2e70f6ef
PB
2830 num_insns = 0;
2831 max_insns = tb->cflags & CF_COUNT_MASK;
b114b68a 2832 if (max_insns == 0) {
2e70f6ef 2833 max_insns = CF_COUNT_MASK;
b114b68a
RH
2834 }
2835
2836 if (in_superpage(&ctx, pc_start)) {
2837 pc_mask = (1ULL << 41) - 1;
2838 } else {
2839 pc_mask = ~TARGET_PAGE_MASK;
2840 }
2e70f6ef 2841
cd42d5b2 2842 gen_tb_start(tb);
4af70374 2843 do {
f0c3c505
AF
2844 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2845 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 2846 if (bp->pc == ctx.pc) {
4c9649a9
JM
2847 gen_excp(&ctx, EXCP_DEBUG, 0);
2848 break;
2849 }
2850 }
2851 }
2852 if (search_pc) {
fe700adb 2853 j = tcg_op_buf_count();
4c9649a9
JM
2854 if (lj < j) {
2855 lj++;
fe700adb 2856 while (lj < j) {
ab1103de 2857 tcg_ctx.gen_opc_instr_start[lj++] = 0;
fe700adb 2858 }
4c9649a9 2859 }
25983cad 2860 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
ab1103de 2861 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 2862 tcg_ctx.gen_opc_icount[lj] = num_insns;
4c9649a9 2863 }
67debe3a 2864 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 2865 gen_io_start();
67debe3a 2866 }
c3082755 2867 insn = cpu_ldl_code(env, ctx.pc);
2e70f6ef 2868 num_insns++;
c4b3be39 2869
fdefe51c 2870 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
c4b3be39
RH
2871 tcg_gen_debug_insn_start(ctx.pc);
2872 }
2873
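        /* ctx.zero, ctx.sink and ctx.lit are allocated lazily by the
           register load/dest helpers while translating one instruction
           and released again immediately below. */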
        TCGV_UNUSED_I64(ctx.zero);
        TCGV_UNUSED_I64(ctx.sink);
        TCGV_UNUSED_I64(ctx.lit);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
            tcg_gen_discard_i64(ctx.sink);
            tcg_temp_free(ctx.sink);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
            tcg_temp_free(ctx.zero);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
            tcg_temp_free(ctx.lit);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

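    /* Emit the TB epilogue according to how translation stopped. */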
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}