/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

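/* Raise EXCEPTION with the given ERROR_CODE.  gen_excp additionally flushes
   the current PC to cpu_pc first, so the exception sees the faulting
   instruction's address.  */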
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

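/* Loads of VAX F/G and IEEE S floating-point values go through helpers that
   convert between the memory image and the register-file format.  */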
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

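/* STL_C/STQ_C: store RA at the locked address only if the address still
   matches the last load-locked and the memory value is unchanged, then
   write the success flag back into RA.  */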
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

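/* Return true if ADDR lies in the kernel superpage region, whose page
   permissions can never change, so a direct goto_tb to it stays valid.  */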
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        }
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

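/* CVTLQ/CVTQL: convert between a longword held in floating-point register
   format (bits spread across <63:62> and <58:29>) and a canonical integer.  */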
static void gen_fcvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_fcvtql(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF);
    tcg_gen_shli_i64(tmp, tmp, 32);
    tcg_gen_shli_i64(vc, vc, 29);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;
    int ignore = 0;

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(vc, cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(vc, cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

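/* Expand an 8-bit ZAPNOT byte-select literal into the equivalent 64-bit
   bit mask, e.g. 0x0f -> 0x00000000ffffffff.  */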
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

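/* Read the current interrupt flag into RA (unless RA is $31), then store
   SET as the new flag value.  */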
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}

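/* System-mode only: the internal processor-register file accessed via
   gen_mfpr and gen_mtpr below.  Unknown register numbers are read-zero,
   write-ignore; PR_BYTE and PR_LONG mark fields narrower than 64 bits.  */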
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (ctx->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

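/* Decode and translate a single instruction; the return value indicates
   how the translation block should be ended.  */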
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit;
    TCGv va, vb, vc, tmp;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            gen_helper_cmpbge(vc, va, vb);
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            gen_helper_addlv(vc, cpu_env, va, vb);
            break;
        case 0x49:
            /* SUBL/V */
            gen_helper_sublv(vc, cpu_env, va, vb);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_helper_addqv(vc, cpu_env, va, vb);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_helper_subqv(vc, cpu_env, va, vb);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

4c9649a9 1906 case 0x13:
de4d3555
RH
1907 vc = dest_gpr(ctx, rc);
1908 vb = load_gpr_lit(ctx, rb, lit, islit);
1909 va = load_gpr(ctx, ra);
4c9649a9
JM
1910 switch (fn7) {
1911 case 0x00:
1912 /* MULL */
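/* MULL is a 32-bit multiply: the low 32 bits of the product are
   sign-extended into the destination register. */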
de4d3555
RH
1913 tcg_gen_mul_i64(vc, va, vb);
1914 tcg_gen_ext32s_i64(vc, vc);
4c9649a9
JM
1915 break;
1916 case 0x20:
1917 /* MULQ */
de4d3555 1918 tcg_gen_mul_i64(vc, va, vb);
4c9649a9
JM
1919 break;
1920 case 0x30:
1921 /* UMULH */
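/* tcg_gen_mulu2_i64 computes the full 128-bit unsigned product; the
   low half lands in a scratch temp and is discarded, only the high
   half is kept in vc. */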
de4d3555
RH
1922 tmp = tcg_temp_new();
1923 tcg_gen_mulu2_i64(tmp, vc, va, vb);
1924 tcg_temp_free(tmp);
4c9649a9
JM
1925 break;
1926 case 0x40:
1927 /* MULL/V */
42774a56 1928 gen_helper_mullv(vc, cpu_env, va, vb);
4c9649a9
JM
1929 break;
1930 case 0x60:
1931 /* MULQ/V */
42774a56 1932 gen_helper_mulqv(vc, cpu_env, va, vb);
4c9649a9
JM
1933 break;
1934 default:
1935 goto invalid_opc;
1936 }
1937 break;
075b8ddb 1938
4c9649a9 1939 case 0x14:
5238c886 1940 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
baee04ab 1941 vc = dest_fpr(ctx, rc);
f24518b5 1942 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
1943 case 0x04:
1944 /* ITOFS */
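/* ITOFS and ITOFF move the low 32 bits of Ra into an FP register,
   reformatting to the register S/F layout via the memory_to_s/f
   helpers; ITOFT below is a plain 64-bit move since the T format
   fills the whole register. */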
64f45e49 1945 REQUIRE_REG_31(rb);
075b8ddb
RH
1946 t32 = tcg_temp_new_i32();
1947 va = load_gpr(ctx, ra);
075b8ddb
RH
1948 tcg_gen_trunc_i64_i32(t32, va);
1949 gen_helper_memory_to_s(vc, t32);
1950 tcg_temp_free_i32(t32);
4c9649a9
JM
1951 break;
1952 case 0x0A:
1953 /* SQRTF */
64f45e49 1954 REQUIRE_REG_31(ra);
baee04ab
RH
1955 vb = load_fpr(ctx, rb);
1956 gen_helper_sqrtf(vc, cpu_env, vb);
5238c886 1957 break;
4c9649a9
JM
1958 case 0x0B:
1959 /* SQRTS */
64f45e49 1960 REQUIRE_REG_31(ra);
5238c886
RH
1961 gen_fsqrts(ctx, rb, rc, fn11);
1962 break;
4c9649a9
JM
1963 case 0x14:
1964 /* ITOFF */
64f45e49 1965 REQUIRE_REG_31(rb);
075b8ddb
RH
1966 t32 = tcg_temp_new_i32();
1967 va = load_gpr(ctx, ra);
075b8ddb
RH
1968 tcg_gen_trunc_i64_i32(t32, va);
1969 gen_helper_memory_to_f(vc, t32);
1970 tcg_temp_free_i32(t32);
4c9649a9
JM
1971 break;
1972 case 0x24:
1973 /* ITOFT */
64f45e49 1974 REQUIRE_REG_31(rb);
075b8ddb 1975 va = load_gpr(ctx, ra);
075b8ddb 1976 tcg_gen_mov_i64(vc, va);
4c9649a9
JM
1977 break;
1978 case 0x2A:
1979 /* SQRTG */
64f45e49 1980 REQUIRE_REG_31(ra);
baee04ab
RH
1981 vb = load_fpr(ctx, rb);
1982 gen_helper_sqrtg(vc, cpu_env, vb);
5238c886 1983 break;
4c9649a9
JM
1984 case 0x2B:
1985 /* SQRTT */
64f45e49 1986 REQUIRE_REG_31(ra);
5238c886
RH
1987 gen_fsqrtt(ctx, rb, rc, fn11);
1988 break;
4c9649a9
JM
1989 default:
1990 goto invalid_opc;
1991 }
1992 break;
6b88b37c 1993
4c9649a9
JM
1994 case 0x15:
1995 /* VAX floating point */
1996 /* XXX: the rounding mode and trap qualifiers are ignored (!) */
baee04ab
RH
1997 vc = dest_fpr(ctx, rc);
1998 vb = load_fpr(ctx, rb);
3d045dbc 1999 va = load_fpr(ctx, ra);
f24518b5 2000 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2001 case 0x00:
2002 /* ADDF */
3d045dbc 2003 gen_helper_addf(vc, cpu_env, va, vb);
4c9649a9
JM
2004 break;
2005 case 0x01:
2006 /* SUBF */
3d045dbc 2007 gen_helper_subf(vc, cpu_env, va, vb);
4c9649a9
JM
2008 break;
2009 case 0x02:
2010 /* MULF */
3d045dbc 2011 gen_helper_mulf(vc, cpu_env, va, vb);
4c9649a9
JM
2012 break;
2013 case 0x03:
2014 /* DIVF */
3d045dbc 2015 gen_helper_divf(vc, cpu_env, va, vb);
4c9649a9
JM
2016 break;
2017 case 0x1E:
64f45e49
RH
2018 /* CVTDG -- TODO */
2019 REQUIRE_REG_31(ra);
4c9649a9 2020 goto invalid_opc;
4c9649a9
JM
2021 case 0x20:
2022 /* ADDG */
3d045dbc 2023 gen_helper_addg(vc, cpu_env, va, vb);
4c9649a9
JM
2024 break;
2025 case 0x21:
2026 /* SUBG */
3d045dbc 2027 gen_helper_subg(vc, cpu_env, va, vb);
4c9649a9
JM
2028 break;
2029 case 0x22:
2030 /* MULG */
3d045dbc 2031 gen_helper_mulg(vc, cpu_env, va, vb);
4c9649a9
JM
2032 break;
2033 case 0x23:
2034 /* DIVG */
3d045dbc 2035 gen_helper_divg(vc, cpu_env, va, vb);
4c9649a9
JM
2036 break;
2037 case 0x25:
2038 /* CMPGEQ */
3d045dbc 2039 gen_helper_cmpgeq(vc, cpu_env, va, vb);
4c9649a9
JM
2040 break;
2041 case 0x26:
2042 /* CMPGLT */
3d045dbc 2043 gen_helper_cmpglt(vc, cpu_env, va, vb);
4c9649a9
JM
2044 break;
2045 case 0x27:
2046 /* CMPGLE */
3d045dbc 2047 gen_helper_cmpgle(vc, cpu_env, va, vb);
4c9649a9
JM
2048 break;
2049 case 0x2C:
2050 /* CVTGF */
64f45e49 2051 REQUIRE_REG_31(ra);
baee04ab 2052 gen_helper_cvtgf(vc, cpu_env, vb);
4c9649a9
JM
2053 break;
2054 case 0x2D:
64f45e49
RH
2055 /* CVTGD -- TODO */
2056 REQUIRE_REG_31(ra);
4c9649a9 2057 goto invalid_opc;
4c9649a9
JM
2058 case 0x2F:
2059 /* CVTGQ */
64f45e49 2060 REQUIRE_REG_31(ra);
baee04ab 2061 gen_helper_cvtgq(vc, cpu_env, vb);
4c9649a9
JM
2062 break;
2063 case 0x3C:
2064 /* CVTQF */
64f45e49 2065 REQUIRE_REG_31(ra);
baee04ab 2066 gen_helper_cvtqf(vc, cpu_env, vb);
4c9649a9
JM
2067 break;
2068 case 0x3E:
2069 /* CVTQG */
64f45e49 2070 REQUIRE_REG_31(ra);
baee04ab 2071 gen_helper_cvtqg(vc, cpu_env, vb);
4c9649a9
JM
2072 break;
2073 default:
2074 goto invalid_opc;
2075 }
2076 break;
6b88b37c 2077
4c9649a9
JM
2078 case 0x16:
2079 /* IEEE floating-point */
f24518b5 2080 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2081 case 0x00:
2082 /* ADDS */
f24518b5 2083 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2084 break;
2085 case 0x01:
2086 /* SUBS */
f24518b5 2087 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2088 break;
2089 case 0x02:
2090 /* MULS */
f24518b5 2091 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2092 break;
2093 case 0x03:
2094 /* DIVS */
f24518b5 2095 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2096 break;
2097 case 0x20:
2098 /* ADDT */
f24518b5 2099 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2100 break;
2101 case 0x21:
2102 /* SUBT */
f24518b5 2103 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2104 break;
2105 case 0x22:
2106 /* MULT */
f24518b5 2107 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2108 break;
2109 case 0x23:
2110 /* DIVT */
f24518b5 2111 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2112 break;
2113 case 0x24:
2114 /* CMPTUN */
f24518b5 2115 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2116 break;
2117 case 0x25:
2118 /* CMPTEQ */
f24518b5 2119 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2120 break;
2121 case 0x26:
2122 /* CMPTLT */
f24518b5 2123 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2124 break;
2125 case 0x27:
2126 /* CMPTLE */
f24518b5 2127 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2128 break;
2129 case 0x2C:
64f45e49 2130 REQUIRE_REG_31(ra);
a74b4d2c 2131 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2132 /* CVTST */
f24518b5 2133 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2134 } else {
2135 /* CVTTS */
f24518b5 2136 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2137 }
2138 break;
2139 case 0x2F:
2140 /* CVTTQ */
64f45e49 2141 REQUIRE_REG_31(ra);
f24518b5 2142 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2143 break;
2144 case 0x3C:
2145 /* CVTQS */
64f45e49 2146 REQUIRE_REG_31(ra);
f24518b5 2147 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2148 break;
2149 case 0x3E:
2150 /* CVTQT */
64f45e49 2151 REQUIRE_REG_31(ra);
f24518b5 2152 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2153 break;
2154 default:
2155 goto invalid_opc;
2156 }
2157 break;
6b88b37c 2158
4c9649a9
JM
2159 case 0x17:
2160 switch (fn11) {
2161 case 0x010:
2162 /* CVTLQ */
64f45e49 2163 REQUIRE_REG_31(ra);
e8d8fef4
RH
2164 vc = dest_fpr(ctx, rc);
2165 vb = load_fpr(ctx, rb);
2166 gen_fcvtlq(vc, vb);
4c9649a9
JM
2167 break;
2168 case 0x020:
6b88b37c
RH
2169 /* CPYS */
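/* CPYS/CPYSN/CPYSE merge Fa and Fb under a bit mask: the sign bit
   alone for CPYS and CPYSN (the latter negating it), sign plus
   exponent (top 12 bits) for CPYSE; the remaining bits come from Fb. */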
2170 if (rc == 31) {
2171 /* Special case CPYS as FNOP. */
ef3765cb 2172 } else {
6b88b37c 2173 vc = dest_fpr(ctx, rc);
ef3765cb
RH
2174 va = load_fpr(ctx, ra);
2175 if (ra == rb) {
2176 /* Special case CPYS as FMOV. */
6b88b37c 2177 tcg_gen_mov_i64(vc, va);
ef3765cb
RH
2178 } else {
2179 vb = load_fpr(ctx, rb);
2180 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
a06d48d9 2181 }
4c9649a9
JM
2182 }
2183 break;
2184 case 0x021:
2185 /* CPYSN */
ef3765cb
RH
2186 vc = dest_fpr(ctx, rc);
2187 vb = load_fpr(ctx, rb);
2188 va = load_fpr(ctx, ra);
2189 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
4c9649a9
JM
2190 break;
2191 case 0x022:
2192 /* CPYSE */
ef3765cb
RH
2193 vc = dest_fpr(ctx, rc);
2194 vb = load_fpr(ctx, rb);
2195 va = load_fpr(ctx, ra);
2196 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
4c9649a9
JM
2197 break;
2198 case 0x024:
2199 /* MT_FPCR */
6b88b37c
RH
2200 va = load_fpr(ctx, ra);
2201 gen_helper_store_fpcr(cpu_env, va);
4c9649a9
JM
2202 break;
2203 case 0x025:
2204 /* MF_FPCR */
6b88b37c
RH
2205 va = dest_fpr(ctx, ra);
2206 gen_helper_load_fpcr(va, cpu_env);
4c9649a9
JM
2207 break;
2208 case 0x02A:
2209 /* FCMOVEQ */
65809352 2210 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2211 break;
2212 case 0x02B:
2213 /* FCMOVNE */
65809352 2214 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2215 break;
2216 case 0x02C:
2217 /* FCMOVLT */
65809352 2218 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2219 break;
2220 case 0x02D:
2221 /* FCMOVGE */
65809352 2222 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2223 break;
2224 case 0x02E:
2225 /* FCMOVLE */
65809352 2226 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2227 break;
2228 case 0x02F:
2229 /* FCMOVGT */
65809352 2230 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2231 break;
2232 case 0x030:
2233 /* CVTQL */
64f45e49 2234 REQUIRE_REG_31(ra);
e8d8fef4
RH
2235 vc = dest_fpr(ctx, rc);
2236 vb = load_fpr(ctx, rb);
2237 gen_fcvtql(vc, vb);
4c9649a9
JM
2238 break;
2239 case 0x130:
2240 /* CVTQL/V */
4c9649a9
JM
2241 case 0x530:
2242 /* CVTQL/SV */
64f45e49 2243 REQUIRE_REG_31(ra);
735cf45f
RH
2244 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2245 /v doesn't do. The only thing I can think of is that /sv is a
2246 valid instruction merely for completeness in the ISA. */
e8d8fef4
RH
2247 vc = dest_fpr(ctx, rc);
2248 vb = load_fpr(ctx, rb);
2249 gen_helper_fcvtql_v_input(cpu_env, vb);
2250 gen_fcvtql(vc, vb);
4c9649a9
JM
2251 break;
2252 default:
2253 goto invalid_opc;
2254 }
2255 break;
89fe090b 2256
4c9649a9
JM
2257 case 0x18:
2258 switch ((uint16_t)disp16) {
2259 case 0x0000:
2260 /* TRAPB */
4af70374 2261 /* No-op. */
4c9649a9
JM
2262 break;
2263 case 0x0400:
2264 /* EXCB */
4af70374 2265 /* No-op. */
4c9649a9
JM
2266 break;
2267 case 0x4000:
2268 /* MB */
2269 /* No-op */
2270 break;
2271 case 0x4400:
2272 /* WMB */
2273 /* No-op */
2274 break;
2275 case 0x8000:
2276 /* FETCH */
2277 /* No-op */
2278 break;
2279 case 0xA000:
2280 /* FETCH_M */
2281 /* No-op */
2282 break;
2283 case 0xC000:
2284 /* RPCC */
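/* With icount enabled, the cycle counter read is bracketed by
   gen_io_start/gen_io_end and the TB ends here (EXIT_PC_STALE), as is
   usual for I/O-like instructions, so the instruction count stays
   exact. */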
89fe090b 2285 va = dest_gpr(ctx, ra);
bd79255d 2286 if (ctx->tb->cflags & CF_USE_ICOUNT) {
89fe090b
RH
2287 gen_io_start();
2288 gen_helper_load_pcc(va, cpu_env);
2289 gen_io_end();
2290 ret = EXIT_PC_STALE;
2291 } else {
2292 gen_helper_load_pcc(va, cpu_env);
a9406ea1 2293 }
4c9649a9
JM
2294 break;
2295 case 0xE000:
2296 /* RC */
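/* RC and RS read the per-cpu sticky flag into Ra, then clear (RC) or
   set (RS) it; the second argument to gen_rx is the new flag value. */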
ac316ca4 2297 gen_rx(ra, 0);
4c9649a9
JM
2298 break;
2299 case 0xE800:
2300 /* ECB */
4c9649a9
JM
2301 break;
2302 case 0xF000:
2303 /* RS */
ac316ca4 2304 gen_rx(ra, 1);
4c9649a9
JM
2305 break;
2306 case 0xF800:
2307 /* WH64 */
2308 /* No-op */
2309 break;
2310 default:
2311 goto invalid_opc;
2312 }
2313 break;
8f56ced8 2314
4c9649a9
JM
2315 case 0x19:
2316 /* HW_MFPR (PALcode) */
26b46094 2317#ifndef CONFIG_USER_ONLY
5238c886 2318 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
0e154fe9 2319 va = dest_gpr(ctx, ra);
bd79255d 2320 ret = gen_mfpr(ctx, va, insn & 0xffff);
a9e05a1c 2321 break;
5238c886 2322#else
4c9649a9 2323 goto invalid_opc;
5238c886 2324#endif
8f56ced8 2325
4c9649a9 2326 case 0x1A:
49563a72
RH
2327 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2328 prediction stack action, which of course we don't implement. */
8f56ced8
RH
2329 vb = load_gpr(ctx, rb);
2330 tcg_gen_andi_i64(cpu_pc, vb, ~3);
49563a72 2331 if (ra != 31) {
1304ca87 2332 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2333 }
4af70374 2334 ret = EXIT_PC_UPDATED;
4c9649a9 2335 break;
1eaa1da7 2336
4c9649a9
JM
2337 case 0x1B:
2338 /* HW_LD (PALcode) */
a18ad893 2339#ifndef CONFIG_USER_ONLY
5238c886
RH
2340 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2341 {
1eaa1da7
RH
2342 TCGv addr = tcg_temp_new();
2343 vb = load_gpr(ctx, rb);
2344 va = dest_gpr(ctx, ra);
a18ad893 2345
1eaa1da7 2346 tcg_gen_addi_i64(addr, vb, disp12);
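/* Bits <15:12> of the instruction select the HW_LD variant:
   physical, physical with lock, virtual PTE fetch, plain virtual and
   the alt-mode/protection-check forms handled below. */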
8bb6e981
AJ
2347 switch ((insn >> 12) & 0xF) {
2348 case 0x0:
b5d51029 2349 /* Longword physical access (hw_ldl/p) */
1eaa1da7 2350 gen_helper_ldl_phys(va, cpu_env, addr);
8bb6e981
AJ
2351 break;
2352 case 0x1:
b5d51029 2353 /* Quadword physical access (hw_ldq/p) */
1eaa1da7 2354 gen_helper_ldq_phys(va, cpu_env, addr);
8bb6e981
AJ
2355 break;
2356 case 0x2:
b5d51029 2357 /* Longword physical access with lock (hw_ldl_l/p) */
1eaa1da7 2358 gen_helper_ldl_l_phys(va, cpu_env, addr);
8bb6e981
AJ
2359 break;
2360 case 0x3:
b5d51029 2361 /* Quadword physical access with lock (hw_ldq_l/p) */
1eaa1da7 2362 gen_helper_ldq_l_phys(va, cpu_env, addr);
8bb6e981
AJ
2363 break;
2364 case 0x4:
b5d51029 2365 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2366 goto invalid_opc;
8bb6e981 2367 case 0x5:
b5d51029 2368 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2369 goto invalid_opc;
8bb6e981
AJ
2370 break;
2371 case 0x6:
2372 /* Invalid */
b5d51029 2373 goto invalid_opc;
8bb6e981
AJ
2374 case 0x7:
2375 /* Invalid */
b5d51029 2376 goto invalid_opc;
8bb6e981 2377 case 0x8:
b5d51029 2378 /* Longword virtual access (hw_ldl) */
2374e73e 2379 goto invalid_opc;
8bb6e981 2380 case 0x9:
b5d51029 2381 /* Quadword virtual access (hw_ldq) */
2374e73e 2382 goto invalid_opc;
8bb6e981 2383 case 0xA:
b5d51029 2384 /* Longword virtual access with protection check (hw_ldl/w) */
1eaa1da7 2385 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
8bb6e981
AJ
2386 break;
2387 case 0xB:
b5d51029 2388 /* Quadword virtual access with protection check (hw_ldq/w) */
1eaa1da7 2389 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
8bb6e981
AJ
2390 break;
2391 case 0xC:
b5d51029 2392 /* Longword virtual access with alt access mode (hw_ldl/a) */
2374e73e 2393 goto invalid_opc;
8bb6e981 2394 case 0xD:
b5d51029 2395 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2396 goto invalid_opc;
8bb6e981
AJ
2397 case 0xE:
2398 /* Longword virtual access with alternate access mode and
2374e73e 2399 protection checks (hw_ldl/wa) */
1eaa1da7 2400 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
8bb6e981
AJ
2401 break;
2402 case 0xF:
2403 /* Quadword virtual access with alternate access mode and
2374e73e 2404 protection checks (hw_ldq/wa) */
1eaa1da7 2405 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
8bb6e981
AJ
2406 break;
2407 }
2408 tcg_temp_free(addr);
a18ad893 2409 break;
4c9649a9 2410 }
5238c886 2411#else
a18ad893 2412 goto invalid_opc;
5238c886 2413#endif
c67b67e5 2414
4c9649a9 2415 case 0x1C:
c67b67e5 2416 vc = dest_gpr(ctx, rc);
cd2754ad
RH
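/* FTOIT and FTOIS take their source from an FP register (Ra) and
   require Rb to be $31, so handle them before the integer Rb/literal
   load used by the rest of opcode 0x1C. */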
2417 if (fn7 == 0x70) {
2418 /* FTOIT */
2419 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2420 REQUIRE_REG_31(rb);
2421 va = load_fpr(ctx, ra);
2422 tcg_gen_mov_i64(vc, va);
2423 break;
2424 } else if (fn7 == 0x78) {
2425 /* FTOIS */
2426 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2427 REQUIRE_REG_31(rb);
2428 t32 = tcg_temp_new_i32();
2429 va = load_fpr(ctx, ra);
2430 gen_helper_s_to_memory(t32, va);
2431 tcg_gen_ext_i32_i64(vc, t32);
2432 tcg_temp_free_i32(t32);
2433 break;
2434 }
2435
2436 vb = load_gpr_lit(ctx, rb, lit, islit);
4c9649a9
JM
2437 switch (fn7) {
2438 case 0x00:
2439 /* SEXTB */
5238c886 2440 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
64f45e49 2441 REQUIRE_REG_31(ra);
c67b67e5 2442 tcg_gen_ext8s_i64(vc, vb);
4c9649a9
JM
2443 break;
2444 case 0x01:
2445 /* SEXTW */
5238c886 2446 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
64f45e49 2447 REQUIRE_REG_31(ra);
c67b67e5 2448 tcg_gen_ext16s_i64(vc, vb);
5238c886 2449 break;
4c9649a9
JM
2450 case 0x30:
2451 /* CTPOP */
5238c886 2452 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2453 REQUIRE_REG_31(ra);
c67b67e5 2454 gen_helper_ctpop(vc, vb);
5238c886 2455 break;
4c9649a9
JM
2456 case 0x31:
2457 /* PERR */
5238c886 2458 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2459 va = load_gpr(ctx, ra);
2460 gen_helper_perr(vc, va, vb);
5238c886 2461 break;
4c9649a9
JM
2462 case 0x32:
2463 /* CTLZ */
5238c886 2464 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2465 REQUIRE_REG_31(ra);
c67b67e5 2466 gen_helper_ctlz(vc, vb);
5238c886 2467 break;
4c9649a9
JM
2468 case 0x33:
2469 /* CTTZ */
5238c886 2470 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2471 REQUIRE_REG_31(ra);
c67b67e5 2472 gen_helper_cttz(vc, vb);
5238c886 2473 break;
4c9649a9
JM
2474 case 0x34:
2475 /* UNPKBW */
5238c886 2476 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2477 REQUIRE_REG_31(ra);
f477ed3c 2478 gen_helper_unpkbw(vc, vb);
5238c886 2479 break;
4c9649a9 2480 case 0x35:
13e4df99 2481 /* UNPKBL */
5238c886 2482 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2483 REQUIRE_REG_31(ra);
f477ed3c 2484 gen_helper_unpkbl(vc, vb);
5238c886 2485 break;
4c9649a9
JM
2486 case 0x36:
2487 /* PKWB */
5238c886 2488 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2489 REQUIRE_REG_31(ra);
f477ed3c 2490 gen_helper_pkwb(vc, vb);
5238c886 2491 break;
4c9649a9
JM
2492 case 0x37:
2493 /* PKLB */
5238c886 2494 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2495 REQUIRE_REG_31(ra);
f477ed3c 2496 gen_helper_pklb(vc, vb);
5238c886 2497 break;
4c9649a9
JM
2498 case 0x38:
2499 /* MINSB8 */
5238c886 2500 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2501 va = load_gpr(ctx, ra);
2502 gen_helper_minsb8(vc, va, vb);
5238c886 2503 break;
4c9649a9
JM
2504 case 0x39:
2505 /* MINSW4 */
5238c886 2506 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2507 va = load_gpr(ctx, ra);
2508 gen_helper_minsw4(vc, va, vb);
5238c886 2509 break;
4c9649a9
JM
2510 case 0x3A:
2511 /* MINUB8 */
5238c886 2512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2513 va = load_gpr(ctx, ra);
2514 gen_helper_minub8(vc, va, vb);
5238c886 2515 break;
4c9649a9
JM
2516 case 0x3B:
2517 /* MINUW4 */
5238c886 2518 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2519 va = load_gpr(ctx, ra);
2520 gen_helper_minuw4(vc, va, vb);
5238c886 2521 break;
4c9649a9
JM
2522 case 0x3C:
2523 /* MAXUB8 */
5238c886 2524 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2525 va = load_gpr(ctx, ra);
2526 gen_helper_maxub8(vc, va, vb);
5238c886 2527 break;
4c9649a9
JM
2528 case 0x3D:
2529 /* MAXUW4 */
5238c886 2530 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2531 va = load_gpr(ctx, ra);
2532 gen_helper_maxuw4(vc, va, vb);
5238c886 2533 break;
4c9649a9
JM
2534 case 0x3E:
2535 /* MAXSB8 */
5238c886 2536 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2537 va = load_gpr(ctx, ra);
2538 gen_helper_maxsb8(vc, va, vb);
5238c886 2539 break;
4c9649a9
JM
2540 case 0x3F:
2541 /* MAXSW4 */
5238c886 2542 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
cd2754ad
RH
2543 va = load_gpr(ctx, ra);
2544 gen_helper_maxsw4(vc, va, vb);
4c9649a9
JM
2545 break;
2546 default:
2547 goto invalid_opc;
2548 }
2549 break;
46010969 2550
4c9649a9
JM
2551 case 0x1D:
2552 /* HW_MTPR (PALcode) */
26b46094 2553#ifndef CONFIG_USER_ONLY
5238c886 2554 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
0e154fe9 2555 vb = load_gpr(ctx, rb);
a9e05a1c
RH
2556 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2557 break;
5238c886 2558#else
4c9649a9 2559 goto invalid_opc;
5238c886 2560#endif
46010969 2561
4c9649a9 2562 case 0x1E:
508b43ea 2563 /* HW_RET (PALcode) */
a18ad893 2564#ifndef CONFIG_USER_ONLY
5238c886
RH
2565 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2566 if (rb == 31) {
2567 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2568 address from EXC_ADDR. This turns out to be useful for our
2569 emulation PALcode, so continue to accept it. */
46010969 2570 tmp = tcg_temp_new();
5238c886
RH
2571 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2572 gen_helper_hw_ret(cpu_env, tmp);
2573 tcg_temp_free(tmp);
2574 } else {
46010969 2575 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
4c9649a9 2576 }
5238c886
RH
2577 ret = EXIT_PC_UPDATED;
2578 break;
2579#else
a18ad893 2580 goto invalid_opc;
5238c886 2581#endif
a4af3044 2582
4c9649a9
JM
2583 case 0x1F:
2584 /* HW_ST (PALcode) */
a18ad893 2585#ifndef CONFIG_USER_ONLY
5238c886
RH
2586 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2587 {
a4af3044
RH
2588 TCGv addr = tcg_temp_new();
2589 va = load_gpr(ctx, ra);
2590 vb = load_gpr(ctx, rb);
2591
2592 tcg_gen_addi_i64(addr, vb, disp12);
8bb6e981
AJ
2593 switch ((insn >> 12) & 0xF) {
2594 case 0x0:
2595 /* Longword physical access */
a4af3044 2596 gen_helper_stl_phys(cpu_env, addr, va);
8bb6e981
AJ
2597 break;
2598 case 0x1:
2599 /* Quadword physical access */
a4af3044 2600 gen_helper_stq_phys(cpu_env, addr, va);
8bb6e981
AJ
2601 break;
2602 case 0x2:
2603 /* Longword physical access with lock */
a4af3044 2604 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
8bb6e981
AJ
2605 break;
2606 case 0x3:
2607 /* Quadword physical access with lock */
a4af3044 2608 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
8bb6e981
AJ
2609 break;
2610 case 0x4:
2611 /* Longword virtual access */
2374e73e 2612 goto invalid_opc;
8bb6e981
AJ
2613 case 0x5:
2614 /* Quadword virtual access */
2374e73e 2615 goto invalid_opc;
8bb6e981
AJ
2616 case 0x6:
2617 /* Invalid */
2618 goto invalid_opc;
2619 case 0x7:
2620 /* Invalid */
2621 goto invalid_opc;
2622 case 0x8:
2623 /* Invalid */
2624 goto invalid_opc;
2625 case 0x9:
2626 /* Invalid */
2627 goto invalid_opc;
2628 case 0xA:
2629 /* Invalid */
2630 goto invalid_opc;
2631 case 0xB:
2632 /* Invalid */
2633 goto invalid_opc;
2634 case 0xC:
2635 /* Longword virtual access with alternate access mode */
2374e73e 2636 goto invalid_opc;
8bb6e981
AJ
2637 case 0xD:
2638 /* Quadword virtual access with alternate access mode */
2374e73e 2639 goto invalid_opc;
8bb6e981
AJ
2640 case 0xE:
2641 /* Invalid */
2642 goto invalid_opc;
2643 case 0xF:
2644 /* Invalid */
2645 goto invalid_opc;
2646 }
8bb6e981 2647 tcg_temp_free(addr);
a18ad893 2648 break;
4c9649a9 2649 }
5238c886 2650#else
a18ad893 2651 goto invalid_opc;
5238c886 2652#endif
4c9649a9
JM
2653 case 0x20:
2654 /* LDF */
f18cd223 2655 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2656 break;
2657 case 0x21:
2658 /* LDG */
f18cd223 2659 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2660 break;
2661 case 0x22:
2662 /* LDS */
f18cd223 2663 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2664 break;
2665 case 0x23:
2666 /* LDT */
f18cd223 2667 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2668 break;
2669 case 0x24:
2670 /* STF */
6910b8f6 2671 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2672 break;
2673 case 0x25:
2674 /* STG */
6910b8f6 2675 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2676 break;
2677 case 0x26:
2678 /* STS */
6910b8f6 2679 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
2680 break;
2681 case 0x27:
2682 /* STT */
6910b8f6 2683 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2684 break;
2685 case 0x28:
2686 /* LDL */
f18cd223 2687 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2688 break;
2689 case 0x29:
2690 /* LDQ */
f18cd223 2691 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2692 break;
2693 case 0x2A:
2694 /* LDL_L */
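/* The locked loads record the address and value for a later
   store-conditional; STL_C/STQ_C (0x2E/0x2F below) check against that
   recorded state. */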
f4ed8679 2695 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2696 break;
2697 case 0x2B:
2698 /* LDQ_L */
f4ed8679 2699 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2700 break;
2701 case 0x2C:
2702 /* STL */
6910b8f6 2703 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
2704 break;
2705 case 0x2D:
2706 /* STQ */
6910b8f6 2707 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2708 break;
2709 case 0x2E:
2710 /* STL_C */
6910b8f6 2711 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
2712 break;
2713 case 0x2F:
2714 /* STQ_C */
6910b8f6 2715 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
2716 break;
2717 case 0x30:
2718 /* BR */
4af70374 2719 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 2720 break;
a7812ae4 2721 case 0x31: /* FBEQ */
4af70374 2722 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 2723 break;
a7812ae4 2724 case 0x32: /* FBLT */
4af70374 2725 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 2726 break;
a7812ae4 2727 case 0x33: /* FBLE */
4af70374 2728 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
2729 break;
2730 case 0x34:
2731 /* BSR */
4af70374 2732 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 2733 break;
a7812ae4 2734 case 0x35: /* FBNE */
4af70374 2735 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 2736 break;
a7812ae4 2737 case 0x36: /* FBGE */
4af70374 2738 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 2739 break;
a7812ae4 2740 case 0x37: /* FBGT */
4af70374 2741 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
2742 break;
2743 case 0x38:
2744 /* BLBC */
4af70374 2745 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
2746 break;
2747 case 0x39:
2748 /* BEQ */
4af70374 2749 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
2750 break;
2751 case 0x3A:
2752 /* BLT */
4af70374 2753 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
2754 break;
2755 case 0x3B:
2756 /* BLE */
4af70374 2757 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
2758 break;
2759 case 0x3C:
2760 /* BLBS */
4af70374 2761 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
2762 break;
2763 case 0x3D:
2764 /* BNE */
4af70374 2765 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
2766 break;
2767 case 0x3E:
2768 /* BGE */
4af70374 2769 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
2770 break;
2771 case 0x3F:
2772 /* BGT */
4af70374 2773 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
2774 break;
2775 invalid_opc:
8aa3fa20 2776 ret = gen_invalid(ctx);
4c9649a9
JM
2777 break;
2778 }
2779
2780 return ret;
2781}
2782
86a35f7c 2783static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
636aa200 2784 TranslationBlock *tb,
86a35f7c 2785 bool search_pc)
4c9649a9 2786{
ed2803da 2787 CPUState *cs = CPU(cpu);
86a35f7c 2788 CPUAlphaState *env = &cpu->env;
4c9649a9
JM
2789 DisasContext ctx, *ctxp = &ctx;
2790 target_ulong pc_start;
b114b68a 2791 target_ulong pc_mask;
4c9649a9 2792 uint32_t insn;
a1d1bb31 2793 CPUBreakpoint *bp;
4c9649a9 2794 int j, lj = -1;
4af70374 2795 ExitStatus ret;
2e70f6ef
PB
2796 int num_insns;
2797 int max_insns;
4c9649a9
JM
2798
2799 pc_start = tb->pc;
4af70374
RH
2800
2801 ctx.tb = tb;
4c9649a9 2802 ctx.pc = pc_start;
bba9bdce 2803 ctx.mem_idx = cpu_mmu_index(env);
801c4c28 2804 ctx.implver = env->implver;
ed2803da 2805 ctx.singlestep_enabled = cs->singlestep_enabled;
f24518b5
RH
2806
2807 /* ??? Every TB begins with unset rounding mode, to be initialized on
2808 the first fp insn of the TB. Alternatively we could define a proper
2809 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2810 to reset the FP_STATUS to that default at the end of any TB that
2811 changes the default. We could even (gasp) dynamically figure out
2812 what default would be most efficient given the running program. */
2813 ctx.tb_rm = -1;
2814 /* Similarly for flush-to-zero. */
2815 ctx.tb_ftz = -1;
2816
2e70f6ef
PB
2817 num_insns = 0;
2818 max_insns = tb->cflags & CF_COUNT_MASK;
b114b68a 2819 if (max_insns == 0) {
2e70f6ef 2820 max_insns = CF_COUNT_MASK;
b114b68a
RH
2821 }
2822
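/* Translation normally stops at a hardware page boundary; inside the
   kernel superpage the mapping is linear, so a TB may instead run up
   to the 41-bit superpage limit. */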
2823 if (in_superpage(&ctx, pc_start)) {
2824 pc_mask = (1ULL << 41) - 1;
2825 } else {
2826 pc_mask = ~TARGET_PAGE_MASK;
2827 }
2e70f6ef 2828
cd42d5b2 2829 gen_tb_start(tb);
4af70374 2830 do {
f0c3c505
AF
2831 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2832 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 2833 if (bp->pc == ctx.pc) {
4c9649a9
JM
2834 gen_excp(&ctx, EXCP_DEBUG, 0);
2835 break;
2836 }
2837 }
2838 }
2839 if (search_pc) {
fe700adb 2840 j = tcg_op_buf_count();
4c9649a9
JM
2841 if (lj < j) {
2842 lj++;
fe700adb 2843 while (lj < j) {
ab1103de 2844 tcg_ctx.gen_opc_instr_start[lj++] = 0;
fe700adb 2845 }
4c9649a9 2846 }
25983cad 2847 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
ab1103de 2848 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 2849 tcg_ctx.gen_opc_icount[lj] = num_insns;
4c9649a9 2850 }
67debe3a 2851 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 2852 gen_io_start();
67debe3a 2853 }
c3082755 2854 insn = cpu_ldl_code(env, ctx.pc);
2e70f6ef 2855 num_insns++;
c4b3be39 2856
fdefe51c 2857 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
c4b3be39
RH
2858 tcg_gen_debug_insn_start(ctx.pc);
2859 }
2860
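/* ctx.zero, ctx.sink and ctx.lit are allocated on demand inside
   translate_one; mark them unused here and release whatever the insn
   actually created once it has been translated. */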
194cfb43
RH
2861 TCGV_UNUSED_I64(ctx.zero);
2862 TCGV_UNUSED_I64(ctx.sink);
2863 TCGV_UNUSED_I64(ctx.lit);
2864
4c9649a9
JM
2865 ctx.pc += 4;
2866 ret = translate_one(ctxp, insn);
19bf517b 2867
194cfb43
RH
2868 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2869 tcg_gen_discard_i64(ctx.sink);
2870 tcg_temp_free(ctx.sink);
2871 }
2872 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2873 tcg_temp_free(ctx.zero);
2874 }
2875 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2876 tcg_temp_free(ctx.lit);
2877 }
2878
bf1b03fe
RH
2879 /* If we reach a page boundary, are single stepping,
2880 or exhaust instruction count, stop generation. */
2881 if (ret == NO_EXIT
b114b68a 2882 && ((ctx.pc & pc_mask) == 0
fe700adb 2883 || tcg_op_buf_full()
bf1b03fe
RH
2884 || num_insns >= max_insns
2885 || singlestep
ca6862a6 2886 || ctx.singlestep_enabled)) {
bf1b03fe 2887 ret = EXIT_PC_STALE;
1b530a6d 2888 }
4af70374
RH
2889 } while (ret == NO_EXIT);
2890
2891 if (tb->cflags & CF_LAST_IO) {
2892 gen_io_end();
4c9649a9 2893 }
4af70374
RH
2894
2895 switch (ret) {
2896 case EXIT_GOTO_TB:
8aa3fa20 2897 case EXIT_NORETURN:
4af70374
RH
2898 break;
2899 case EXIT_PC_STALE:
496cb5b9 2900 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
2901 /* FALLTHRU */
2902 case EXIT_PC_UPDATED:
ca6862a6 2903 if (ctx.singlestep_enabled) {
bf1b03fe
RH
2904 gen_excp_1(EXCP_DEBUG, 0);
2905 } else {
2906 tcg_gen_exit_tb(0);
2907 }
4af70374
RH
2908 break;
2909 default:
2910 abort();
4c9649a9 2911 }
4af70374 2912
806f352d 2913 gen_tb_end(tb, num_insns);
0a7df5da 2914
4c9649a9 2915 if (search_pc) {
fe700adb 2916 j = tcg_op_buf_count();
4c9649a9 2917 lj++;
fe700adb 2918 while (lj <= j) {
ab1103de 2919 tcg_ctx.gen_opc_instr_start[lj++] = 0;
fe700adb 2920 }
4c9649a9
JM
2921 } else {
2922 tb->size = ctx.pc - pc_start;
2e70f6ef 2923 tb->icount = num_insns;
4c9649a9 2924 }
4af70374 2925
806991da 2926#ifdef DEBUG_DISAS
8fec2b8c 2927 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39 2928 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 2929 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
93fcfe39 2930 qemu_log("\n");
4c9649a9 2931 }
4c9649a9 2932#endif
4c9649a9
JM
2933}
2934
4d5712f1 2935void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 2936{
86a35f7c 2937 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
4c9649a9
JM
2938}
2939
4d5712f1 2940void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 2941{
86a35f7c 2942 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
4c9649a9
JM
2943}
2944
4d5712f1 2945void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 2946{
25983cad 2947 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
d2856f1a 2948}