/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb. No fixup required. */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    done_init = 1;
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_local_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_local_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}
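
/* With this source/sink scheme, reads of register 31 ($31 or $f31) all
   yield the shared constant-zero temporary from load_zero(), while writes
   to register 31 are directed at the scratch "sink" temporary and simply
   discarded, so the individual emitters below no longer need explicit
   rc == 31 special cases. */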

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
       prefetches, which we can treat as nops. No worries about
       missed exceptions here. */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything. The user can't tell
           the difference, since the zero register always reads zero. */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop. Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary. */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence. */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
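
/* Illustration: a kernel-mode KSEG address (e.g. 0xfffffc0000000000 in the
   usual OSF/1 layout) is negative, has bits <42:41> == 2, and is properly
   sign-extended from the implemented virtual address width, so it passes
   the test above; user-mode addresses never do. */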

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO. */
    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change. */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB. */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC. */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND. */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare. */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
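
/* Example: for a GE or LT comparison the sequence above maps the IEEE -0.0
   pattern 0x8000000000000000 to integer 0 (+0.0) while leaving every other
   input unchanged, so a plain signed integer compare then gives the right
   answer for both signed zeros. */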

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison. */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

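/* These bits live in the 11-bit function field (fn11) of floating-point
   operate instructions, above the 6-bit base function code in fn11<5:0>.
   For example, an fn11 with QUAL_S, QUAL_U and QUAL_I all set requests
   software completion with underflow and inexact detection enabled. */
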
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps. A simple userspace emulation
       does not require this. We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64. */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored. */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output. */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled. Special case that. */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact. Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored. */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
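
/* For example, lit == 0x0f selects the low four byte lanes and yields the
   mask 0x00000000ffffffff, while lit == 0x80 selects only the most
   significant byte, yielding 0xff00000000000000. */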

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND. This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
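
/* The special cases above are just byte masks whose AND is already a single
   TCG operation: 0x01, 0x03 and 0x0f are the 8/16/32-bit zero-extensions
   and 0xff is a plain move; any other mask falls back to an immediate AND
   with the expanded 64-bit mask. */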

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end. This is equivalent to simply
       performing the zap first and shifting afterward. */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
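
/* Worked example of the split shift: for (B & 7) == 2 the required right
   shift is 64 - 16 = 48; ~(2 * 8) & 63 == 47, and the extra shift by 1
   brings the total to 48. For (B & 7) == 0 the two parts are 63 + 1 = 64,
   which correctly produces zero without relying on an out-of-range 64-bit
   shift count. */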

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end. This is equivalent to simply performing the zap
       first and shifting afterward. */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>. This can be
           emulated with a right-shift on the expanded byte mask. This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero. This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift. The code we expand below is
           equivalent to ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode. Many of these are trivial access
       to internal cpu registers. */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU. */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU. */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU. */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits. */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user. */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check. We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register. */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading. */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME. */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore. */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL. Since the base register usually only
           changes during boot, flushing everything works well. */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY*/

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit;
    TCGv va, vb, vc, tmp;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else {
        lit = (insn >> 13) & 0xFF;
    }
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
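
    /* For reference: bits <31:26> of the instruction are the opcode,
       <25:21> ra, <4:0> rc, and <11:5> the function code (fn7, or fn11
       including the qualifier bits for FP operates). Bit 12 selects
       between a register operand in rb<20:16> and an 8-bit literal in
       <20:13>. */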
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads. */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL. */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ. */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            gen_helper_addlv(vc, cpu_env, va, vb);
            break;
        case 0x49:
            /* SUBL/V */
            gen_helper_sublv(vc, cpu_env, va, vb);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_helper_addqv(vc, cpu_env, va, vb);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_helper_subqv(vc, cpu_env, va, vb);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

4c9649a9 1928 case 0x11:
db4a1645
RH
1929 if (fn7 == 0x20) {
1930 if (rc == 31) {
1931 /* Special case BIS as NOP. */
1932 break;
1933 }
1934 if (ra == 31) {
1935 /* Special case BIS as MOV. */
1936 vc = dest_gpr(ctx, rc);
1937 if (islit) {
1938 tcg_gen_movi_i64(vc, lit);
67debe3a 1939 } else {
db4a1645 1940 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
67debe3a 1941 }
db4a1645 1942 break;
30c7183b 1943 }
db4a1645
RH
1944 }
1945
1946 vc = dest_gpr(ctx, rc);
1947 vb = load_gpr_lit(ctx, rb, lit, islit);
1948
1949 if (fn7 == 0x28 && ra == 31) {
1950 /* Special case ORNOT as NOT. */
1951 tcg_gen_not_i64(vc, vb);
1952 break;
1953 }
1954
1955 va = load_gpr(ctx, ra);
1956 switch (fn7) {
1957 case 0x00:
1958 /* AND */
1959 tcg_gen_and_i64(vc, va, vb);
4c9649a9
JM
1960 break;
1961 case 0x08:
1962 /* BIC */
db4a1645 1963 tcg_gen_andc_i64(vc, va, vb);
4c9649a9
JM
1964 break;
1965 case 0x14:
1966 /* CMOVLBS */
83ebb7cd
RH
1967 tmp = tcg_temp_new();
1968 tcg_gen_andi_i64(tmp, va, 1);
1969 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1970 vb, load_gpr(ctx, rc));
1971 tcg_temp_free(tmp);
4c9649a9
JM
1972 break;
1973 case 0x16:
1974 /* CMOVLBC */
83ebb7cd
RH
1975 tmp = tcg_temp_new();
1976 tcg_gen_andi_i64(tmp, va, 1);
1977 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1978 vb, load_gpr(ctx, rc));
1979 tcg_temp_free(tmp);
4c9649a9
JM
1980 break;
1981 case 0x20:
1982 /* BIS */
db4a1645 1983 tcg_gen_or_i64(vc, va, vb);
4c9649a9
JM
1984 break;
1985 case 0x24:
1986 /* CMOVEQ */
83ebb7cd
RH
1987 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1988 vb, load_gpr(ctx, rc));
4c9649a9
JM
1989 break;
1990 case 0x26:
1991 /* CMOVNE */
83ebb7cd
RH
1992 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1993 vb, load_gpr(ctx, rc));
4c9649a9
JM
1994 break;
1995 case 0x28:
1996 /* ORNOT */
db4a1645 1997 tcg_gen_orc_i64(vc, va, vb);
4c9649a9
JM
1998 break;
1999 case 0x40:
2000 /* XOR */
db4a1645 2001 tcg_gen_xor_i64(vc, va, vb);
4c9649a9
JM
2002 break;
2003 case 0x44:
2004 /* CMOVLT */
83ebb7cd
RH
2005 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
2006 vb, load_gpr(ctx, rc));
4c9649a9
JM
2007 break;
2008 case 0x46:
2009 /* CMOVGE */
83ebb7cd
RH
2010 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
2011 vb, load_gpr(ctx, rc));
4c9649a9
JM
2012 break;
2013 case 0x48:
2014 /* EQV */
db4a1645 2015 tcg_gen_eqv_i64(vc, va, vb);
4c9649a9
JM
2016 break;
2017 case 0x61:
2018 /* AMASK */
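/* AMASK returns Rb with the bits for implemented architecture extensions
   cleared; the implemented-feature mask is carried in the TB flags. */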
64f45e49 2019 REQUIRE_REG_31(ra);
db4a1645 2020 {
a18ad893 2021 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
db4a1645 2022 tcg_gen_andi_i64(vc, vb, ~amask);
ae8ecd42 2023 }
4c9649a9
JM
2024 break;
2025 case 0x64:
2026 /* CMOVLE */
83ebb7cd
RH
2027 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
2028 vb, load_gpr(ctx, rc));
4c9649a9
JM
2029 break;
2030 case 0x66:
2031 /* CMOVGT */
83ebb7cd
RH
2032 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
2033 vb, load_gpr(ctx, rc));
4c9649a9
JM
2034 break;
2035 case 0x6C:
2036 /* IMPLVER */
64f45e49 2037 REQUIRE_REG_31(ra);
db4a1645 2038 tcg_gen_movi_i64(vc, ctx->implver);
4c9649a9
JM
2039 break;
2040 default:
2041 goto invalid_opc;
2042 }
2043 break;
3bd67b7d 2044
4c9649a9 2045 case 0x12:
3bd67b7d
RH
2046 vc = dest_gpr(ctx, rc);
2047 va = load_gpr(ctx, ra);
4c9649a9
JM
2048 switch (fn7) {
2049 case 0x02:
2050 /* MSKBL */
9a8fa1bd 2051 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
4c9649a9
JM
2052 break;
2053 case 0x06:
2054 /* EXTBL */
9a734d64 2055 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
4c9649a9
JM
2056 break;
2057 case 0x0B:
2058 /* INSBL */
5e5863ec 2059 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
4c9649a9
JM
2060 break;
2061 case 0x12:
2062 /* MSKWL */
9a8fa1bd 2063 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2064 break;
2065 case 0x16:
2066 /* EXTWL */
9a734d64 2067 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2068 break;
2069 case 0x1B:
2070 /* INSWL */
5e5863ec 2071 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2072 break;
2073 case 0x22:
2074 /* MSKLL */
9a8fa1bd 2075 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2076 break;
2077 case 0x26:
2078 /* EXTLL */
9a734d64 2079 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2080 break;
2081 case 0x2B:
2082 /* INSLL */
5e5863ec 2083 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2084 break;
2085 case 0x30:
2086 /* ZAP */
b144be9e
RH
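/* ZAP clears each byte of Ra whose bit in the low 8 bits of Rb is set.
   With a literal mask this reduces to ZAPNOT of the complemented mask;
   otherwise fall back to the helper. */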
2087 if (islit) {
2088 gen_zapnoti(vc, va, ~lit);
2089 } else {
2090 gen_helper_zap(vc, va, load_gpr(ctx, rb));
2091 }
4c9649a9
JM
2092 break;
2093 case 0x31:
2094 /* ZAPNOT */
b144be9e
RH
2095 if (islit) {
2096 gen_zapnoti(vc, va, lit);
2097 } else {
2098 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
2099 }
4c9649a9
JM
2100 break;
2101 case 0x32:
2102 /* MSKQL */
9a8fa1bd 2103 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2104 break;
2105 case 0x34:
2106 /* SRL */
3bd67b7d
RH
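/* Alpha shifts use only the low 6 bits of the count, so the register form
   masks Rb with 0x3f; a literal count is masked at translate time. The
   same applies to SLL and SRA below. */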
2107 if (islit) {
2108 tcg_gen_shri_i64(vc, va, lit & 0x3f);
2109 } else {
2110 tmp = tcg_temp_new();
2111 vb = load_gpr(ctx, rb);
2112 tcg_gen_andi_i64(tmp, vb, 0x3f);
2113 tcg_gen_shr_i64(vc, va, tmp);
2114 tcg_temp_free(tmp);
30c7183b 2115 }
4c9649a9
JM
2116 break;
2117 case 0x36:
2118 /* EXTQL */
9a734d64 2119 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2120 break;
2121 case 0x39:
2122 /* SLL */
3bd67b7d
RH
2123 if (islit) {
2124 tcg_gen_shli_i64(vc, va, lit & 0x3f);
2125 } else {
2126 tmp = tcg_temp_new();
2127 vb = load_gpr(ctx, rb);
2128 tcg_gen_andi_i64(tmp, vb, 0x3f);
2129 tcg_gen_shl_i64(vc, va, tmp);
2130 tcg_temp_free(tmp);
30c7183b 2131 }
4c9649a9
JM
2132 break;
2133 case 0x3B:
2134 /* INSQL */
5e5863ec 2135 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2136 break;
2137 case 0x3C:
2138 /* SRA */
3bd67b7d
RH
2139 if (islit) {
2140 tcg_gen_sari_i64(vc, va, lit & 0x3f);
2141 } else {
2142 tmp = tcg_temp_new();
2143 vb = load_gpr(ctx, rb);
2144 tcg_gen_andi_i64(tmp, vb, 0x3f);
2145 tcg_gen_sar_i64(vc, va, tmp);
2146 tcg_temp_free(tmp);
30c7183b 2147 }
4c9649a9
JM
2148 break;
2149 case 0x52:
2150 /* MSKWH */
9a8fa1bd 2151 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2152 break;
2153 case 0x57:
2154 /* INSWH */
5e5863ec 2155 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2156 break;
2157 case 0x5A:
2158 /* EXTWH */
9a734d64 2159 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
4c9649a9
JM
2160 break;
2161 case 0x62:
2162 /* MSKLH */
9a8fa1bd 2163 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2164 break;
2165 case 0x67:
2166 /* INSLH */
5e5863ec 2167 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2168 break;
2169 case 0x6A:
2170 /* EXTLH */
9a734d64 2171 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
4c9649a9
JM
2172 break;
2173 case 0x72:
2174 /* MSKQH */
9a8fa1bd 2175 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2176 break;
2177 case 0x77:
2178 /* INSQH */
5e5863ec 2179 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2180 break;
2181 case 0x7A:
2182 /* EXTQH */
9a734d64 2183 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
4c9649a9
JM
2184 break;
2185 default:
2186 goto invalid_opc;
2187 }
2188 break;
de4d3555 2189
4c9649a9 2190 case 0x13:
de4d3555
RH
2191 vc = dest_gpr(ctx, rc);
2192 vb = load_gpr_lit(ctx, rb, lit, islit);
2193 va = load_gpr(ctx, ra);
4c9649a9
JM
2194 switch (fn7) {
2195 case 0x00:
2196 /* MULL */
de4d3555
RH
2197 tcg_gen_mul_i64(vc, va, vb);
2198 tcg_gen_ext32s_i64(vc, vc);
4c9649a9
JM
2199 break;
2200 case 0x20:
2201 /* MULQ */
de4d3555 2202 tcg_gen_mul_i64(vc, va, vb);
4c9649a9
JM
2203 break;
2204 case 0x30:
2205 /* UMULH */
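/* UMULH is the high 64 bits of the unsigned 128-bit product: mulu2 writes
   the low half into a scratch temp (discarded) and the high half into vc. */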
de4d3555
RH
2206 tmp = tcg_temp_new();
2207 tcg_gen_mulu2_i64(tmp, vc, va, vb);
2208 tcg_temp_free(tmp);
4c9649a9
JM
2209 break;
2210 case 0x40:
2211 /* MULL/V */
42774a56 2212 gen_helper_mullv(vc, cpu_env, va, vb);
4c9649a9
JM
2213 break;
2214 case 0x60:
2215 /* MULQ/V */
42774a56 2216 gen_helper_mulqv(vc, cpu_env, va, vb);
4c9649a9
JM
2217 break;
2218 default:
2219 goto invalid_opc;
2220 }
2221 break;
075b8ddb 2222
4c9649a9 2223 case 0x14:
5238c886 2224 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
f24518b5 2225 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2226 case 0x04:
2227 /* ITOFS */
64f45e49 2228 REQUIRE_REG_31(rb);
075b8ddb
RH
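/* ITOFS moves the low 32 bits of Ra into Fc as an S-format bit pattern;
   memory_to_s only rearranges bits from the memory layout to the register
   layout, so no rounding or arithmetic conversion takes place. */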
2229 t32 = tcg_temp_new_i32();
2230 va = load_gpr(ctx, ra);
2231 vc = dest_fpr(ctx, rc);
2232 tcg_gen_trunc_i64_i32(t32, va);
2233 gen_helper_memory_to_s(vc, t32);
2234 tcg_temp_free_i32(t32);
4c9649a9
JM
2235 break;
2236 case 0x0A:
2237 /* SQRTF */
64f45e49 2238 REQUIRE_REG_31(ra);
5238c886
RH
2239 gen_fsqrtf(rb, rc);
2240 break;
4c9649a9
JM
2241 case 0x0B:
2242 /* SQRTS */
64f45e49 2243 REQUIRE_REG_31(ra);
5238c886
RH
2244 gen_fsqrts(ctx, rb, rc, fn11);
2245 break;
4c9649a9
JM
2246 case 0x14:
2247 /* ITOFF */
64f45e49 2248 REQUIRE_REG_31(rb);
075b8ddb
RH
2249 t32 = tcg_temp_new_i32();
2250 va = load_gpr(ctx, ra);
2251 vc = dest_fpr(ctx, rc);
2252 tcg_gen_trunc_i64_i32(t32, va);
2253 gen_helper_memory_to_f(vc, t32);
2254 tcg_temp_free_i32(t32);
4c9649a9
JM
2255 break;
2256 case 0x24:
2257 /* ITOFT */
64f45e49 2258 REQUIRE_REG_31(rb);
075b8ddb
RH
2259 va = load_gpr(ctx, ra);
2260 vc = dest_fpr(ctx, rc);
2261 tcg_gen_mov_i64(vc, va);
4c9649a9
JM
2262 break;
2263 case 0x2A:
2264 /* SQRTG */
64f45e49 2265 REQUIRE_REG_31(ra);
5238c886
RH
2266 gen_fsqrtg(rb, rc);
2267 break;
4c9649a9
JM
2268 case 0x2B:
2269 /* SQRTT */
64f45e49 2270 REQUIRE_REG_31(ra);
5238c886
RH
2271 gen_fsqrtt(ctx, rb, rc, fn11);
2272 break;
4c9649a9
JM
2273 default:
2274 goto invalid_opc;
2275 }
2276 break;
6b88b37c 2277
4c9649a9
JM
2278 case 0x15:
2279 /* VAX floating point */
2280 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2281 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2282 case 0x00:
2283 /* ADDF */
a7812ae4 2284 gen_faddf(ra, rb, rc);
4c9649a9
JM
2285 break;
2286 case 0x01:
2287 /* SUBF */
a7812ae4 2288 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2289 break;
2290 case 0x02:
2291 /* MULF */
a7812ae4 2292 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2293 break;
2294 case 0x03:
2295 /* DIVF */
a7812ae4 2296 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2297 break;
2298 case 0x1E:
64f45e49
RH
2299 /* CVTDG -- TODO */
2300 REQUIRE_REG_31(ra);
4c9649a9 2301 goto invalid_opc;
4c9649a9
JM
2302 case 0x20:
2303 /* ADDG */
a7812ae4 2304 gen_faddg(ra, rb, rc);
4c9649a9
JM
2305 break;
2306 case 0x21:
2307 /* SUBG */
a7812ae4 2308 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2309 break;
2310 case 0x22:
2311 /* MULG */
a7812ae4 2312 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2313 break;
2314 case 0x23:
2315 /* DIVG */
a7812ae4 2316 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2317 break;
2318 case 0x25:
2319 /* CMPGEQ */
a7812ae4 2320 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2321 break;
2322 case 0x26:
2323 /* CMPGLT */
a7812ae4 2324 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2325 break;
2326 case 0x27:
2327 /* CMPGLE */
a7812ae4 2328 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2329 break;
2330 case 0x2C:
2331 /* CVTGF */
64f45e49 2332 REQUIRE_REG_31(ra);
a7812ae4 2333 gen_fcvtgf(rb, rc);
4c9649a9
JM
2334 break;
2335 case 0x2D:
64f45e49
RH
2336 /* CVTGD -- TODO */
2337 REQUIRE_REG_31(ra);
4c9649a9 2338 goto invalid_opc;
4c9649a9
JM
2339 case 0x2F:
2340 /* CVTGQ */
64f45e49 2341 REQUIRE_REG_31(ra);
a7812ae4 2342 gen_fcvtgq(rb, rc);
4c9649a9
JM
2343 break;
2344 case 0x3C:
2345 /* CVTQF */
64f45e49 2346 REQUIRE_REG_31(ra);
a7812ae4 2347 gen_fcvtqf(rb, rc);
4c9649a9
JM
2348 break;
2349 case 0x3E:
2350 /* CVTQG */
64f45e49 2351 REQUIRE_REG_31(ra);
a7812ae4 2352 gen_fcvtqg(rb, rc);
4c9649a9
JM
2353 break;
2354 default:
2355 goto invalid_opc;
2356 }
2357 break;
6b88b37c 2358
4c9649a9
JM
2359 case 0x16:
2360 /* IEEE floating-point */
f24518b5 2361 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2362 case 0x00:
2363 /* ADDS */
f24518b5 2364 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2365 break;
2366 case 0x01:
2367 /* SUBS */
f24518b5 2368 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2369 break;
2370 case 0x02:
2371 /* MULS */
f24518b5 2372 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2373 break;
2374 case 0x03:
2375 /* DIVS */
f24518b5 2376 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2377 break;
2378 case 0x20:
2379 /* ADDT */
f24518b5 2380 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2381 break;
2382 case 0x21:
2383 /* SUBT */
f24518b5 2384 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2385 break;
2386 case 0x22:
2387 /* MULT */
f24518b5 2388 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2389 break;
2390 case 0x23:
2391 /* DIVT */
f24518b5 2392 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2393 break;
2394 case 0x24:
2395 /* CMPTUN */
f24518b5 2396 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2397 break;
2398 case 0x25:
2399 /* CMPTEQ */
f24518b5 2400 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2401 break;
2402 case 0x26:
2403 /* CMPTLT */
f24518b5 2404 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2405 break;
2406 case 0x27:
2407 /* CMPTLE */
f24518b5 2408 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2409 break;
2410 case 0x2C:
64f45e49 2411 REQUIRE_REG_31(ra);
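/* Function slot 0x2C is shared: the full fn11 encodings 0x2AC (CVTST) and
   0x6AC (CVTST/S) select CVTST; every other qualifier combination is CVTTS. */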
a74b4d2c 2412 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2413 /* CVTST */
f24518b5 2414 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2415 } else {
2416 /* CVTTS */
f24518b5 2417 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2418 }
2419 break;
2420 case 0x2F:
2421 /* CVTTQ */
64f45e49 2422 REQUIRE_REG_31(ra);
f24518b5 2423 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2424 break;
2425 case 0x3C:
2426 /* CVTQS */
64f45e49 2427 REQUIRE_REG_31(ra);
f24518b5 2428 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2429 break;
2430 case 0x3E:
2431 /* CVTQT */
64f45e49 2432 REQUIRE_REG_31(ra);
f24518b5 2433 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2434 break;
2435 default:
2436 goto invalid_opc;
2437 }
2438 break;
6b88b37c 2439
4c9649a9
JM
2440 case 0x17:
2441 switch (fn11) {
2442 case 0x010:
2443 /* CVTLQ */
64f45e49 2444 REQUIRE_REG_31(ra);
a7812ae4 2445 gen_fcvtlq(rb, rc);
4c9649a9
JM
2446 break;
2447 case 0x020:
6b88b37c
RH
2448 /* CPYS */
2449 if (rc == 31) {
2450 /* Special case CPYS as FNOP. */
2451 } else if (ra == rb) {
2452 vc = dest_fpr(ctx, rc);
2453 /* Special case CPYS as FMOV. */
2454 if (ra == 31) {
2455 tcg_gen_movi_i64(vc, 0);
a06d48d9 2456 } else {
6b88b37c
RH
2457 va = load_fpr(ctx, ra);
2458 tcg_gen_mov_i64(vc, va);
a06d48d9 2459 }
6b88b37c
RH
2460 } else {
2461 gen_fcpys(ra, rb, rc);
4c9649a9
JM
2462 }
2463 break;
2464 case 0x021:
2465 /* CPYSN */
a7812ae4 2466 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2467 break;
2468 case 0x022:
2469 /* CPYSE */
a7812ae4 2470 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2471 break;
2472 case 0x024:
2473 /* MT_FPCR */
6b88b37c
RH
2474 va = load_fpr(ctx, ra);
2475 gen_helper_store_fpcr(cpu_env, va);
4c9649a9
JM
2476 break;
2477 case 0x025:
2478 /* MF_FPCR */
6b88b37c
RH
2479 va = dest_fpr(ctx, ra);
2480 gen_helper_load_fpcr(va, cpu_env);
4c9649a9
JM
2481 break;
2482 case 0x02A:
2483 /* FCMOVEQ */
bbe1dab4 2484 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2485 break;
2486 case 0x02B:
2487 /* FCMOVNE */
bbe1dab4 2488 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2489 break;
2490 case 0x02C:
2491 /* FCMOVLT */
bbe1dab4 2492 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2493 break;
2494 case 0x02D:
2495 /* FCMOVGE */
bbe1dab4 2496 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2497 break;
2498 case 0x02E:
2499 /* FCMOVLE */
bbe1dab4 2500 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2501 break;
2502 case 0x02F:
2503 /* FCMOVGT */
bbe1dab4 2504 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2505 break;
2506 case 0x030:
2507 /* CVTQL */
64f45e49 2508 REQUIRE_REG_31(ra);
a7812ae4 2509 gen_fcvtql(rb, rc);
4c9649a9
JM
2510 break;
2511 case 0x130:
2512 /* CVTQL/V */
4c9649a9
JM
2513 case 0x530:
2514 /* CVTQL/SV */
64f45e49 2515 REQUIRE_REG_31(ra);
735cf45f
RH
2516 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2517 /v doesn't do. The only thing I can think of is that /sv is a
2518 valid instruction merely for completeness in the ISA. */
2519 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2520 break;
2521 default:
2522 goto invalid_opc;
2523 }
2524 break;
89fe090b 2525
4c9649a9
JM
2526 case 0x18:
2527 switch ((uint16_t)disp16) {
2528 case 0x0000:
2529 /* TRAPB */
4af70374 2530 /* No-op. */
4c9649a9
JM
2531 break;
2532 case 0x0400:
2533 /* EXCB */
4af70374 2534 /* No-op. */
4c9649a9
JM
2535 break;
2536 case 0x4000:
2537 /* MB */
2538 /* No-op */
2539 break;
2540 case 0x4400:
2541 /* WMB */
2542 /* No-op */
2543 break;
2544 case 0x8000:
2545 /* FETCH */
2546 /* No-op */
2547 break;
2548 case 0xA000:
2549 /* FETCH_M */
2550 /* No-op */
2551 break;
2552 case 0xC000:
2553 /* RPCC */
89fe090b
RH
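/* With icount enabled, reading the cycle counter counts as an I/O access:
   bracket the helper with gen_io_start/gen_io_end and end the TB
   (EXIT_PC_STALE) so the instruction count stays exact. */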
2554 va = dest_gpr(ctx, ra);
2555 if (use_icount) {
2556 gen_io_start();
2557 gen_helper_load_pcc(va, cpu_env);
2558 gen_io_end();
2559 ret = EXIT_PC_STALE;
2560 } else {
2561 gen_helper_load_pcc(va, cpu_env);
a9406ea1 2562 }
4c9649a9
JM
2563 break;
2564 case 0xE000:
2565 /* RC */
ac316ca4 2566 gen_rx(ra, 0);
4c9649a9
JM
2567 break;
2568 case 0xE800:
2569 /* ECB */
4c9649a9
JM
2570 break;
2571 case 0xF000:
2572 /* RS */
ac316ca4 2573 gen_rx(ra, 1);
4c9649a9
JM
2574 break;
2575 case 0xF800:
2576 /* WH64 */
2577 /* No-op */
2578 break;
2579 default:
2580 goto invalid_opc;
2581 }
2582 break;
8f56ced8 2583
4c9649a9
JM
2584 case 0x19:
2585 /* HW_MFPR (PALcode) */
26b46094 2586#ifndef CONFIG_USER_ONLY
5238c886
RH
2587 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2588 return gen_mfpr(ra, insn & 0xffff);
2589#else
4c9649a9 2590 goto invalid_opc;
5238c886 2591#endif
8f56ced8 2592
4c9649a9 2593 case 0x1A:
49563a72
RH
2594 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2595 prediction stack action, which of course we don't implement. */
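/* The two low bits of the target are branch-prediction hints and are
   ignored; if Ra is not $31 it receives the return address, which ctx->pc
   already holds because it was advanced before translate_one was called. */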
8f56ced8
RH
2596 vb = load_gpr(ctx, rb);
2597 tcg_gen_andi_i64(cpu_pc, vb, ~3);
49563a72 2598 if (ra != 31) {
1304ca87 2599 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2600 }
4af70374 2601 ret = EXIT_PC_UPDATED;
4c9649a9 2602 break;
1eaa1da7 2603
4c9649a9
JM
2604 case 0x1B:
2605 /* HW_LD (PALcode) */
a18ad893 2606#ifndef CONFIG_USER_ONLY
5238c886
RH
2607 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2608 {
1eaa1da7
RH
2609 TCGv addr = tcg_temp_new();
2610 vb = load_gpr(ctx, rb);
2611 va = dest_gpr(ctx, ra);
a18ad893 2612
1eaa1da7 2613 tcg_gen_addi_i64(addr, vb, disp12);
8bb6e981
AJ
2614 switch ((insn >> 12) & 0xF) {
2615 case 0x0:
b5d51029 2616 /* Longword physical access (hw_ldl/p) */
1eaa1da7 2617 gen_helper_ldl_phys(va, cpu_env, addr);
8bb6e981
AJ
2618 break;
2619 case 0x1:
b5d51029 2620 /* Quadword physical access (hw_ldq/p) */
1eaa1da7 2621 gen_helper_ldq_phys(va, cpu_env, addr);
8bb6e981
AJ
2622 break;
2623 case 0x2:
b5d51029 2624 /* Longword physical access with lock (hw_ldl_l/p) */
1eaa1da7 2625 gen_helper_ldl_l_phys(va, cpu_env, addr);
8bb6e981
AJ
2626 break;
2627 case 0x3:
b5d51029 2628 /* Quadword physical access with lock (hw_ldq_l/p) */
1eaa1da7 2629 gen_helper_ldq_l_phys(va, cpu_env, addr);
8bb6e981
AJ
2630 break;
2631 case 0x4:
b5d51029 2632 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2633 goto invalid_opc;
8bb6e981 2634 case 0x5:
b5d51029 2635 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2636 goto invalid_opc;
8bb6e981
AJ
2637 break;
2638 case 0x6:
2639 /* Invalid */
b5d51029 2640 goto invalid_opc;
8bb6e981
AJ
2641 case 0x7:
2642 /* Invalid */
b5d51029 2643 goto invalid_opc;
8bb6e981 2644 case 0x8:
b5d51029 2645 /* Longword virtual access (hw_ldl) */
2374e73e 2646 goto invalid_opc;
8bb6e981 2647 case 0x9:
b5d51029 2648 /* Quadword virtual access (hw_ldq) */
2374e73e 2649 goto invalid_opc;
8bb6e981 2650 case 0xA:
b5d51029 2651 /* Longword virtual access with protection check (hw_ldl/w) */
1eaa1da7 2652 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
8bb6e981
AJ
2653 break;
2654 case 0xB:
b5d51029 2655 /* Quadword virtual access with protection check (hw_ldq/w) */
1eaa1da7 2656 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
8bb6e981
AJ
2657 break;
2658 case 0xC:
b5d51029 2659 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2660 goto invalid_opc;
8bb6e981 2661 case 0xD:
b5d51029 2662 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2663 goto invalid_opc;
8bb6e981
AJ
2664 case 0xE:
2665 /* Longword virtual access with alternate access mode and
2374e73e 2666 protection checks (hw_ldl/wa) */
1eaa1da7 2667 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
8bb6e981
AJ
2668 break;
2669 case 0xF:
2670 /* Quadword virtual access with alternate access mode and
2374e73e 2671 protection checks (hw_ldq/wa) */
1eaa1da7 2672 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
8bb6e981
AJ
2673 break;
2674 }
2675 tcg_temp_free(addr);
a18ad893 2676 break;
4c9649a9 2677 }
5238c886 2678#else
a18ad893 2679 goto invalid_opc;
5238c886 2680#endif
c67b67e5 2681
4c9649a9 2682 case 0x1C:
c67b67e5 2683 vc = dest_gpr(ctx, rc);
4c9649a9
JM
2684 switch (fn7) {
2685 case 0x00:
2686 /* SEXTB */
5238c886 2687 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
64f45e49 2688 REQUIRE_REG_31(ra);
c67b67e5
RH
2689 vb = load_gpr_lit(ctx, rb, lit, islit);
2690 tcg_gen_ext8s_i64(vc, vb);
4c9649a9
JM
2691 break;
2692 case 0x01:
2693 /* SEXTW */
5238c886 2694 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
64f45e49 2695 REQUIRE_REG_31(ra);
c67b67e5
RH
2696 vb = load_gpr_lit(ctx, rb, lit, islit);
2697 tcg_gen_ext16s_i64(vc, vb);
5238c886 2698 break;
4c9649a9
JM
2699 case 0x30:
2700 /* CTPOP */
5238c886 2701 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2702 REQUIRE_REG_31(ra);
c67b67e5
RH
2703 vb = load_gpr_lit(ctx, rb, lit, islit);
2704 gen_helper_ctpop(vc, vb);
5238c886 2705 break;
4c9649a9
JM
2706 case 0x31:
2707 /* PERR */
5238c886
RH
2708 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2709 gen_perr(ra, rb, rc, islit, lit);
2710 break;
4c9649a9
JM
2711 case 0x32:
2712 /* CTLZ */
5238c886 2713 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2714 REQUIRE_REG_31(ra);
c67b67e5
RH
2715 vb = load_gpr_lit(ctx, rb, lit, islit);
2716 gen_helper_ctlz(vc, vb);
5238c886 2717 break;
4c9649a9
JM
2718 case 0x33:
2719 /* CTTZ */
5238c886 2720 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
64f45e49 2721 REQUIRE_REG_31(ra);
c67b67e5
RH
2722 vb = load_gpr_lit(ctx, rb, lit, islit);
2723 gen_helper_cttz(vc, vb);
5238c886 2724 break;
4c9649a9
JM
2725 case 0x34:
2726 /* UNPKBW */
5238c886 2727 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2728 REQUIRE_REG_31(ra);
5238c886
RH
2729 gen_unpkbw(rb, rc);
2730 break;
4c9649a9 2731 case 0x35:
13e4df99 2732 /* UNPKBL */
5238c886 2733 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2734 REQUIRE_REG_31(ra);
5238c886
RH
2735 gen_unpkbl(rb, rc);
2736 break;
4c9649a9
JM
2737 case 0x36:
2738 /* PKWB */
5238c886 2739 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2740 REQUIRE_REG_31(ra);
5238c886
RH
2741 gen_pkwb(rb, rc);
2742 break;
4c9649a9
JM
2743 case 0x37:
2744 /* PKLB */
5238c886 2745 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
64f45e49 2746 REQUIRE_REG_31(ra);
5238c886
RH
2747 gen_pklb(rb, rc);
2748 break;
4c9649a9
JM
2749 case 0x38:
2750 /* MINSB8 */
5238c886
RH
2751 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2752 gen_minsb8(ra, rb, rc, islit, lit);
2753 break;
4c9649a9
JM
2754 case 0x39:
2755 /* MINSW4 */
5238c886
RH
2756 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2757 gen_minsw4(ra, rb, rc, islit, lit);
2758 break;
4c9649a9
JM
2759 case 0x3A:
2760 /* MINUB8 */
5238c886
RH
2761 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2762 gen_minub8(ra, rb, rc, islit, lit);
2763 break;
4c9649a9
JM
2764 case 0x3B:
2765 /* MINUW4 */
5238c886
RH
2766 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2767 gen_minuw4(ra, rb, rc, islit, lit);
2768 break;
4c9649a9
JM
2769 case 0x3C:
2770 /* MAXUB8 */
5238c886
RH
2771 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2772 gen_maxub8(ra, rb, rc, islit, lit);
2773 break;
4c9649a9
JM
2774 case 0x3D:
2775 /* MAXUW4 */
5238c886
RH
2776 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2777 gen_maxuw4(ra, rb, rc, islit, lit);
2778 break;
4c9649a9
JM
2779 case 0x3E:
2780 /* MAXSB8 */
5238c886
RH
2781 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2782 gen_maxsb8(ra, rb, rc, islit, lit);
2783 break;
4c9649a9
JM
2784 case 0x3F:
2785 /* MAXSW4 */
5238c886
RH
2786 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2787 gen_maxsw4(ra, rb, rc, islit, lit);
2788 break;
4c9649a9
JM
2789 case 0x70:
2790 /* FTOIT */
5238c886 2791 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
64f45e49 2792 REQUIRE_REG_31(rb);
c67b67e5
RH
2793 va = load_fpr(ctx, ra);
2794 tcg_gen_mov_i64(vc, va);
4c9649a9
JM
2795 break;
2796 case 0x78:
2797 /* FTOIS */
5238c886 2798 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
64f45e49 2799 REQUIRE_REG_31(rb);
c67b67e5
RH
2800 t32 = tcg_temp_new_i32();
2801 va = load_fpr(ctx, ra);
2802 gen_helper_s_to_memory(t32, va);
2803 tcg_gen_ext_i32_i64(vc, t32);
2804 tcg_temp_free_i32(t32);
4c9649a9
JM
2805 break;
2806 default:
2807 goto invalid_opc;
2808 }
2809 break;
46010969 2810
4c9649a9
JM
2811 case 0x1D:
2812 /* HW_MTPR (PALcode) */
26b46094 2813#ifndef CONFIG_USER_ONLY
5238c886
RH
2814 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2815 return gen_mtpr(ctx, rb, insn & 0xffff);
2816#else
4c9649a9 2817 goto invalid_opc;
5238c886 2818#endif
46010969 2819
4c9649a9 2820 case 0x1E:
508b43ea 2821 /* HW_RET (PALcode) */
a18ad893 2822#ifndef CONFIG_USER_ONLY
5238c886
RH
2823 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2824 if (rb == 31) {
2825 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2826 address from EXC_ADDR. This turns out to be useful for our
2827 emulation PALcode, so continue to accept it. */
46010969 2828 tmp = tcg_temp_new();
5238c886
RH
2829 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2830 gen_helper_hw_ret(cpu_env, tmp);
2831 tcg_temp_free(tmp);
2832 } else {
46010969 2833 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
4c9649a9 2834 }
5238c886
RH
2835 ret = EXIT_PC_UPDATED;
2836 break;
2837#else
a18ad893 2838 goto invalid_opc;
5238c886 2839#endif
a4af3044 2840
4c9649a9
JM
2841 case 0x1F:
2842 /* HW_ST (PALcode) */
a18ad893 2843#ifndef CONFIG_USER_ONLY
5238c886
RH
2844 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2845 {
a4af3044
RH
2846 TCGv addr = tcg_temp_new();
2847 va = load_gpr(ctx, ra);
2848 vb = load_gpr(ctx, rb);
2849
2850 tcg_gen_addi_i64(addr, vb, disp12);
8bb6e981
AJ
2851 switch ((insn >> 12) & 0xF) {
2852 case 0x0:
2853 /* Longword physical access */
a4af3044 2854 gen_helper_stl_phys(cpu_env, addr, va);
8bb6e981
AJ
2855 break;
2856 case 0x1:
2857 /* Quadword physical access */
a4af3044 2858 gen_helper_stq_phys(cpu_env, addr, va);
8bb6e981
AJ
2859 break;
2860 case 0x2:
2861 /* Longword physical access with lock */
a4af3044 2862 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
8bb6e981
AJ
2863 break;
2864 case 0x3:
2865 /* Quadword physical access with lock */
a4af3044 2866 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
8bb6e981
AJ
2867 break;
2868 case 0x4:
2869 /* Longword virtual access */
2374e73e 2870 goto invalid_opc;
8bb6e981
AJ
2871 case 0x5:
2872 /* Quadword virtual access */
2374e73e 2873 goto invalid_opc;
8bb6e981
AJ
2874 case 0x6:
2875 /* Invalid */
2876 goto invalid_opc;
2877 case 0x7:
2878 /* Invalid */
2879 goto invalid_opc;
2880 case 0x8:
2881 /* Invalid */
2882 goto invalid_opc;
2883 case 0x9:
2884 /* Invalid */
2885 goto invalid_opc;
2886 case 0xA:
2887 /* Invalid */
2888 goto invalid_opc;
2889 case 0xB:
2890 /* Invalid */
2891 goto invalid_opc;
2892 case 0xC:
2893 /* Longword virtual access with alternate access mode */
2374e73e 2894 goto invalid_opc;
8bb6e981
AJ
2895 case 0xD:
2896 /* Quadword virtual access with alternate access mode */
2374e73e 2897 goto invalid_opc;
8bb6e981
AJ
2898 case 0xE:
2899 /* Invalid */
2900 goto invalid_opc;
2901 case 0xF:
2902 /* Invalid */
2903 goto invalid_opc;
2904 }
8bb6e981 2905 tcg_temp_free(addr);
a18ad893 2906 break;
4c9649a9 2907 }
5238c886 2908#else
a18ad893 2909 goto invalid_opc;
5238c886 2910#endif
4c9649a9
JM
2911 case 0x20:
2912 /* LDF */
f18cd223 2913 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2914 break;
2915 case 0x21:
2916 /* LDG */
f18cd223 2917 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2918 break;
2919 case 0x22:
2920 /* LDS */
f18cd223 2921 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2922 break;
2923 case 0x23:
2924 /* LDT */
f18cd223 2925 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2926 break;
2927 case 0x24:
2928 /* STF */
6910b8f6 2929 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2930 break;
2931 case 0x25:
2932 /* STG */
6910b8f6 2933 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2934 break;
2935 case 0x26:
2936 /* STS */
6910b8f6 2937 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
2938 break;
2939 case 0x27:
2940 /* STT */
6910b8f6 2941 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2942 break;
2943 case 0x28:
2944 /* LDL */
f18cd223 2945 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2946 break;
2947 case 0x29:
2948 /* LDQ */
f18cd223 2949 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2950 break;
2951 case 0x2A:
2952 /* LDL_L */
f4ed8679 2953 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2954 break;
2955 case 0x2B:
2956 /* LDQ_L */
f4ed8679 2957 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2958 break;
2959 case 0x2C:
2960 /* STL */
6910b8f6 2961 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
2962 break;
2963 case 0x2D:
2964 /* STQ */
6910b8f6 2965 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2966 break;
2967 case 0x2E:
2968 /* STL_C */
6910b8f6 2969 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
2970 break;
2971 case 0x2F:
2972 /* STQ_C */
6910b8f6 2973 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
2974 break;
2975 case 0x30:
2976 /* BR */
4af70374 2977 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 2978 break;
a7812ae4 2979 case 0x31: /* FBEQ */
4af70374 2980 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 2981 break;
a7812ae4 2982 case 0x32: /* FBLT */
4af70374 2983 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 2984 break;
a7812ae4 2985 case 0x33: /* FBLE */
4af70374 2986 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
2987 break;
2988 case 0x34:
2989 /* BSR */
4af70374 2990 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 2991 break;
a7812ae4 2992 case 0x35: /* FBNE */
4af70374 2993 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 2994 break;
a7812ae4 2995 case 0x36: /* FBGE */
4af70374 2996 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 2997 break;
a7812ae4 2998 case 0x37: /* FBGT */
4af70374 2999 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3000 break;
3001 case 0x38:
3002 /* BLBC */
4af70374 3003 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3004 break;
3005 case 0x39:
3006 /* BEQ */
4af70374 3007 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3008 break;
3009 case 0x3A:
3010 /* BLT */
4af70374 3011 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3012 break;
3013 case 0x3B:
3014 /* BLE */
4af70374 3015 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3016 break;
3017 case 0x3C:
3018 /* BLBS */
4af70374 3019 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3020 break;
3021 case 0x3D:
3022 /* BNE */
4af70374 3023 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3024 break;
3025 case 0x3E:
3026 /* BGE */
4af70374 3027 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3028 break;
3029 case 0x3F:
3030 /* BGT */
4af70374 3031 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3032 break;
3033 invalid_opc:
8aa3fa20 3034 ret = gen_invalid(ctx);
4c9649a9
JM
3035 break;
3036 }
3037
3038 return ret;
3039}
3040
86a35f7c 3041static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
636aa200 3042 TranslationBlock *tb,
86a35f7c 3043 bool search_pc)
4c9649a9 3044{
ed2803da 3045 CPUState *cs = CPU(cpu);
86a35f7c 3046 CPUAlphaState *env = &cpu->env;
4c9649a9
JM
3047 DisasContext ctx, *ctxp = &ctx;
3048 target_ulong pc_start;
b114b68a 3049 target_ulong pc_mask;
4c9649a9
JM
3050 uint32_t insn;
3051 uint16_t *gen_opc_end;
a1d1bb31 3052 CPUBreakpoint *bp;
4c9649a9 3053 int j, lj = -1;
4af70374 3054 ExitStatus ret;
2e70f6ef
PB
3055 int num_insns;
3056 int max_insns;
4c9649a9
JM
3057
3058 pc_start = tb->pc;
92414b31 3059 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3060
3061 ctx.tb = tb;
4c9649a9 3062 ctx.pc = pc_start;
bba9bdce 3063 ctx.mem_idx = cpu_mmu_index(env);
801c4c28 3064 ctx.implver = env->implver;
ed2803da 3065 ctx.singlestep_enabled = cs->singlestep_enabled;
f24518b5
RH
3066
3067 /* ??? Every TB begins with unset rounding mode, to be initialized on
3068 the first fp insn of the TB. Alternately we could define a proper
3069 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3070 to reset the FP_STATUS to that default at the end of any TB that
3071 changes the default. We could even (gasp) dynamically figure out
3072 what default would be most efficient given the running program. */
3073 ctx.tb_rm = -1;
3074 /* Similarly for flush-to-zero. */
3075 ctx.tb_ftz = -1;
3076
2e70f6ef
PB
3077 num_insns = 0;
3078 max_insns = tb->cflags & CF_COUNT_MASK;
b114b68a 3079 if (max_insns == 0) {
2e70f6ef 3080 max_insns = CF_COUNT_MASK;
b114b68a
RH
3081 }
3082
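/* Within a superpage (the directly mapped kernel region) a TB may cross
   small-page boundaries, so only the region offset limits it; otherwise
   stop translation at the end of the current guest page. */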
3083 if (in_superpage(&ctx, pc_start)) {
3084 pc_mask = (1ULL << 41) - 1;
3085 } else {
3086 pc_mask = ~TARGET_PAGE_MASK;
3087 }
2e70f6ef 3088
806f352d 3089 gen_tb_start();
4af70374 3090 do {
f0c3c505
AF
3091 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
3092 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 3093 if (bp->pc == ctx.pc) {
4c9649a9
JM
3094 gen_excp(&ctx, EXCP_DEBUG, 0);
3095 break;
3096 }
3097 }
3098 }
3099 if (search_pc) {
92414b31 3100 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3101 if (lj < j) {
3102 lj++;
3103 while (lj < j)
ab1103de 3104 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9 3105 }
25983cad 3106 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
ab1103de 3107 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 3108 tcg_ctx.gen_opc_icount[lj] = num_insns;
4c9649a9 3109 }
67debe3a 3110 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 3111 gen_io_start();
67debe3a 3112 }
c3082755 3113 insn = cpu_ldl_code(env, ctx.pc);
2e70f6ef 3114 num_insns++;
c4b3be39 3115
fdefe51c 3116 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
c4b3be39
RH
3117 tcg_gen_debug_insn_start(ctx.pc);
3118 }
3119
194cfb43
RH
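/* ctx.zero, ctx.sink and ctx.lit are allocated on demand by translate_one;
   mark them unused before each insn so the cleanup below knows what to free.
   The sink must also be discarded, since writes to $31/$f31 are not
   architecturally visible. */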
3120 TCGV_UNUSED_I64(ctx.zero);
3121 TCGV_UNUSED_I64(ctx.sink);
3122 TCGV_UNUSED_I64(ctx.lit);
3123
4c9649a9
JM
3124 ctx.pc += 4;
3125 ret = translate_one(ctxp, insn);
19bf517b 3126
194cfb43
RH
3127 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
3128 tcg_gen_discard_i64(ctx.sink);
3129 tcg_temp_free(ctx.sink);
3130 }
3131 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
3132 tcg_temp_free(ctx.zero);
3133 }
3134 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
3135 tcg_temp_free(ctx.lit);
3136 }
3137
bf1b03fe
RH
3138 /* If we reach a page boundary, are single-stepping, or have
3139 exhausted the instruction count, stop generation. */
3140 if (ret == NO_EXIT
b114b68a 3141 && ((ctx.pc & pc_mask) == 0
efd7f486 3142 || tcg_ctx.gen_opc_ptr >= gen_opc_end
bf1b03fe
RH
3143 || num_insns >= max_insns
3144 || singlestep
ca6862a6 3145 || ctx.singlestep_enabled)) {
bf1b03fe 3146 ret = EXIT_PC_STALE;
1b530a6d 3147 }
4af70374
RH
3148 } while (ret == NO_EXIT);
3149
3150 if (tb->cflags & CF_LAST_IO) {
3151 gen_io_end();
4c9649a9 3152 }
4af70374
RH
3153
3154 switch (ret) {
3155 case EXIT_GOTO_TB:
8aa3fa20 3156 case EXIT_NORETURN:
4af70374
RH
3157 break;
3158 case EXIT_PC_STALE:
496cb5b9 3159 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3160 /* FALLTHRU */
3161 case EXIT_PC_UPDATED:
ca6862a6 3162 if (ctx.singlestep_enabled) {
bf1b03fe
RH
3163 gen_excp_1(EXCP_DEBUG, 0);
3164 } else {
3165 tcg_gen_exit_tb(0);
3166 }
4af70374
RH
3167 break;
3168 default:
3169 abort();
4c9649a9 3170 }
4af70374 3171
806f352d 3172 gen_tb_end(tb, num_insns);
efd7f486 3173 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4c9649a9 3174 if (search_pc) {
92414b31 3175 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3176 lj++;
3177 while (lj <= j)
ab1103de 3178 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3179 } else {
3180 tb->size = ctx.pc - pc_start;
2e70f6ef 3181 tb->icount = num_insns;
4c9649a9 3182 }
4af70374 3183
806991da 3184#ifdef DEBUG_DISAS
8fec2b8c 3185 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39 3186 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 3187 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
93fcfe39 3188 qemu_log("\n");
4c9649a9 3189 }
4c9649a9 3190#endif
4c9649a9
JM
3191}
3192
4d5712f1 3193void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3194{
86a35f7c 3195 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
4c9649a9
JM
3196}
3197
4d5712f1 3198void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3199{
86a35f7c 3200 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
4c9649a9
JM
3201}
3202
4d5712f1 3203void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 3204{
25983cad 3205 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
d2856f1a 3206}