/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb. No fixup required. */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
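/* Sizing note (a worked breakdown of the expression above, added for
   clarity): "ir0".."ir9" need 4 bytes each including the NUL (10*4),
   "ir10".."ir30" need 5 bytes (21*5), "fir0".."fir9" need 5 bytes (10*5),
   and "fir10".."fir30" need 6 bytes (21*6) -- exactly the buffer that
   alpha_translate_init() fills below. */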

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    done_init = 1;
}

bf1b03fe 147static void gen_excp_1(int exception, int error_code)
4c9649a9 148{
a7812ae4 149 TCGv_i32 tmp1, tmp2;
6ad02592 150
6ad02592
AJ
151 tmp1 = tcg_const_i32(exception);
152 tmp2 = tcg_const_i32(error_code);
b9f0923e 153 gen_helper_excp(cpu_env, tmp1, tmp2);
a7812ae4
PB
154 tcg_temp_free_i32(tmp2);
155 tcg_temp_free_i32(tmp1);
bf1b03fe 156}
8aa3fa20 157
bf1b03fe
RH
158static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
159{
160 tcg_gen_movi_i64(cpu_pc, ctx->pc);
161 gen_excp_1(exception, error_code);
8aa3fa20 162 return EXIT_NORETURN;
4c9649a9
JM
163}
164
8aa3fa20 165static inline ExitStatus gen_invalid(DisasContext *ctx)
4c9649a9 166{
8aa3fa20 167 return gen_excp(ctx, EXCP_OPCDEC, 0);
4c9649a9
JM
168}
169
636aa200 170static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 171{
a7812ae4 172 TCGv_i32 tmp32 = tcg_temp_new_i32();
f8da40ae 173 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4
PB
174 gen_helper_memory_to_f(t0, tmp32);
175 tcg_temp_free_i32(tmp32);
f18cd223
AJ
176}
177
636aa200 178static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 179{
a7812ae4 180 TCGv tmp = tcg_temp_new();
f8da40ae 181 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
a7812ae4 182 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
183 tcg_temp_free(tmp);
184}
185
636aa200 186static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 187{
a7812ae4 188 TCGv_i32 tmp32 = tcg_temp_new_i32();
f8da40ae 189 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4
PB
190 gen_helper_memory_to_s(t0, tmp32);
191 tcg_temp_free_i32(tmp32);
f18cd223
AJ
192}
193
636aa200 194static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679 195{
f8da40ae 196 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
6910b8f6
RH
197 tcg_gen_mov_i64(cpu_lock_addr, t1);
198 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
199}
200
636aa200 201static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679 202{
f8da40ae 203 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
6910b8f6
RH
204 tcg_gen_mov_i64(cpu_lock_addr, t1);
205 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
206}
207
636aa200
BS
208static inline void gen_load_mem(DisasContext *ctx,
209 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
210 int flags),
211 int ra, int rb, int32_t disp16, int fp,
212 int clear)
023d8ca2 213{
6910b8f6 214 TCGv addr, va;
023d8ca2 215
6910b8f6
RH
216 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
217 prefetches, which we can treat as nops. No worries about
218 missed exceptions here. */
219 if (unlikely(ra == 31)) {
023d8ca2 220 return;
6910b8f6 221 }
023d8ca2 222
a7812ae4 223 addr = tcg_temp_new();
023d8ca2
AJ
224 if (rb != 31) {
225 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 226 if (clear) {
023d8ca2 227 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 228 }
023d8ca2 229 } else {
6910b8f6 230 if (clear) {
023d8ca2 231 disp16 &= ~0x7;
6910b8f6 232 }
023d8ca2
AJ
233 tcg_gen_movi_i64(addr, disp16);
234 }
6910b8f6
RH
235
236 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
237 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
238
023d8ca2
AJ
239 tcg_temp_free(addr);
240}
241
636aa200 242static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 243{
a7812ae4 244 TCGv_i32 tmp32 = tcg_temp_new_i32();
a7812ae4 245 gen_helper_f_to_memory(tmp32, t0);
f8da40ae 246 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4 247 tcg_temp_free_i32(tmp32);
f18cd223
AJ
248}
249
636aa200 250static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 251{
a7812ae4
PB
252 TCGv tmp = tcg_temp_new();
253 gen_helper_g_to_memory(tmp, t0);
f8da40ae 254 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
f18cd223
AJ
255 tcg_temp_free(tmp);
256}
257
636aa200 258static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 259{
a7812ae4 260 TCGv_i32 tmp32 = tcg_temp_new_i32();
a7812ae4 261 gen_helper_s_to_memory(tmp32, t0);
f8da40ae 262 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
a7812ae4 263 tcg_temp_free_i32(tmp32);
f18cd223
AJ
264}
265
636aa200
BS
266static inline void gen_store_mem(DisasContext *ctx,
267 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
268 int flags),
269 int ra, int rb, int32_t disp16, int fp,
6910b8f6 270 int clear)
023d8ca2 271{
6910b8f6
RH
272 TCGv addr, va;
273
274 addr = tcg_temp_new();
023d8ca2
AJ
275 if (rb != 31) {
276 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 277 if (clear) {
023d8ca2 278 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 279 }
023d8ca2 280 } else {
6910b8f6 281 if (clear) {
023d8ca2 282 disp16 &= ~0x7;
6910b8f6 283 }
023d8ca2
AJ
284 tcg_gen_movi_i64(addr, disp16);
285 }
6910b8f6
RH
286
287 if (ra == 31) {
288 va = tcg_const_i64(0);
f18cd223 289 } else {
6910b8f6 290 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
023d8ca2 291 }
6910b8f6
RH
292 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
293
023d8ca2 294 tcg_temp_free(addr);
6910b8f6
RH
295 if (ra == 31) {
296 tcg_temp_free(va);
297 }
298}
299
300static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
301 int32_t disp16, int quad)
302{
303 TCGv addr;
304
305 if (ra == 31) {
306 /* ??? Don't bother storing anything. The user can't tell
307 the difference, since the zero register always reads zero. */
308 return NO_EXIT;
309 }
310
311#if defined(CONFIG_USER_ONLY)
312 addr = cpu_lock_st_addr;
313#else
e52458fe 314 addr = tcg_temp_local_new();
6910b8f6
RH
315#endif
316
317 if (rb != 31) {
318 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
319 } else {
320 tcg_gen_movi_i64(addr, disp16);
321 }
322
323#if defined(CONFIG_USER_ONLY)
324 /* ??? This is handled via a complicated version of compare-and-swap
325 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
326 in TCG so that this isn't necessary. */
327 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
328#else
329 /* ??? In system mode we are never multi-threaded, so CAS can be
330 implemented via a non-atomic load-compare-store sequence. */
331 {
332 int lab_fail, lab_done;
333 TCGv val;
334
335 lab_fail = gen_new_label();
336 lab_done = gen_new_label();
e52458fe 337 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
6910b8f6
RH
338
339 val = tcg_temp_new();
f8da40ae 340 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
e52458fe 341 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
6910b8f6 342
f8da40ae
RH
343 tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
344 quad ? MO_LEQ : MO_LEUL);
6910b8f6
RH
345 tcg_gen_movi_i64(cpu_ir[ra], 1);
346 tcg_gen_br(lab_done);
347
348 gen_set_label(lab_fail);
349 tcg_gen_movi_i64(cpu_ir[ra], 0);
350
351 gen_set_label(lab_done);
352 tcg_gen_movi_i64(cpu_lock_addr, -1);
353
354 tcg_temp_free(addr);
355 return NO_EXIT;
356 }
357#endif
023d8ca2
AJ
358}
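
/* Note (added commentary, not in the original source): together with
   gen_qemu_ldl_l/ldq_l above, this implements Alpha's LL/SC pair.  The
   load-locked records the address and loaded value in cpu_lock_addr and
   cpu_lock_value; in system mode the store-conditional succeeds (writing 1
   to ra) only if the address still matches and memory still holds the
   recorded value, and either way the lock is invalidated by setting
   cpu_lock_addr to -1. */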
359
b114b68a 360static bool in_superpage(DisasContext *ctx, int64_t addr)
4c9649a9 361{
b114b68a
RH
362 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
363 && addr < 0
364 && ((addr >> 41) & 3) == 2
365 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
366}
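
/* Illustration (added; the concrete address is assumed from the Alpha KSEG
   layout rather than taken from this file): a kernel superpage address such
   as 0xfffffc0000001234ull passes the test above -- it is negative, bits
   <42:41> are 10b (== 2), and it sign-extends cleanly from
   TARGET_VIRT_ADDR_SPACE_BITS, so its page permissions cannot change. */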
367
368static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
369{
 370 /* Suppress goto_tb in the case of single-stepping and IO. */
371 if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
372 return false;
373 }
374 /* If the destination is in the superpage, the page perms can't change. */
375 if (in_superpage(ctx, dest)) {
376 return true;
377 }
378 /* Check for the dest on the same page as the start of the TB. */
379 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
4af70374 380}
dbb30fe6 381
4af70374
RH
382static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
383{
384 uint64_t dest = ctx->pc + (disp << 2);
385
386 if (ra != 31) {
387 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
388 }
389
390 /* Notice branch-to-next; used to initialize RA with the PC. */
391 if (disp == 0) {
392 return 0;
393 } else if (use_goto_tb(ctx, dest)) {
394 tcg_gen_goto_tb(0);
395 tcg_gen_movi_i64(cpu_pc, dest);
8cfd0495 396 tcg_gen_exit_tb((uintptr_t)ctx->tb);
4af70374
RH
397 return EXIT_GOTO_TB;
398 } else {
399 tcg_gen_movi_i64(cpu_pc, dest);
400 return EXIT_PC_UPDATED;
401 }
dbb30fe6
RH
402}
403
4af70374
RH
404static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
405 TCGv cmp, int32_t disp)
dbb30fe6 406{
4af70374 407 uint64_t dest = ctx->pc + (disp << 2);
dbb30fe6 408 int lab_true = gen_new_label();
9c29504e 409
4af70374
RH
410 if (use_goto_tb(ctx, dest)) {
411 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
412
413 tcg_gen_goto_tb(0);
414 tcg_gen_movi_i64(cpu_pc, ctx->pc);
8cfd0495 415 tcg_gen_exit_tb((uintptr_t)ctx->tb);
4af70374
RH
416
417 gen_set_label(lab_true);
418 tcg_gen_goto_tb(1);
419 tcg_gen_movi_i64(cpu_pc, dest);
8cfd0495 420 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
4af70374
RH
421
422 return EXIT_GOTO_TB;
423 } else {
57e289de
RH
424 TCGv_i64 z = tcg_const_i64(0);
425 TCGv_i64 d = tcg_const_i64(dest);
426 TCGv_i64 p = tcg_const_i64(ctx->pc);
4af70374 427
57e289de 428 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
4af70374 429
57e289de
RH
430 tcg_temp_free_i64(z);
431 tcg_temp_free_i64(d);
432 tcg_temp_free_i64(p);
4af70374
RH
433 return EXIT_PC_UPDATED;
434 }
435}
436
437static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
438 int32_t disp, int mask)
439{
440 TCGv cmp_tmp;
441
442 if (unlikely(ra == 31)) {
443 cmp_tmp = tcg_const_i64(0);
444 } else {
445 cmp_tmp = tcg_temp_new();
9c29504e 446 if (mask) {
4af70374 447 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
dbb30fe6 448 } else {
4af70374 449 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
dbb30fe6 450 }
9c29504e 451 }
4af70374
RH
452
453 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
454}
455
4af70374 456/* Fold -0.0 for comparison with COND. */
dbb30fe6 457
4af70374 458static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 459{
dbb30fe6 460 uint64_t mzero = 1ull << 63;
f18cd223 461
dbb30fe6
RH
462 switch (cond) {
463 case TCG_COND_LE:
464 case TCG_COND_GT:
465 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 466 tcg_gen_mov_i64(dest, src);
a7812ae4 467 break;
dbb30fe6
RH
468
469 case TCG_COND_EQ:
470 case TCG_COND_NE:
471 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 472 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 473 break;
dbb30fe6
RH
474
475 case TCG_COND_GE:
dbb30fe6 476 case TCG_COND_LT:
4af70374
RH
477 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
478 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
479 tcg_gen_neg_i64(dest, dest);
480 tcg_gen_and_i64(dest, dest, src);
a7812ae4 481 break;
dbb30fe6 482
a7812ae4
PB
483 default:
484 abort();
f18cd223 485 }
dbb30fe6
RH
486}
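
/* Worked example (added commentary): for a >= or < comparison with src
   holding IEEE -0.0 (0x8000000000000000), the setcond above yields 0, the
   negation keeps it 0, and the final AND folds the value to +0.0, so the
   following integer comparison treats -0.0 and +0.0 identically. */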
487
4af70374
RH
488static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
489 int32_t disp)
dbb30fe6 490{
4af70374 491 TCGv cmp_tmp;
dbb30fe6
RH
492
493 if (unlikely(ra == 31)) {
494 /* Very uncommon case, but easier to optimize it to an integer
495 comparison than continuing with the floating point comparison. */
4af70374 496 return gen_bcond(ctx, cond, ra, disp, 0);
dbb30fe6
RH
497 }
498
4af70374
RH
499 cmp_tmp = tcg_temp_new();
500 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
501 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
502}
503
bbe1dab4 504static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 505 int islit, uint8_t lit, int mask)
4c9649a9 506{
57e289de 507 TCGv_i64 c1, z, v1;
9c29504e 508
57e289de 509 if (unlikely(rc == 31)) {
9c29504e 510 return;
57e289de 511 }
9c29504e 512
57e289de 513 if (ra == 31) {
9c29504e 514 /* Very uncommon case - Do not bother to optimize. */
57e289de
RH
515 c1 = tcg_const_i64(0);
516 } else if (mask) {
517 c1 = tcg_const_i64(1);
518 tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
519 } else {
520 c1 = cpu_ir[ra];
521 }
522 if (islit) {
523 v1 = tcg_const_i64(lit);
524 } else {
525 v1 = cpu_ir[rb];
9c29504e 526 }
57e289de 527 z = tcg_const_i64(0);
9c29504e 528
57e289de
RH
529 tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);
530
531 tcg_temp_free_i64(z);
532 if (ra == 31 || mask) {
533 tcg_temp_free_i64(c1);
534 }
535 if (islit) {
536 tcg_temp_free_i64(v1);
537 }
4c9649a9
JM
538}
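
/* Example of the mapping above (added commentary): a CMOVEQ ra,rb,rc
   (cond == TCG_COND_EQ, mask == 0) becomes a single movcond,
   cpu_ir[rc] = (cpu_ir[ra] == 0 ? v1 : cpu_ir[rc]), where v1 is either
   cpu_ir[rb] or the literal; the low-bit forms (CMOVLBS/CMOVLBC) differ
   only in masking ra down to bit 0 first. */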
539
bbe1dab4 540static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 541{
57e289de 542 TCGv_i64 c1, z, v1;
dbb30fe6 543
4af70374 544 if (unlikely(rc == 31)) {
dbb30fe6 545 return;
4af70374
RH
546 }
547
57e289de 548 c1 = tcg_temp_new_i64();
dbb30fe6 549 if (unlikely(ra == 31)) {
57e289de 550 tcg_gen_movi_i64(c1, 0);
4af70374 551 } else {
57e289de 552 gen_fold_mzero(cond, c1, cpu_fir[ra]);
dbb30fe6 553 }
57e289de
RH
554 if (rb == 31) {
555 v1 = tcg_const_i64(0);
556 } else {
557 v1 = cpu_fir[rb];
558 }
559 z = tcg_const_i64(0);
dbb30fe6 560
57e289de 561 tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
dbb30fe6 562
57e289de
RH
563 tcg_temp_free_i64(z);
564 tcg_temp_free_i64(c1);
565 if (rb == 31) {
566 tcg_temp_free_i64(v1);
567 }
dbb30fe6
RH
568}
569
f24518b5
RH
570#define QUAL_RM_N 0x080 /* Round mode nearest even */
571#define QUAL_RM_C 0x000 /* Round mode chopped */
572#define QUAL_RM_M 0x040 /* Round mode minus infinity */
573#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
574#define QUAL_RM_MASK 0x0c0
575
576#define QUAL_U 0x100 /* Underflow enable (fp output) */
577#define QUAL_V 0x100 /* Overflow enable (int output) */
578#define QUAL_S 0x400 /* Software completion enable */
579#define QUAL_I 0x200 /* Inexact detection enable */
580
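/* Decoding example (added commentary): the fn11 qualifier bits combine by
   OR, so QUAL_S | QUAL_U | QUAL_I | QUAL_RM_D == 0x7c0, i.e. an IEEE
   operation with software completion, underflow and inexact detection
   enabled, and dynamic rounding. */
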
581static void gen_qual_roundmode(DisasContext *ctx, int fn11)
582{
583 TCGv_i32 tmp;
584
585 fn11 &= QUAL_RM_MASK;
586 if (fn11 == ctx->tb_rm) {
587 return;
588 }
589 ctx->tb_rm = fn11;
590
591 tmp = tcg_temp_new_i32();
592 switch (fn11) {
593 case QUAL_RM_N:
594 tcg_gen_movi_i32(tmp, float_round_nearest_even);
595 break;
596 case QUAL_RM_C:
597 tcg_gen_movi_i32(tmp, float_round_to_zero);
598 break;
599 case QUAL_RM_M:
600 tcg_gen_movi_i32(tmp, float_round_down);
601 break;
602 case QUAL_RM_D:
4a58aedf
RH
603 tcg_gen_ld8u_i32(tmp, cpu_env,
604 offsetof(CPUAlphaState, fpcr_dyn_round));
f24518b5
RH
605 break;
606 }
607
608#if defined(CONFIG_SOFTFLOAT_INLINE)
6b4c305c 609 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
f24518b5
RH
610 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
611 sets the one field. */
612 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 613 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
f24518b5
RH
614#else
615 gen_helper_setroundmode(tmp);
616#endif
617
618 tcg_temp_free_i32(tmp);
619}
620
621static void gen_qual_flushzero(DisasContext *ctx, int fn11)
622{
623 TCGv_i32 tmp;
624
625 fn11 &= QUAL_U;
626 if (fn11 == ctx->tb_ftz) {
627 return;
628 }
629 ctx->tb_ftz = fn11;
630
631 tmp = tcg_temp_new_i32();
632 if (fn11) {
633 /* Underflow is enabled, use the FPCR setting. */
4a58aedf
RH
634 tcg_gen_ld8u_i32(tmp, cpu_env,
635 offsetof(CPUAlphaState, fpcr_flush_to_zero));
f24518b5
RH
636 } else {
637 /* Underflow is disabled, force flush-to-zero. */
638 tcg_gen_movi_i32(tmp, 1);
639 }
640
641#if defined(CONFIG_SOFTFLOAT_INLINE)
642 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 643 offsetof(CPUAlphaState, fp_status.flush_to_zero));
f24518b5
RH
644#else
645 gen_helper_setflushzero(tmp);
646#endif
647
648 tcg_temp_free_i32(tmp);
649}
650
651static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
652{
74343409 653 TCGv val;
f24518b5 654 if (reg == 31) {
74343409 655 val = tcg_const_i64(0);
f24518b5 656 } else {
74343409
RH
657 if ((fn11 & QUAL_S) == 0) {
658 if (is_cmp) {
659 gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
660 } else {
661 gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
662 }
663 }
664 val = tcg_temp_new();
665 tcg_gen_mov_i64(val, cpu_fir[reg]);
f24518b5
RH
666 }
667 return val;
668}
669
670static void gen_fp_exc_clear(void)
671{
672#if defined(CONFIG_SOFTFLOAT_INLINE)
673 TCGv_i32 zero = tcg_const_i32(0);
674 tcg_gen_st8_i32(zero, cpu_env,
4d5712f1 675 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5
RH
676 tcg_temp_free_i32(zero);
677#else
4a58aedf 678 gen_helper_fp_exc_clear(cpu_env);
f24518b5
RH
679#endif
680}
681
682static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
683{
684 /* ??? We ought to be able to do something with imprecise exceptions.
685 E.g. notice we're still in the trap shadow of something within the
686 TB and do not generate the code to signal the exception; end the TB
687 when an exception is forced to arrive, either by consumption of a
688 register value or TRAPB or EXCB. */
689 TCGv_i32 exc = tcg_temp_new_i32();
690 TCGv_i32 reg;
691
692#if defined(CONFIG_SOFTFLOAT_INLINE)
693 tcg_gen_ld8u_i32(exc, cpu_env,
4d5712f1 694 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5 695#else
4a58aedf 696 gen_helper_fp_exc_get(exc, cpu_env);
f24518b5
RH
697#endif
698
699 if (ignore) {
700 tcg_gen_andi_i32(exc, exc, ~ignore);
701 }
702
703 /* ??? Pass in the regno of the destination so that the helper can
704 set EXC_MASK, which contains a bitmask of destination registers
705 that have caused arithmetic traps. A simple userspace emulation
706 does not require this. We do need it for a guest kernel's entArith,
707 or if we were to do something clever with imprecise exceptions. */
708 reg = tcg_const_i32(rc + 32);
709
710 if (fn11 & QUAL_S) {
4a58aedf 711 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
f24518b5 712 } else {
4a58aedf 713 gen_helper_fp_exc_raise(cpu_env, exc, reg);
f24518b5
RH
714 }
715
716 tcg_temp_free_i32(reg);
717 tcg_temp_free_i32(exc);
718}
719
720static inline void gen_fp_exc_raise(int rc, int fn11)
721{
722 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 723}
f24518b5 724
593f17e5
RH
725static void gen_fcvtlq(int rb, int rc)
726{
727 if (unlikely(rc == 31)) {
728 return;
729 }
730 if (unlikely(rb == 31)) {
731 tcg_gen_movi_i64(cpu_fir[rc], 0);
732 } else {
733 TCGv tmp = tcg_temp_new();
734
735 /* The arithmetic right shift here, plus the sign-extended mask below
736 yields a sign-extended result without an explicit ext32s_i64. */
737 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
738 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
739 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
740 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
741 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
742
743 tcg_temp_free(tmp);
744 }
745}
746
735cf45f
RH
747static void gen_fcvtql(int rb, int rc)
748{
749 if (unlikely(rc == 31)) {
750 return;
751 }
752 if (unlikely(rb == 31)) {
753 tcg_gen_movi_i64(cpu_fir[rc], 0);
754 } else {
755 TCGv tmp = tcg_temp_new();
756
757 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
758 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
759 tcg_gen_shli_i64(tmp, tmp, 32);
760 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
761 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
762
763 tcg_temp_free(tmp);
764 }
765}
766
767static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
768{
769 if (rb != 31) {
770 int lab = gen_new_label();
771 TCGv tmp = tcg_temp_new();
772
773 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
774 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
775 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
776
777 gen_set_label(lab);
778 }
779 gen_fcvtql(rb, rc);
780}
781
4a58aedf
RH
782#define FARITH2(name) \
783 static inline void glue(gen_f, name)(int rb, int rc) \
784 { \
785 if (unlikely(rc == 31)) { \
786 return; \
787 } \
788 if (rb != 31) { \
789 gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
790 } else { \
791 TCGv tmp = tcg_const_i64(0); \
792 gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
793 tcg_temp_free(tmp); \
794 } \
795 }
f24518b5
RH
796
797/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
798FARITH2(sqrtf)
799FARITH2(sqrtg)
a7812ae4
PB
800FARITH2(cvtgf)
801FARITH2(cvtgq)
802FARITH2(cvtqf)
803FARITH2(cvtqg)
f24518b5 804
4a58aedf
RH
805static void gen_ieee_arith2(DisasContext *ctx,
806 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
807 int rb, int rc, int fn11)
808{
809 TCGv vb;
810
811 /* ??? This is wrong: the instruction is not a nop, it still may
812 raise exceptions. */
813 if (unlikely(rc == 31)) {
814 return;
815 }
816
817 gen_qual_roundmode(ctx, fn11);
818 gen_qual_flushzero(ctx, fn11);
819 gen_fp_exc_clear();
820
821 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 822 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
823 tcg_temp_free(vb);
824
825 gen_fp_exc_raise(rc, fn11);
826}
827
828#define IEEE_ARITH2(name) \
829static inline void glue(gen_f, name)(DisasContext *ctx, \
830 int rb, int rc, int fn11) \
831{ \
832 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
833}
834IEEE_ARITH2(sqrts)
835IEEE_ARITH2(sqrtt)
836IEEE_ARITH2(cvtst)
837IEEE_ARITH2(cvtts)
838
839static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
840{
841 TCGv vb;
842 int ignore = 0;
843
844 /* ??? This is wrong: the instruction is not a nop, it still may
845 raise exceptions. */
846 if (unlikely(rc == 31)) {
847 return;
848 }
849
850 /* No need to set flushzero, since we have an integer output. */
851 gen_fp_exc_clear();
852 vb = gen_ieee_input(rb, fn11, 0);
853
854 /* Almost all integer conversions use cropped rounding, and most
855 also do not have integer overflow enabled. Special case that. */
856 switch (fn11) {
857 case QUAL_RM_C:
4a58aedf 858 gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
859 break;
860 case QUAL_V | QUAL_RM_C:
861 case QUAL_S | QUAL_V | QUAL_RM_C:
862 ignore = float_flag_inexact;
863 /* FALLTHRU */
864 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
4a58aedf 865 gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
866 break;
867 default:
868 gen_qual_roundmode(ctx, fn11);
4a58aedf 869 gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
870 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
871 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
872 break;
873 }
874 tcg_temp_free(vb);
875
876 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
877}
878
4a58aedf
RH
879static void gen_ieee_intcvt(DisasContext *ctx,
880 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
881 int rb, int rc, int fn11)
882{
883 TCGv vb;
884
885 /* ??? This is wrong: the instruction is not a nop, it still may
886 raise exceptions. */
887 if (unlikely(rc == 31)) {
888 return;
889 }
890
891 gen_qual_roundmode(ctx, fn11);
892
893 if (rb == 31) {
894 vb = tcg_const_i64(0);
895 } else {
896 vb = cpu_fir[rb];
897 }
898
899 /* The only exception that can be raised by integer conversion
900 is inexact. Thus we only need to worry about exceptions when
901 inexact handling is requested. */
902 if (fn11 & QUAL_I) {
903 gen_fp_exc_clear();
4a58aedf 904 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
905 gen_fp_exc_raise(rc, fn11);
906 } else {
4a58aedf 907 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
908 }
909
910 if (rb == 31) {
911 tcg_temp_free(vb);
912 }
913}
914
915#define IEEE_INTCVT(name) \
916static inline void glue(gen_f, name)(DisasContext *ctx, \
917 int rb, int rc, int fn11) \
918{ \
919 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
920}
921IEEE_INTCVT(cvtqs)
922IEEE_INTCVT(cvtqt)
923
dc96be4b
RH
924static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
925{
926 TCGv va, vb, vmask;
927 int za = 0, zb = 0;
928
929 if (unlikely(rc == 31)) {
930 return;
931 }
932
933 vmask = tcg_const_i64(mask);
934
935 TCGV_UNUSED_I64(va);
936 if (ra == 31) {
937 if (inv_a) {
938 va = vmask;
939 } else {
940 za = 1;
941 }
942 } else {
943 va = tcg_temp_new_i64();
944 tcg_gen_mov_i64(va, cpu_fir[ra]);
945 if (inv_a) {
946 tcg_gen_andc_i64(va, vmask, va);
947 } else {
948 tcg_gen_and_i64(va, va, vmask);
949 }
950 }
951
952 TCGV_UNUSED_I64(vb);
953 if (rb == 31) {
954 zb = 1;
955 } else {
956 vb = tcg_temp_new_i64();
957 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
958 }
959
960 switch (za << 1 | zb) {
961 case 0 | 0:
962 tcg_gen_or_i64(cpu_fir[rc], va, vb);
963 break;
964 case 0 | 1:
965 tcg_gen_mov_i64(cpu_fir[rc], va);
966 break;
967 case 2 | 0:
968 tcg_gen_mov_i64(cpu_fir[rc], vb);
969 break;
970 case 2 | 1:
971 tcg_gen_movi_i64(cpu_fir[rc], 0);
972 break;
973 }
974
975 tcg_temp_free(vmask);
976 if (ra != 31) {
977 tcg_temp_free(va);
978 }
979 if (rb != 31) {
980 tcg_temp_free(vb);
981 }
982}
983
984static inline void gen_fcpys(int ra, int rb, int rc)
985{
986 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
987}
988
989static inline void gen_fcpysn(int ra, int rb, int rc)
990{
991 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
992}
993
994static inline void gen_fcpyse(int ra, int rb, int rc)
995{
996 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
997}
998
4a58aedf
RH
999#define FARITH3(name) \
1000 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1001 { \
1002 TCGv va, vb; \
1003 \
1004 if (unlikely(rc == 31)) { \
1005 return; \
1006 } \
1007 if (ra == 31) { \
1008 va = tcg_const_i64(0); \
1009 } else { \
1010 va = cpu_fir[ra]; \
1011 } \
1012 if (rb == 31) { \
1013 vb = tcg_const_i64(0); \
1014 } else { \
1015 vb = cpu_fir[rb]; \
1016 } \
1017 \
1018 gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
1019 \
1020 if (ra == 31) { \
1021 tcg_temp_free(va); \
1022 } \
1023 if (rb == 31) { \
1024 tcg_temp_free(vb); \
1025 } \
1026 }
f24518b5
RH
1027
1028/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
1029FARITH3(addf)
1030FARITH3(subf)
1031FARITH3(mulf)
1032FARITH3(divf)
1033FARITH3(addg)
1034FARITH3(subg)
1035FARITH3(mulg)
1036FARITH3(divg)
1037FARITH3(cmpgeq)
1038FARITH3(cmpglt)
1039FARITH3(cmpgle)
f24518b5
RH
1040
1041static void gen_ieee_arith3(DisasContext *ctx,
4a58aedf 1042 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
f24518b5
RH
1043 int ra, int rb, int rc, int fn11)
1044{
1045 TCGv va, vb;
1046
1047 /* ??? This is wrong: the instruction is not a nop, it still may
1048 raise exceptions. */
1049 if (unlikely(rc == 31)) {
1050 return;
1051 }
1052
1053 gen_qual_roundmode(ctx, fn11);
1054 gen_qual_flushzero(ctx, fn11);
1055 gen_fp_exc_clear();
1056
1057 va = gen_ieee_input(ra, fn11, 0);
1058 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 1059 helper(cpu_fir[rc], cpu_env, va, vb);
f24518b5
RH
1060 tcg_temp_free(va);
1061 tcg_temp_free(vb);
1062
1063 gen_fp_exc_raise(rc, fn11);
1064}
1065
1066#define IEEE_ARITH3(name) \
1067static inline void glue(gen_f, name)(DisasContext *ctx, \
1068 int ra, int rb, int rc, int fn11) \
1069{ \
1070 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1071}
1072IEEE_ARITH3(adds)
1073IEEE_ARITH3(subs)
1074IEEE_ARITH3(muls)
1075IEEE_ARITH3(divs)
1076IEEE_ARITH3(addt)
1077IEEE_ARITH3(subt)
1078IEEE_ARITH3(mult)
1079IEEE_ARITH3(divt)
1080
1081static void gen_ieee_compare(DisasContext *ctx,
4a58aedf 1082 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
f24518b5
RH
1083 int ra, int rb, int rc, int fn11)
1084{
1085 TCGv va, vb;
1086
1087 /* ??? This is wrong: the instruction is not a nop, it still may
1088 raise exceptions. */
1089 if (unlikely(rc == 31)) {
1090 return;
1091 }
1092
1093 gen_fp_exc_clear();
1094
1095 va = gen_ieee_input(ra, fn11, 1);
1096 vb = gen_ieee_input(rb, fn11, 1);
4a58aedf 1097 helper(cpu_fir[rc], cpu_env, va, vb);
f24518b5
RH
1098 tcg_temp_free(va);
1099 tcg_temp_free(vb);
1100
1101 gen_fp_exc_raise(rc, fn11);
1102}
1103
1104#define IEEE_CMP3(name) \
1105static inline void glue(gen_f, name)(DisasContext *ctx, \
1106 int ra, int rb, int rc, int fn11) \
1107{ \
1108 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1109}
1110IEEE_CMP3(cmptun)
1111IEEE_CMP3(cmpteq)
1112IEEE_CMP3(cmptlt)
1113IEEE_CMP3(cmptle)
a7812ae4 1114
248c42f3
RH
1115static inline uint64_t zapnot_mask(uint8_t lit)
1116{
1117 uint64_t mask = 0;
1118 int i;
1119
1120 for (i = 0; i < 8; ++i) {
67debe3a 1121 if ((lit >> i) & 1) {
248c42f3 1122 mask |= 0xffull << (i * 8);
67debe3a 1123 }
248c42f3
RH
1124 }
1125 return mask;
1126}
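
/* Sample expansions of the loop above (added for reference):
     zapnot_mask(0x01) == 0x00000000000000ffull
     zapnot_mask(0x0f) == 0x00000000ffffffffull
     zapnot_mask(0x80) == 0xff00000000000000ull
   gen_zapnoti() below special-cases several of these as plain
   byte/word/long zero-extensions. */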
1127
87d98f95
RH
1128/* Implement zapnot with an immediate operand, which expands to some
1129 form of immediate AND. This is a basic building block in the
1130 definition of many of the other byte manipulation instructions. */
248c42f3 1131static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1132{
87d98f95
RH
1133 switch (lit) {
1134 case 0x00:
248c42f3 1135 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1136 break;
1137 case 0x01:
248c42f3 1138 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1139 break;
1140 case 0x03:
248c42f3 1141 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1142 break;
1143 case 0x0f:
248c42f3 1144 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1145 break;
1146 case 0xff:
248c42f3 1147 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1148 break;
1149 default:
248c42f3 1150 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1151 break;
1152 }
1153}
1154
1155static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1156{
67debe3a 1157 if (unlikely(rc == 31)) {
87d98f95 1158 return;
67debe3a 1159 } else if (unlikely(ra == 31)) {
87d98f95 1160 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1161 } else if (islit) {
248c42f3 1162 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 1163 } else {
87d98f95 1164 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 1165 }
87d98f95
RH
1166}
1167
1168static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1169{
67debe3a 1170 if (unlikely(rc == 31)) {
87d98f95 1171 return;
67debe3a 1172 } else if (unlikely(ra == 31)) {
87d98f95 1173 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1174 } else if (islit) {
248c42f3 1175 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
67debe3a 1176 } else {
87d98f95 1177 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 1178 }
87d98f95
RH
1179}
1180
1181
248c42f3 1182/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1183static void gen_ext_h(int ra, int rb, int rc, int islit,
1184 uint8_t lit, uint8_t byte_mask)
b3249f63 1185{
67debe3a 1186 if (unlikely(rc == 31)) {
b3249f63 1187 return;
67debe3a 1188 } else if (unlikely(ra == 31)) {
377a43b6 1189 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1190 } else {
dfaa8583 1191 if (islit) {
377a43b6
RH
1192 lit = (64 - (lit & 7) * 8) & 0x3f;
1193 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1194 } else {
377a43b6 1195 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1196 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1197 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1198 tcg_gen_neg_i64(tmp1, tmp1);
1199 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1200 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1201 tcg_temp_free(tmp1);
dfaa8583 1202 }
248c42f3 1203 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1204 }
b3249f63
AJ
1205}
1206
248c42f3 1207/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1208static void gen_ext_l(int ra, int rb, int rc, int islit,
1209 uint8_t lit, uint8_t byte_mask)
b3249f63 1210{
67debe3a 1211 if (unlikely(rc == 31)) {
b3249f63 1212 return;
67debe3a 1213 } else if (unlikely(ra == 31)) {
377a43b6 1214 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1215 } else {
dfaa8583 1216 if (islit) {
377a43b6 1217 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1218 } else {
a7812ae4 1219 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1220 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1221 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1222 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1223 tcg_temp_free(tmp);
fe2b269a 1224 }
248c42f3
RH
1225 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1226 }
1227}
1228
50eb6e5c
RH
1229/* INSWH, INSLH, INSQH */
1230static void gen_ins_h(int ra, int rb, int rc, int islit,
1231 uint8_t lit, uint8_t byte_mask)
1232{
67debe3a 1233 if (unlikely(rc == 31)) {
50eb6e5c 1234 return;
67debe3a 1235 } else if (unlikely(ra == 31) || (islit && (lit & 7) == 0)) {
50eb6e5c 1236 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1237 } else {
50eb6e5c
RH
1238 TCGv tmp = tcg_temp_new();
1239
1240 /* The instruction description has us left-shift the byte mask
1241 and extract bits <15:8> and apply that zap at the end. This
1242 is equivalent to simply performing the zap first and shifting
1243 afterward. */
1244 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1245
1246 if (islit) {
1247 /* Note that we have handled the lit==0 case above. */
1248 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1249 } else {
1250 TCGv shift = tcg_temp_new();
1251
1252 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1253 Do this portably by splitting the shift into two parts:
1254 shift_count-1 and 1. Arrange for the -1 by using
1255 ones-complement instead of twos-complement in the negation:
1256 ~((B & 7) * 8) & 63. */
1257
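            /* Worked example (added): for (B & 7) == 3 the expression is
               ~24 & 63 == 39, and the two shifts below total 39 + 1 == 40
               == 64 - 24; for (B & 7) == 0 it is ~0 & 63 == 63, and
               63 + 1 == 64 shifts every bit out, leaving the zero that a
               single 64-bit shift could not portably produce. */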
1258 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1259 tcg_gen_shli_i64(shift, shift, 3);
1260 tcg_gen_not_i64(shift, shift);
1261 tcg_gen_andi_i64(shift, shift, 0x3f);
1262
1263 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1264 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1265 tcg_temp_free(shift);
1266 }
1267 tcg_temp_free(tmp);
1268 }
1269}
1270
248c42f3 1271/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1272static void gen_ins_l(int ra, int rb, int rc, int islit,
1273 uint8_t lit, uint8_t byte_mask)
248c42f3 1274{
67debe3a 1275 if (unlikely(rc == 31)) {
248c42f3 1276 return;
67debe3a 1277 } else if (unlikely(ra == 31)) {
248c42f3 1278 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1279 } else {
248c42f3
RH
1280 TCGv tmp = tcg_temp_new();
1281
1282 /* The instruction description has us left-shift the byte mask
1283 the same number of byte slots as the data and apply the zap
1284 at the end. This is equivalent to simply performing the zap
1285 first and shifting afterward. */
1286 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1287
1288 if (islit) {
1289 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1290 } else {
1291 TCGv shift = tcg_temp_new();
1292 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1293 tcg_gen_shli_i64(shift, shift, 3);
1294 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1295 tcg_temp_free(shift);
1296 }
1297 tcg_temp_free(tmp);
377a43b6 1298 }
b3249f63
AJ
1299}
1300
ffec44f1
RH
1301/* MSKWH, MSKLH, MSKQH */
1302static void gen_msk_h(int ra, int rb, int rc, int islit,
1303 uint8_t lit, uint8_t byte_mask)
1304{
67debe3a 1305 if (unlikely(rc == 31)) {
ffec44f1 1306 return;
67debe3a 1307 } else if (unlikely(ra == 31)) {
ffec44f1 1308 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1309 } else if (islit) {
ffec44f1
RH
1310 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1311 } else {
1312 TCGv shift = tcg_temp_new();
1313 TCGv mask = tcg_temp_new();
1314
1315 /* The instruction description is as above, where the byte_mask
1316 is shifted left, and then we extract bits <15:8>. This can be
1317 emulated with a right-shift on the expanded byte mask. This
1318 requires extra care because for an input <2:0> == 0 we need a
1319 shift of 64 bits in order to generate a zero. This is done by
1320 splitting the shift into two parts, the variable shift - 1
1321 followed by a constant 1 shift. The code we expand below is
1322 equivalent to ~((B & 7) * 8) & 63. */
1323
1324 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1325 tcg_gen_shli_i64(shift, shift, 3);
1326 tcg_gen_not_i64(shift, shift);
1327 tcg_gen_andi_i64(shift, shift, 0x3f);
1328 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1329 tcg_gen_shr_i64(mask, mask, shift);
1330 tcg_gen_shri_i64(mask, mask, 1);
1331
1332 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1333
1334 tcg_temp_free(mask);
1335 tcg_temp_free(shift);
1336 }
1337}
1338
14ab1634 1339/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1340static void gen_msk_l(int ra, int rb, int rc, int islit,
1341 uint8_t lit, uint8_t byte_mask)
14ab1634 1342{
67debe3a 1343 if (unlikely(rc == 31)) {
14ab1634 1344 return;
67debe3a 1345 } else if (unlikely(ra == 31)) {
14ab1634 1346 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 1347 } else if (islit) {
14ab1634
RH
1348 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1349 } else {
1350 TCGv shift = tcg_temp_new();
1351 TCGv mask = tcg_temp_new();
1352
1353 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1354 tcg_gen_shli_i64(shift, shift, 3);
1355 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1356 tcg_gen_shl_i64(mask, mask, shift);
1357
1358 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1359
1360 tcg_temp_free(mask);
1361 tcg_temp_free(shift);
1362 }
1363}
1364
04acd307 1365/* Code to call arith3 helpers */
a7812ae4 1366#define ARITH3(name) \
636aa200
BS
1367static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1368 uint8_t lit) \
a7812ae4
PB
1369{ \
1370 if (unlikely(rc == 31)) \
1371 return; \
1372 \
1373 if (ra != 31) { \
1374 if (islit) { \
1375 TCGv tmp = tcg_const_i64(lit); \
1376 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1377 tcg_temp_free(tmp); \
1378 } else \
1379 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1380 } else { \
1381 TCGv tmp1 = tcg_const_i64(0); \
1382 if (islit) { \
1383 TCGv tmp2 = tcg_const_i64(lit); \
1384 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1385 tcg_temp_free(tmp2); \
1386 } else \
1387 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1388 tcg_temp_free(tmp1); \
1389 } \
b3249f63 1390}
2958620f 1391ARITH3(cmpbge)
13e4df99
RH
1392ARITH3(minub8)
1393ARITH3(minsb8)
1394ARITH3(minuw4)
1395ARITH3(minsw4)
1396ARITH3(maxub8)
1397ARITH3(maxsb8)
1398ARITH3(maxuw4)
1399ARITH3(maxsw4)
1400ARITH3(perr)
1401
2958620f
RH
 1402/* Code to call arith3 helpers that take cpu_env (the /V forms, which can raise integer overflow) */
1403#define ARITH3_EX(name) \
1404 static inline void glue(gen_, name)(int ra, int rb, int rc, \
1405 int islit, uint8_t lit) \
1406 { \
1407 if (unlikely(rc == 31)) { \
1408 return; \
1409 } \
1410 if (ra != 31) { \
1411 if (islit) { \
1412 TCGv tmp = tcg_const_i64(lit); \
1413 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1414 cpu_ir[ra], tmp); \
1415 tcg_temp_free(tmp); \
1416 } else { \
1417 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1418 cpu_ir[ra], cpu_ir[rb]); \
1419 } \
1420 } else { \
1421 TCGv tmp1 = tcg_const_i64(0); \
1422 if (islit) { \
1423 TCGv tmp2 = tcg_const_i64(lit); \
1424 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2); \
1425 tcg_temp_free(tmp2); \
1426 } else { \
1427 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
1428 } \
1429 tcg_temp_free(tmp1); \
1430 } \
1431 }
1432ARITH3_EX(addlv)
1433ARITH3_EX(sublv)
1434ARITH3_EX(addqv)
1435ARITH3_EX(subqv)
1436ARITH3_EX(mullv)
1437ARITH3_EX(mulqv)
1438
13e4df99
RH
1439#define MVIOP2(name) \
1440static inline void glue(gen_, name)(int rb, int rc) \
1441{ \
1442 if (unlikely(rc == 31)) \
1443 return; \
1444 if (unlikely(rb == 31)) \
1445 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1446 else \
1447 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1448}
1449MVIOP2(pklb)
1450MVIOP2(pkwb)
1451MVIOP2(unpkbl)
1452MVIOP2(unpkbw)
b3249f63 1453
9e05960f
RH
1454static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1455 int islit, uint8_t lit)
01ff9cc8 1456{
9e05960f 1457 TCGv va, vb;
01ff9cc8 1458
9e05960f 1459 if (unlikely(rc == 31)) {
13e4df99 1460 return;
9e05960f 1461 }
01ff9cc8 1462
9e05960f
RH
1463 if (ra == 31) {
1464 va = tcg_const_i64(0);
1465 } else {
1466 va = cpu_ir[ra];
1467 }
1468 if (islit) {
1469 vb = tcg_const_i64(lit);
1470 } else {
1471 vb = cpu_ir[rb];
1472 }
01ff9cc8 1473
9e05960f 1474 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1475
9e05960f
RH
1476 if (ra == 31) {
1477 tcg_temp_free(va);
1478 }
1479 if (islit) {
1480 tcg_temp_free(vb);
1481 }
01ff9cc8
AJ
1482}
1483
ac316ca4
RH
1484static void gen_rx(int ra, int set)
1485{
1486 TCGv_i32 tmp;
1487
1488 if (ra != 31) {
4d5712f1 1489 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1490 }
1491
1492 tmp = tcg_const_i32(set);
4d5712f1 1493 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1494 tcg_temp_free_i32(tmp);
1495}
1496
2ace7e55
RH
1497static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1498{
1499 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1500 to internal cpu registers. */
1501
1502 /* Unprivileged PAL call */
1503 if (palcode >= 0x80 && palcode < 0xC0) {
1504 switch (palcode) {
1505 case 0x86:
1506 /* IMB */
1507 /* No-op inside QEMU. */
1508 break;
1509 case 0x9E:
1510 /* RDUNIQUE */
1511 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1512 break;
1513 case 0x9F:
1514 /* WRUNIQUE */
1515 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1516 break;
1517 default:
ba96394e
RH
1518 palcode &= 0xbf;
1519 goto do_call_pal;
2ace7e55
RH
1520 }
1521 return NO_EXIT;
1522 }
1523
1524#ifndef CONFIG_USER_ONLY
1525 /* Privileged PAL code */
1526 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1527 switch (palcode) {
1528 case 0x01:
1529 /* CFLUSH */
1530 /* No-op inside QEMU. */
1531 break;
1532 case 0x02:
1533 /* DRAINA */
1534 /* No-op inside QEMU. */
1535 break;
1536 case 0x2D:
1537 /* WRVPTPTR */
4d5712f1 1538 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
2ace7e55
RH
1539 break;
1540 case 0x31:
1541 /* WRVAL */
1542 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1543 break;
1544 case 0x32:
1545 /* RDVAL */
1546 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1547 break;
1548
1549 case 0x35: {
1550 /* SWPIPL */
1551 TCGv tmp;
1552
1553 /* Note that we already know we're in kernel mode, so we know
1554 that PS only contains the 3 IPL bits. */
4d5712f1 1555 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1556
1557 /* But make sure and store only the 3 IPL bits from the user. */
1558 tmp = tcg_temp_new();
1559 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
4d5712f1 1560 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1561 tcg_temp_free(tmp);
1562 break;
1563 }
1564
1565 case 0x36:
1566 /* RDPS */
4d5712f1 1567 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
2ace7e55
RH
1568 break;
1569 case 0x38:
1570 /* WRUSP */
1571 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1572 break;
1573 case 0x3A:
1574 /* RDUSP */
1575 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1576 break;
1577 case 0x3C:
1578 /* WHAMI */
1579 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
55e5c285 1580 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
2ace7e55
RH
1581 break;
1582
1583 default:
ba96394e
RH
1584 palcode &= 0x3f;
1585 goto do_call_pal;
2ace7e55
RH
1586 }
1587 return NO_EXIT;
1588 }
1589#endif
2ace7e55 1590 return gen_invalid(ctx);
ba96394e
RH
1591
1592 do_call_pal:
1593#ifdef CONFIG_USER_ONLY
1594 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1595#else
1596 {
1597 TCGv pc = tcg_const_i64(ctx->pc);
1598 TCGv entry = tcg_const_i64(palcode & 0x80
1599 ? 0x2000 + (palcode - 0x80) * 64
1600 : 0x1000 + palcode * 64);
1601
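        /* Worked example (added): with this layout an unprivileged
           CALL_PAL 0x83 enters the emulated PALcode at
           0x2000 + (0x83 - 0x80) * 64 == 0x20c0, while a privileged
           CALL_PAL 0x09 enters at 0x1000 + 0x09 * 64 == 0x1240. */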
1602 gen_helper_call_pal(cpu_env, pc, entry);
1603
1604 tcg_temp_free(entry);
1605 tcg_temp_free(pc);
a9ead832
RH
1606
1607 /* Since the destination is running in PALmode, we don't really
73f395fa 1608 need the page permissions check. We'll see the existence of
a9ead832
RH
1609 the page when we create the TB, and we'll flush all TBs if
1610 we change the PAL base register. */
1611 if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
1612 tcg_gen_goto_tb(0);
8cfd0495 1613 tcg_gen_exit_tb((uintptr_t)ctx->tb);
a9ead832
RH
1614 return EXIT_GOTO_TB;
1615 }
1616
ba96394e
RH
1617 return EXIT_PC_UPDATED;
1618 }
1619#endif
2ace7e55
RH
1620}
1621
26b46094
RH
1622#ifndef CONFIG_USER_ONLY
1623
1624#define PR_BYTE 0x100000
1625#define PR_LONG 0x200000
1626
1627static int cpu_pr_data(int pr)
1628{
1629 switch (pr) {
1630 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1631 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1632 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1633 case 3: return offsetof(CPUAlphaState, trap_arg0);
1634 case 4: return offsetof(CPUAlphaState, trap_arg1);
1635 case 5: return offsetof(CPUAlphaState, trap_arg2);
1636 case 6: return offsetof(CPUAlphaState, exc_addr);
1637 case 7: return offsetof(CPUAlphaState, palbr);
1638 case 8: return offsetof(CPUAlphaState, ptbr);
1639 case 9: return offsetof(CPUAlphaState, vptptr);
1640 case 10: return offsetof(CPUAlphaState, unique);
1641 case 11: return offsetof(CPUAlphaState, sysval);
1642 case 12: return offsetof(CPUAlphaState, usp);
1643
1644 case 32 ... 39:
1645 return offsetof(CPUAlphaState, shadow[pr - 32]);
1646 case 40 ... 63:
1647 return offsetof(CPUAlphaState, scratch[pr - 40]);
c781cf96
RH
1648
1649 case 251:
1650 return offsetof(CPUAlphaState, alarm_expire);
26b46094
RH
1651 }
1652 return 0;
1653}
1654
c781cf96 1655static ExitStatus gen_mfpr(int ra, int regno)
26b46094
RH
1656{
1657 int data = cpu_pr_data(regno);
1658
1659 /* In our emulated PALcode, these processor registers have no
1660 side effects from reading. */
1661 if (ra == 31) {
c781cf96
RH
1662 return NO_EXIT;
1663 }
1664
19e0cbb8
RH
1665 /* Special help for VMTIME and WALLTIME. */
1666 if (regno == 250 || regno == 249) {
1667 void (*helper)(TCGv) = gen_helper_get_walltime;
1668 if (regno == 249) {
1669 helper = gen_helper_get_vmtime;
1670 }
c781cf96
RH
1671 if (use_icount) {
1672 gen_io_start();
19e0cbb8 1673 helper(cpu_ir[ra]);
c781cf96
RH
1674 gen_io_end();
1675 return EXIT_PC_STALE;
1676 } else {
19e0cbb8 1677 helper(cpu_ir[ra]);
c781cf96
RH
1678 return NO_EXIT;
1679 }
26b46094
RH
1680 }
1681
1682 /* The basic registers are data only, and unknown registers
1683 are read-zero, write-ignore. */
1684 if (data == 0) {
1685 tcg_gen_movi_i64(cpu_ir[ra], 0);
1686 } else if (data & PR_BYTE) {
1687 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1688 } else if (data & PR_LONG) {
1689 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1690 } else {
1691 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1692 }
c781cf96 1693 return NO_EXIT;
26b46094
RH
1694}
1695
bc24270e 1696static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1697{
1698 TCGv tmp;
bc24270e 1699 int data;
26b46094
RH
1700
1701 if (rb == 31) {
1702 tmp = tcg_const_i64(0);
1703 } else {
1704 tmp = cpu_ir[rb];
1705 }
1706
bc24270e
RH
1707 switch (regno) {
1708 case 255:
3b4fefd6 1709 /* TBIA */
69163fbb 1710 gen_helper_tbia(cpu_env);
bc24270e
RH
1711 break;
1712
1713 case 254:
3b4fefd6 1714 /* TBIS */
69163fbb 1715 gen_helper_tbis(cpu_env, tmp);
bc24270e
RH
1716 break;
1717
1718 case 253:
1719 /* WAIT */
1720 tmp = tcg_const_i64(1);
259186a7
AF
1721 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1722 offsetof(CPUState, halted));
bc24270e
RH
1723 return gen_excp(ctx, EXCP_HLT, 0);
1724
034ebc27
RH
1725 case 252:
1726 /* HALT */
1727 gen_helper_halt(tmp);
1728 return EXIT_PC_STALE;
1729
c781cf96
RH
1730 case 251:
1731 /* ALARM */
69163fbb 1732 gen_helper_set_alarm(cpu_env, tmp);
c781cf96
RH
1733 break;
1734
a9ead832
RH
1735 case 7:
1736 /* PALBR */
1737 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
1738 /* Changing the PAL base register implies un-chaining all of the TBs
1739 that ended with a CALL_PAL. Since the base register usually only
1740 changes during boot, flushing everything works well. */
1741 gen_helper_tb_flush(cpu_env);
1742 return EXIT_PC_STALE;
1743
bc24270e 1744 default:
3b4fefd6
RH
1745 /* The basic registers are data only, and unknown registers
1746 are read-zero, write-ignore. */
bc24270e 1747 data = cpu_pr_data(regno);
3b4fefd6
RH
1748 if (data != 0) {
1749 if (data & PR_BYTE) {
1750 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1751 } else if (data & PR_LONG) {
1752 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1753 } else {
1754 tcg_gen_st_i64(tmp, cpu_env, data);
1755 }
26b46094 1756 }
bc24270e 1757 break;
26b46094
RH
1758 }
1759
1760 if (rb == 31) {
1761 tcg_temp_free(tmp);
1762 }
bc24270e
RH
1763
1764 return NO_EXIT;
26b46094
RH
1765}
1766#endif /* !USER_ONLY*/
1767
5238c886
RH
1768#define REQUIRE_TB_FLAG(FLAG) \
1769 do { \
1770 if ((ctx->tb->flags & (FLAG)) == 0) { \
1771 goto invalid_opc; \
1772 } \
1773 } while (0)
1774
4af70374 1775static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1776{
1777 uint32_t palcode;
efa64351
MT
1778 int32_t disp21, disp16;
1779#ifndef CONFIG_USER_ONLY
1780 int32_t disp12;
1781#endif
f88fe4e3 1782 uint16_t fn11;
b6fb147c 1783 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1784 uint8_t lit;
4af70374 1785 ExitStatus ret;
4c9649a9
JM
1786
1787 /* Decode all instruction fields */
1788 opc = insn >> 26;
1789 ra = (insn >> 21) & 0x1F;
1790 rb = (insn >> 16) & 0x1F;
1791 rc = insn & 0x1F;
13e4df99 1792 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1793 if (rb == 31 && !islit) {
1794 islit = 1;
1795 lit = 0;
1796 } else
1797 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1798 palcode = insn & 0x03FFFFFF;
1799 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1800 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1801#ifndef CONFIG_USER_ONLY
4c9649a9 1802 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1803#endif
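    /* Sign-extension note (added): the shift pairs above extend the 21-bit
       and 12-bit displacement fields -- e.g. a disp21 field of 0x100000
       (only bit 20 set) becomes ((0x100000 << 11) >> 11) as int32_t,
       i.e. -1048576. */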
4c9649a9
JM
1804 fn11 = (insn >> 5) & 0x000007FF;
1805 fpfn = fn11 & 0x3F;
1806 fn7 = (insn >> 5) & 0x0000007F;
806991da 1807 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1808 opc, ra, rb, rc, disp16);
806991da 1809
4af70374 1810 ret = NO_EXIT;
4c9649a9
JM
1811 switch (opc) {
1812 case 0x00:
1813 /* CALL_PAL */
2ace7e55
RH
1814 ret = gen_call_pal(ctx, palcode);
1815 break;
4c9649a9
JM
1816 case 0x01:
1817 /* OPC01 */
1818 goto invalid_opc;
1819 case 0x02:
1820 /* OPC02 */
1821 goto invalid_opc;
1822 case 0x03:
1823 /* OPC03 */
1824 goto invalid_opc;
1825 case 0x04:
1826 /* OPC04 */
1827 goto invalid_opc;
1828 case 0x05:
1829 /* OPC05 */
1830 goto invalid_opc;
1831 case 0x06:
1832 /* OPC06 */
1833 goto invalid_opc;
1834 case 0x07:
1835 /* OPC07 */
1836 goto invalid_opc;
1837 case 0x08:
1838 /* LDA */
1ef4ef4e 1839 if (likely(ra != 31)) {
67debe3a 1840 if (rb != 31) {
3761035f 1841 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
67debe3a 1842 } else {
3761035f 1843 tcg_gen_movi_i64(cpu_ir[ra], disp16);
67debe3a 1844 }
496cb5b9 1845 }
4c9649a9
JM
1846 break;
1847 case 0x09:
1848 /* LDAH */
1ef4ef4e 1849 if (likely(ra != 31)) {
67debe3a 1850 if (rb != 31) {
3761035f 1851 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
67debe3a 1852 } else {
3761035f 1853 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
67debe3a 1854 }
496cb5b9 1855 }
4c9649a9
JM
1856 break;
1857 case 0x0A:
1858 /* LDBU */
5238c886
RH
1859 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1860 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1861 break;
4c9649a9
JM
1862 case 0x0B:
1863 /* LDQ_U */
f18cd223 1864 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1865 break;
1866 case 0x0C:
1867 /* LDWU */
5238c886
RH
1868 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1869 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1870 break;
4c9649a9
JM
1871 case 0x0D:
1872 /* STW */
5238c886 1873 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
6910b8f6 1874 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1875 break;
1876 case 0x0E:
1877 /* STB */
5238c886 1878 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
6910b8f6 1879 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1880 break;
1881 case 0x0F:
1882 /* STQ_U */
6910b8f6 1883 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1884 break;
1885 case 0x10:
1886 switch (fn7) {
1887 case 0x00:
1888 /* ADDL */
30c7183b
AJ
1889 if (likely(rc != 31)) {
1890 if (ra != 31) {
1891 if (islit) {
1892 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1893 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1894 } else {
30c7183b
AJ
1895 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1896 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1897 }
30c7183b 1898 } else {
67debe3a 1899 if (islit) {
dfaa8583 1900 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 1901 } else {
dfaa8583 1902 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 1903 }
30c7183b
AJ
1904 }
1905 }
4c9649a9
JM
1906 break;
1907 case 0x02:
1908 /* S4ADDL */
30c7183b
AJ
1909 if (likely(rc != 31)) {
1910 if (ra != 31) {
a7812ae4 1911 TCGv tmp = tcg_temp_new();
dfaa8583 1912 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
67debe3a 1913 if (islit) {
dfaa8583 1914 tcg_gen_addi_i64(tmp, tmp, lit);
67debe3a 1915 } else {
dfaa8583 1916 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
67debe3a 1917 }
dfaa8583
AJ
1918 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1919 tcg_temp_free(tmp);
30c7183b 1920 } else {
67debe3a 1921 if (islit) {
30c7183b 1922 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 1923 } else {
dfaa8583 1924 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 1925 }
30c7183b
AJ
1926 }
1927 }
4c9649a9
JM
1928 break;
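            /* S4ADDL above, like the rest of the scaled add/subtract group
               (S8ADDL, S4SUBL, S8SUBL and the quadword forms), computes
                   Rc = (Rav << s) op Rbv_or_lit      (s = 2 or 3)
               with the longword variants sign-extending the low 32 bits of
               the result, hence the trailing ext32s.  */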
1929 case 0x09:
1930 /* SUBL */
30c7183b
AJ
1931 if (likely(rc != 31)) {
1932 if (ra != 31) {
67debe3a 1933 if (islit) {
30c7183b 1934 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 1935 } else {
30c7183b 1936 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 1937 }
dfaa8583 1938 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b 1939 } else {
67debe3a 1940 if (islit) {
30c7183b 1941 tcg_gen_movi_i64(cpu_ir[rc], -lit);
67debe3a 1942 } else {
30c7183b
AJ
1943 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1944 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
83d1c8ae 1945 }
30c7183b
AJ
1946 }
1947 }
4c9649a9
JM
1948 break;
1949 case 0x0B:
1950 /* S4SUBL */
30c7183b
AJ
1951 if (likely(rc != 31)) {
1952 if (ra != 31) {
a7812ae4 1953 TCGv tmp = tcg_temp_new();
dfaa8583 1954 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
67debe3a 1955 if (islit) {
dfaa8583 1956 tcg_gen_subi_i64(tmp, tmp, lit);
67debe3a 1957 } else {
dfaa8583 1958 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
67debe3a 1959 }
dfaa8583
AJ
1960 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1961 tcg_temp_free(tmp);
30c7183b 1962 } else {
67debe3a 1963 if (islit) {
30c7183b 1964 tcg_gen_movi_i64(cpu_ir[rc], -lit);
67debe3a 1965 } else {
30c7183b
AJ
1966 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1967 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1968 }
30c7183b
AJ
1969 }
1970 }
4c9649a9
JM
1971 break;
1972 case 0x0F:
1973 /* CMPBGE */
a7812ae4 1974 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1975 break;
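            /* CMPBGE is an unsigned per-byte compare: bit i of Rc is set
               when byte i of Rav is >= byte i of Rbv, roughly
                   for (i = 0; i < 8; i++)
                       rc |= ((uint8_t)(ra >> i*8) >= (uint8_t)(rb >> i*8)) << i;
               It is the primitive commonly used for strlen/memchr-style
               byte-scanning loops.  */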
1976 case 0x12:
1977 /* S8ADDL */
30c7183b
AJ
1978 if (likely(rc != 31)) {
1979 if (ra != 31) {
a7812ae4 1980 TCGv tmp = tcg_temp_new();
dfaa8583 1981 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
67debe3a 1982 if (islit) {
dfaa8583 1983 tcg_gen_addi_i64(tmp, tmp, lit);
67debe3a 1984 } else {
dfaa8583 1985 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
67debe3a 1986 }
dfaa8583
AJ
1987 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1988 tcg_temp_free(tmp);
30c7183b 1989 } else {
67debe3a 1990 if (islit) {
30c7183b 1991 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 1992 } else {
dfaa8583 1993 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 1994 }
30c7183b
AJ
1995 }
1996 }
4c9649a9
JM
1997 break;
1998 case 0x1B:
1999 /* S8SUBL */
30c7183b
AJ
2000 if (likely(rc != 31)) {
2001 if (ra != 31) {
a7812ae4 2002 TCGv tmp = tcg_temp_new();
dfaa8583 2003 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
67debe3a 2004 if (islit) {
dfaa8583 2005 tcg_gen_subi_i64(tmp, tmp, lit);
67debe3a 2006 } else {
dfaa8583 2007 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
67debe3a 2008 }
dfaa8583
AJ
2009 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
2010 tcg_temp_free(tmp);
30c7183b 2011 } else {
67debe3a 2012 if (islit) {
30c7183b 2013 tcg_gen_movi_i64(cpu_ir[rc], -lit);
67debe3a 2014 } else {
30c7183b
AJ
2015 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2016 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 2017 }
30c7183b
AJ
2018 }
2019 }
4c9649a9
JM
2020 break;
2021 case 0x1D:
2022 /* CMPULT */
01ff9cc8 2023 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
2024 break;
2025 case 0x20:
2026 /* ADDQ */
30c7183b
AJ
2027 if (likely(rc != 31)) {
2028 if (ra != 31) {
67debe3a 2029 if (islit) {
30c7183b 2030 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 2031 } else {
dfaa8583 2032 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2033 }
30c7183b 2034 } else {
67debe3a 2035 if (islit) {
30c7183b 2036 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 2037 } else {
dfaa8583 2038 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2039 }
30c7183b
AJ
2040 }
2041 }
4c9649a9
JM
2042 break;
2043 case 0x22:
2044 /* S4ADDQ */
30c7183b
AJ
2045 if (likely(rc != 31)) {
2046 if (ra != 31) {
a7812ae4 2047 TCGv tmp = tcg_temp_new();
dfaa8583 2048 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
67debe3a 2049 if (islit) {
dfaa8583 2050 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
67debe3a 2051 } else {
dfaa8583 2052 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
67debe3a 2053 }
dfaa8583 2054 tcg_temp_free(tmp);
30c7183b 2055 } else {
67debe3a 2056 if (islit) {
30c7183b 2057 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 2058 } else {
dfaa8583 2059 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2060 }
30c7183b
AJ
2061 }
2062 }
4c9649a9
JM
2063 break;
2064 case 0x29:
2065 /* SUBQ */
30c7183b
AJ
2066 if (likely(rc != 31)) {
2067 if (ra != 31) {
67debe3a 2068 if (islit) {
30c7183b 2069 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 2070 } else {
dfaa8583 2071 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2072 }
30c7183b 2073 } else {
67debe3a 2074 if (islit) {
30c7183b 2075 tcg_gen_movi_i64(cpu_ir[rc], -lit);
67debe3a 2076 } else {
dfaa8583 2077 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2078 }
30c7183b
AJ
2079 }
2080 }
4c9649a9
JM
2081 break;
2082 case 0x2B:
2083 /* S4SUBQ */
30c7183b
AJ
2084 if (likely(rc != 31)) {
2085 if (ra != 31) {
a7812ae4 2086 TCGv tmp = tcg_temp_new();
dfaa8583 2087 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
67debe3a 2088 if (islit) {
dfaa8583 2089 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
67debe3a 2090 } else {
dfaa8583 2091 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
67debe3a 2092 }
dfaa8583 2093 tcg_temp_free(tmp);
30c7183b 2094 } else {
67debe3a 2095 if (islit) {
30c7183b 2096 tcg_gen_movi_i64(cpu_ir[rc], -lit);
67debe3a 2097 } else {
dfaa8583 2098 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2099 }
30c7183b
AJ
2100 }
2101 }
4c9649a9
JM
2102 break;
2103 case 0x2D:
2104 /* CMPEQ */
01ff9cc8 2105 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
2106 break;
2107 case 0x32:
2108 /* S8ADDQ */
30c7183b
AJ
2109 if (likely(rc != 31)) {
2110 if (ra != 31) {
a7812ae4 2111 TCGv tmp = tcg_temp_new();
dfaa8583 2112 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
67debe3a 2113 if (islit) {
dfaa8583 2114 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
67debe3a 2115 } else {
dfaa8583 2116 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
67debe3a 2117 }
dfaa8583 2118 tcg_temp_free(tmp);
30c7183b 2119 } else {
67debe3a 2120 if (islit) {
30c7183b 2121 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 2122 } else {
dfaa8583 2123 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2124 }
30c7183b
AJ
2125 }
2126 }
4c9649a9
JM
2127 break;
2128 case 0x3B:
2129 /* S8SUBQ */
30c7183b
AJ
2130 if (likely(rc != 31)) {
2131 if (ra != 31) {
a7812ae4 2132 TCGv tmp = tcg_temp_new();
dfaa8583 2133 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
67debe3a 2134 if (islit) {
dfaa8583 2135 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
67debe3a 2136 } else {
dfaa8583 2137 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
67debe3a 2138 }
dfaa8583 2139 tcg_temp_free(tmp);
30c7183b 2140 } else {
67debe3a 2141 if (islit) {
30c7183b 2142 tcg_gen_movi_i64(cpu_ir[rc], -lit);
67debe3a 2143 } else {
dfaa8583 2144 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2145 }
30c7183b
AJ
2146 }
2147 }
4c9649a9
JM
2148 break;
2149 case 0x3D:
2150 /* CMPULE */
01ff9cc8 2151 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2152 break;
2153 case 0x40:
2154 /* ADDL/V */
a7812ae4 2155 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2156 break;
2157 case 0x49:
2158 /* SUBL/V */
a7812ae4 2159 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2160 break;
2161 case 0x4D:
2162 /* CMPLT */
01ff9cc8 2163 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2164 break;
2165 case 0x60:
2166 /* ADDQ/V */
a7812ae4 2167 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2168 break;
2169 case 0x69:
2170 /* SUBQ/V */
a7812ae4 2171 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2172 break;
2173 case 0x6D:
2174 /* CMPLE */
01ff9cc8 2175 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2176 break;
2177 default:
2178 goto invalid_opc;
2179 }
2180 break;
2181 case 0x11:
2182 switch (fn7) {
2183 case 0x00:
2184 /* AND */
30c7183b 2185 if (likely(rc != 31)) {
67debe3a 2186 if (ra == 31) {
30c7183b 2187 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 2188 } else if (islit) {
30c7183b 2189 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 2190 } else {
30c7183b 2191 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2192 }
30c7183b 2193 }
4c9649a9
JM
2194 break;
2195 case 0x08:
2196 /* BIC */
30c7183b
AJ
2197 if (likely(rc != 31)) {
2198 if (ra != 31) {
67debe3a 2199 if (islit) {
30c7183b 2200 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
67debe3a 2201 } else {
1b581c44 2202 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2203 }
30c7183b
AJ
2204 } else
2205 tcg_gen_movi_i64(cpu_ir[rc], 0);
2206 }
4c9649a9
JM
2207 break;
2208 case 0x14:
2209 /* CMOVLBS */
bbe1dab4 2210 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2211 break;
2212 case 0x16:
2213 /* CMOVLBC */
bbe1dab4 2214 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2215 break;
2216 case 0x20:
2217 /* BIS */
30c7183b
AJ
2218 if (likely(rc != 31)) {
2219 if (ra != 31) {
67debe3a 2220 if (islit) {
30c7183b 2221 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 2222 } else {
30c7183b 2223 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2224 }
4c9649a9 2225 } else {
67debe3a 2226 if (islit) {
30c7183b 2227 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 2228 } else {
dfaa8583 2229 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2230 }
4c9649a9 2231 }
4c9649a9
JM
2232 }
2233 break;
2234 case 0x24:
2235 /* CMOVEQ */
bbe1dab4 2236 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2237 break;
2238 case 0x26:
2239 /* CMOVNE */
bbe1dab4 2240 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2241 break;
2242 case 0x28:
2243 /* ORNOT */
30c7183b 2244 if (likely(rc != 31)) {
dfaa8583 2245 if (ra != 31) {
67debe3a 2246 if (islit) {
30c7183b 2247 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
67debe3a 2248 } else {
1b581c44 2249 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2250 }
30c7183b 2251 } else {
67debe3a 2252 if (islit) {
30c7183b 2253 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
67debe3a 2254 } else {
30c7183b 2255 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2256 }
30c7183b
AJ
2257 }
2258 }
4c9649a9
JM
2259 break;
2260 case 0x40:
2261 /* XOR */
30c7183b
AJ
2262 if (likely(rc != 31)) {
2263 if (ra != 31) {
67debe3a 2264 if (islit) {
30c7183b 2265 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 2266 } else {
dfaa8583 2267 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2268 }
30c7183b 2269 } else {
67debe3a 2270 if (islit) {
30c7183b 2271 tcg_gen_movi_i64(cpu_ir[rc], lit);
67debe3a 2272 } else {
dfaa8583 2273 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2274 }
30c7183b
AJ
2275 }
2276 }
4c9649a9
JM
2277 break;
2278 case 0x44:
2279 /* CMOVLT */
bbe1dab4 2280 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2281 break;
2282 case 0x46:
2283 /* CMOVGE */
bbe1dab4 2284 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2285 break;
2286 case 0x48:
2287 /* EQV */
30c7183b
AJ
2288 if (likely(rc != 31)) {
2289 if (ra != 31) {
67debe3a 2290 if (islit) {
30c7183b 2291 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
67debe3a 2292 } else {
1b581c44 2293 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2294 }
30c7183b 2295 } else {
67debe3a 2296 if (islit) {
30c7183b 2297 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
67debe3a 2298 } else {
dfaa8583 2299 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 2300 }
30c7183b
AJ
2301 }
2302 }
4c9649a9
JM
2303 break;
2304 case 0x61:
2305 /* AMASK */
ae8ecd42 2306 if (likely(rc != 31)) {
a18ad893
RH
2307 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2308
2309 if (islit) {
2310 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2311 } else {
2312 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2313 }
ae8ecd42 2314 }
4c9649a9
JM
2315 break;
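            /* AMASK reports which architecture extensions (BWX, FIX, CIX,
               MVI, ...) this CPU implements: bits that come back *clear*
               mean "feature present", so the result is simply the source
               operand with the implemented-feature bits (recovered from the
               TB flags) masked off.  */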
2316 case 0x64:
2317 /* CMOVLE */
bbe1dab4 2318 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2319 break;
2320 case 0x66:
2321 /* CMOVGT */
bbe1dab4 2322 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2323 break;
2324 case 0x6C:
2325 /* IMPLVER */
801c4c28
RH
2326 if (rc != 31) {
2327 tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
2328 }
4c9649a9
JM
2329 break;
2330 default:
2331 goto invalid_opc;
2332 }
2333 break;
2334 case 0x12:
2335 switch (fn7) {
2336 case 0x02:
2337 /* MSKBL */
14ab1634 2338 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2339 break;
2340 case 0x06:
2341 /* EXTBL */
377a43b6 2342 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2343 break;
2344 case 0x0B:
2345 /* INSBL */
248c42f3 2346 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2347 break;
2348 case 0x12:
2349 /* MSKWL */
14ab1634 2350 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2351 break;
2352 case 0x16:
2353 /* EXTWL */
377a43b6 2354 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2355 break;
2356 case 0x1B:
2357 /* INSWL */
248c42f3 2358 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2359 break;
2360 case 0x22:
2361 /* MSKLL */
14ab1634 2362 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2363 break;
2364 case 0x26:
2365 /* EXTLL */
377a43b6 2366 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2367 break;
2368 case 0x2B:
2369 /* INSLL */
248c42f3 2370 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2371 break;
2372 case 0x30:
2373 /* ZAP */
a7812ae4 2374 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2375 break;
2376 case 0x31:
2377 /* ZAPNOT */
a7812ae4 2378 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2379 break;
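            /* ZAP/ZAPNOT treat the low 8 bits of Rbv (or the literal) as a
               byte mask: ZAP clears byte i of Rav when mask bit i is set,
               ZAPNOT keeps exactly those bytes.  The EXTx/INSx/MSKx groups
               below build on the same byte-masking idea.  */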
2380 case 0x32:
2381 /* MSKQL */
14ab1634 2382 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2383 break;
2384 case 0x34:
2385 /* SRL */
30c7183b
AJ
2386 if (likely(rc != 31)) {
2387 if (ra != 31) {
67debe3a 2388 if (islit) {
30c7183b 2389 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
67debe3a 2390 } else {
a7812ae4 2391 TCGv shift = tcg_temp_new();
30c7183b
AJ
2392 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2393 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2394 tcg_temp_free(shift);
dfaa8583 2395 }
30c7183b
AJ
2396 } else
2397 tcg_gen_movi_i64(cpu_ir[rc], 0);
2398 }
4c9649a9
JM
2399 break;
2400 case 0x36:
2401 /* EXTQL */
377a43b6 2402 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2403 break;
2404 case 0x39:
2405 /* SLL */
30c7183b
AJ
2406 if (likely(rc != 31)) {
2407 if (ra != 31) {
67debe3a 2408 if (islit) {
30c7183b 2409 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
67debe3a 2410 } else {
a7812ae4 2411 TCGv shift = tcg_temp_new();
30c7183b
AJ
2412 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2413 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2414 tcg_temp_free(shift);
dfaa8583 2415 }
30c7183b
AJ
2416 } else
2417 tcg_gen_movi_i64(cpu_ir[rc], 0);
2418 }
4c9649a9
JM
2419 break;
2420 case 0x3B:
2421 /* INSQL */
248c42f3 2422 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2423 break;
2424 case 0x3C:
2425 /* SRA */
30c7183b
AJ
2426 if (likely(rc != 31)) {
2427 if (ra != 31) {
67debe3a 2428 if (islit) {
30c7183b 2429 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
67debe3a 2430 } else {
a7812ae4 2431 TCGv shift = tcg_temp_new();
30c7183b
AJ
2432 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2433 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2434 tcg_temp_free(shift);
dfaa8583 2435 }
30c7183b
AJ
2436 } else
2437 tcg_gen_movi_i64(cpu_ir[rc], 0);
2438 }
4c9649a9
JM
2439 break;
2440 case 0x52:
2441 /* MSKWH */
ffec44f1 2442 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2443 break;
2444 case 0x57:
2445 /* INSWH */
50eb6e5c 2446 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2447 break;
2448 case 0x5A:
2449 /* EXTWH */
377a43b6 2450 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2451 break;
2452 case 0x62:
2453 /* MSKLH */
ffec44f1 2454 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2455 break;
2456 case 0x67:
2457 /* INSLH */
50eb6e5c 2458 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2459 break;
2460 case 0x6A:
2461 /* EXTLH */
377a43b6 2462 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2463 break;
2464 case 0x72:
2465 /* MSKQH */
ffec44f1 2466 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2467 break;
2468 case 0x77:
2469 /* INSQH */
50eb6e5c 2470 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2471 break;
2472 case 0x7A:
2473 /* EXTQH */
377a43b6 2474 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2475 break;
2476 default:
2477 goto invalid_opc;
2478 }
2479 break;
2480 case 0x13:
2481 switch (fn7) {
2482 case 0x00:
2483 /* MULL */
30c7183b 2484 if (likely(rc != 31)) {
67debe3a 2485 if (ra == 31) {
30c7183b 2486 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a
PB
2487 } else {
2488 if (islit) {
30c7183b 2489 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 2490 } else {
30c7183b 2491 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2492 }
30c7183b
AJ
2493 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2494 }
2495 }
4c9649a9
JM
2496 break;
2497 case 0x20:
2498 /* MULQ */
30c7183b 2499 if (likely(rc != 31)) {
67debe3a 2500 if (ra == 31) {
30c7183b 2501 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 2502 } else if (islit) {
30c7183b 2503 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
67debe3a 2504 } else {
30c7183b 2505 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
67debe3a 2506 }
30c7183b 2507 }
4c9649a9
JM
2508 break;
2509 case 0x30:
2510 /* UMULH */
962415fc
RH
2511 {
2512 TCGv low;
2513            if (unlikely(rc == 31)) {
2514 break;
2515 }
2516 if (ra == 31) {
2517 tcg_gen_movi_i64(cpu_ir[rc], 0);
2518 break;
2519 }
2520 low = tcg_temp_new();
2521 if (islit) {
2522 tcg_gen_movi_tl(low, lit);
2523 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2524 } else {
2525 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2526 }
2527 tcg_temp_free(low);
2528 }
4c9649a9
JM
2529 break;
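            /* UMULH needs only the high half of the 128-bit unsigned
               product.  tcg_gen_mulu2_i64 computes both halves at once,
               low into the scratch temp and high straight into Rc, which
               is why a literal operand is first materialized into the
               scratch register.  */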
2530 case 0x40:
2531 /* MULL/V */
a7812ae4 2532 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2533 break;
2534 case 0x60:
2535 /* MULQ/V */
a7812ae4 2536 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2537 break;
2538 default:
2539 goto invalid_opc;
2540 }
2541 break;
2542 case 0x14:
5238c886 2543 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
f24518b5 2544 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2545 case 0x04:
2546 /* ITOFS */
f18cd223
AJ
2547 if (likely(rc != 31)) {
2548 if (ra != 31) {
a7812ae4 2549 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2550 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2551 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2552 tcg_temp_free_i32(tmp);
f18cd223
AJ
2553 } else
2554 tcg_gen_movi_i64(cpu_fir[rc], 0);
2555 }
4c9649a9
JM
2556 break;
2557 case 0x0A:
2558 /* SQRTF */
5238c886
RH
2559 gen_fsqrtf(rb, rc);
2560 break;
4c9649a9
JM
2561 case 0x0B:
2562 /* SQRTS */
5238c886
RH
2563 gen_fsqrts(ctx, rb, rc, fn11);
2564 break;
4c9649a9
JM
2565 case 0x14:
2566 /* ITOFF */
f18cd223
AJ
2567 if (likely(rc != 31)) {
2568 if (ra != 31) {
a7812ae4 2569 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2570 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2571 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2572 tcg_temp_free_i32(tmp);
f18cd223
AJ
2573 } else
2574 tcg_gen_movi_i64(cpu_fir[rc], 0);
2575 }
4c9649a9
JM
2576 break;
2577 case 0x24:
2578 /* ITOFT */
f18cd223 2579 if (likely(rc != 31)) {
67debe3a 2580 if (ra != 31) {
f18cd223 2581 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
67debe3a 2582 } else {
f18cd223 2583 tcg_gen_movi_i64(cpu_fir[rc], 0);
67debe3a 2584 }
f18cd223 2585 }
4c9649a9
JM
2586 break;
2587 case 0x2A:
2588 /* SQRTG */
5238c886
RH
2589 gen_fsqrtg(rb, rc);
2590 break;
4c9649a9
JM
2591 case 0x02B:
2592 /* SQRTT */
5238c886
RH
2593 gen_fsqrtt(ctx, rb, rc, fn11);
2594 break;
4c9649a9
JM
2595 default:
2596 goto invalid_opc;
2597 }
2598 break;
2599 case 0x15:
2600 /* VAX floating point */
2601 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2602 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2603 case 0x00:
2604 /* ADDF */
a7812ae4 2605 gen_faddf(ra, rb, rc);
4c9649a9
JM
2606 break;
2607 case 0x01:
2608 /* SUBF */
a7812ae4 2609 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2610 break;
2611 case 0x02:
2612 /* MULF */
a7812ae4 2613 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2614 break;
2615 case 0x03:
2616 /* DIVF */
a7812ae4 2617 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2618 break;
2619 case 0x1E:
2620 /* CVTDG */
2621#if 0 // TODO
a7812ae4 2622 gen_fcvtdg(rb, rc);
4c9649a9
JM
2623#else
2624 goto invalid_opc;
2625#endif
2626 break;
2627 case 0x20:
2628 /* ADDG */
a7812ae4 2629 gen_faddg(ra, rb, rc);
4c9649a9
JM
2630 break;
2631 case 0x21:
2632 /* SUBG */
a7812ae4 2633 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2634 break;
2635 case 0x22:
2636 /* MULG */
a7812ae4 2637 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2638 break;
2639 case 0x23:
2640 /* DIVG */
a7812ae4 2641 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2642 break;
2643 case 0x25:
2644 /* CMPGEQ */
a7812ae4 2645 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2646 break;
2647 case 0x26:
2648 /* CMPGLT */
a7812ae4 2649 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2650 break;
2651 case 0x27:
2652 /* CMPGLE */
a7812ae4 2653 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2654 break;
2655 case 0x2C:
2656 /* CVTGF */
a7812ae4 2657 gen_fcvtgf(rb, rc);
4c9649a9
JM
2658 break;
2659 case 0x2D:
2660 /* CVTGD */
2661#if 0 // TODO
a7812ae4 2662 gen_fcvtgd(rb, rc);
4c9649a9
JM
2663#else
2664 goto invalid_opc;
2665#endif
2666 break;
2667 case 0x2F:
2668 /* CVTGQ */
a7812ae4 2669 gen_fcvtgq(rb, rc);
4c9649a9
JM
2670 break;
2671 case 0x3C:
2672 /* CVTQF */
a7812ae4 2673 gen_fcvtqf(rb, rc);
4c9649a9
JM
2674 break;
2675 case 0x3E:
2676 /* CVTQG */
a7812ae4 2677 gen_fcvtqg(rb, rc);
4c9649a9
JM
2678 break;
2679 default:
2680 goto invalid_opc;
2681 }
2682 break;
2683 case 0x16:
2684 /* IEEE floating-point */
f24518b5 2685 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2686 case 0x00:
2687 /* ADDS */
f24518b5 2688 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2689 break;
2690 case 0x01:
2691 /* SUBS */
f24518b5 2692 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2693 break;
2694 case 0x02:
2695 /* MULS */
f24518b5 2696 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2697 break;
2698 case 0x03:
2699 /* DIVS */
f24518b5 2700 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2701 break;
2702 case 0x20:
2703 /* ADDT */
f24518b5 2704 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2705 break;
2706 case 0x21:
2707 /* SUBT */
f24518b5 2708 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2709 break;
2710 case 0x22:
2711 /* MULT */
f24518b5 2712 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2713 break;
2714 case 0x23:
2715 /* DIVT */
f24518b5 2716 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2717 break;
2718 case 0x24:
2719 /* CMPTUN */
f24518b5 2720 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2721 break;
2722 case 0x25:
2723 /* CMPTEQ */
f24518b5 2724 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2725 break;
2726 case 0x26:
2727 /* CMPTLT */
f24518b5 2728 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2729 break;
2730 case 0x27:
2731 /* CMPTLE */
f24518b5 2732 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2733 break;
2734 case 0x2C:
a74b4d2c 2735 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2736 /* CVTST */
f24518b5 2737 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2738 } else {
2739 /* CVTTS */
f24518b5 2740 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2741 }
2742 break;
2743 case 0x2F:
2744 /* CVTTQ */
f24518b5 2745 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2746 break;
2747 case 0x3C:
2748 /* CVTQS */
f24518b5 2749 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2750 break;
2751 case 0x3E:
2752 /* CVTQT */
f24518b5 2753 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2754 break;
2755 default:
2756 goto invalid_opc;
2757 }
2758 break;
2759 case 0x17:
2760 switch (fn11) {
2761 case 0x010:
2762 /* CVTLQ */
a7812ae4 2763 gen_fcvtlq(rb, rc);
4c9649a9
JM
2764 break;
2765 case 0x020:
f18cd223 2766 if (likely(rc != 31)) {
a06d48d9 2767 if (ra == rb) {
4c9649a9 2768 /* FMOV */
67debe3a 2769 if (ra == 31) {
a06d48d9 2770 tcg_gen_movi_i64(cpu_fir[rc], 0);
67debe3a 2771 } else {
a06d48d9 2772 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
67debe3a 2773 }
a06d48d9 2774 } else {
f18cd223 2775 /* CPYS */
a7812ae4 2776 gen_fcpys(ra, rb, rc);
a06d48d9 2777 }
4c9649a9
JM
2778 }
2779 break;
2780 case 0x021:
2781 /* CPYSN */
a7812ae4 2782 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2783 break;
2784 case 0x022:
2785 /* CPYSE */
a7812ae4 2786 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2787 break;
2788 case 0x024:
2789 /* MT_FPCR */
67debe3a 2790 if (likely(ra != 31)) {
a44a2777 2791 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
67debe3a 2792 } else {
f18cd223 2793 TCGv tmp = tcg_const_i64(0);
a44a2777 2794 gen_helper_store_fpcr(cpu_env, tmp);
f18cd223
AJ
2795 tcg_temp_free(tmp);
2796 }
4c9649a9
JM
2797 break;
2798 case 0x025:
2799 /* MF_FPCR */
67debe3a 2800 if (likely(ra != 31)) {
a44a2777 2801 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
67debe3a 2802 }
4c9649a9
JM
2803 break;
2804 case 0x02A:
2805 /* FCMOVEQ */
bbe1dab4 2806 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2807 break;
2808 case 0x02B:
2809 /* FCMOVNE */
bbe1dab4 2810 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2811 break;
2812 case 0x02C:
2813 /* FCMOVLT */
bbe1dab4 2814 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2815 break;
2816 case 0x02D:
2817 /* FCMOVGE */
bbe1dab4 2818 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2819 break;
2820 case 0x02E:
2821 /* FCMOVLE */
bbe1dab4 2822 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2823 break;
2824 case 0x02F:
2825 /* FCMOVGT */
bbe1dab4 2826 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2827 break;
2828 case 0x030:
2829 /* CVTQL */
a7812ae4 2830 gen_fcvtql(rb, rc);
4c9649a9
JM
2831 break;
2832 case 0x130:
2833 /* CVTQL/V */
4c9649a9
JM
2834 case 0x530:
2835 /* CVTQL/SV */
735cf45f
RH
2836 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2837           /v doesn't do.  The only thing I can think of is that /sv is a
2838 valid instruction merely for completeness in the ISA. */
2839 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2840 break;
2841 default:
2842 goto invalid_opc;
2843 }
2844 break;
2845 case 0x18:
2846 switch ((uint16_t)disp16) {
2847 case 0x0000:
2848 /* TRAPB */
4af70374 2849 /* No-op. */
4c9649a9
JM
2850 break;
2851 case 0x0400:
2852 /* EXCB */
4af70374 2853 /* No-op. */
4c9649a9
JM
2854 break;
2855 case 0x4000:
2856 /* MB */
2857 /* No-op */
2858 break;
2859 case 0x4400:
2860 /* WMB */
2861 /* No-op */
2862 break;
2863 case 0x8000:
2864 /* FETCH */
2865 /* No-op */
2866 break;
2867 case 0xA000:
2868 /* FETCH_M */
2869 /* No-op */
2870 break;
2871 case 0xC000:
2872 /* RPCC */
a9406ea1
RH
2873 if (ra != 31) {
2874 if (use_icount) {
2875 gen_io_start();
69163fbb 2876 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2877 gen_io_end();
2878 ret = EXIT_PC_STALE;
2879 } else {
69163fbb 2880 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2881 }
2882 }
4c9649a9
JM
2883 break;
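            /* RPCC reads the processor cycle counter.  With icount enabled
               the helper call is bracketed by gen_io_start()/gen_io_end()
               and the TB is ended via EXIT_PC_STALE, the usual pattern for
               reads whose result depends on the exact instruction count.  */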
2884 case 0xE000:
2885 /* RC */
ac316ca4 2886 gen_rx(ra, 0);
4c9649a9
JM
2887 break;
2888 case 0xE800:
2889 /* ECB */
4c9649a9
JM
2890 break;
2891 case 0xF000:
2892 /* RS */
ac316ca4 2893 gen_rx(ra, 1);
4c9649a9
JM
2894 break;
2895 case 0xF800:
2896 /* WH64 */
2897 /* No-op */
2898 break;
2899 default:
2900 goto invalid_opc;
2901 }
2902 break;
2903 case 0x19:
2904 /* HW_MFPR (PALcode) */
26b46094 2905#ifndef CONFIG_USER_ONLY
5238c886
RH
2906 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2907 return gen_mfpr(ra, insn & 0xffff);
2908#else
4c9649a9 2909 goto invalid_opc;
5238c886 2910#endif
4c9649a9 2911 case 0x1A:
49563a72
RH
2912 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2913 prediction stack action, which of course we don't implement. */
2914 if (rb != 31) {
3761035f 2915 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2916 } else {
3761035f 2917 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2918 }
2919 if (ra != 31) {
1304ca87 2920 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2921 }
4af70374 2922 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2923 break;
2924 case 0x1B:
2925 /* HW_LD (PALcode) */
a18ad893 2926#ifndef CONFIG_USER_ONLY
5238c886
RH
2927 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2928 {
a18ad893
RH
2929 TCGv addr;
2930
2931 if (ra == 31) {
2932 break;
2933 }
2934
2935 addr = tcg_temp_new();
67debe3a 2936 if (rb != 31) {
8bb6e981 2937 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
67debe3a 2938 } else {
8bb6e981 2939 tcg_gen_movi_i64(addr, disp12);
67debe3a 2940 }
8bb6e981
AJ
2941 switch ((insn >> 12) & 0xF) {
2942 case 0x0:
b5d51029 2943 /* Longword physical access (hw_ldl/p) */
fdfba1a2 2944 gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2945 break;
2946 case 0x1:
b5d51029 2947 /* Quadword physical access (hw_ldq/p) */
2c17449b 2948 gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2949 break;
2950 case 0x2:
b5d51029 2951 /* Longword physical access with lock (hw_ldl_l/p) */
c3082755 2952 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2953 break;
2954 case 0x3:
b5d51029 2955 /* Quadword physical access with lock (hw_ldq_l/p) */
c3082755 2956 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2957 break;
2958 case 0x4:
b5d51029 2959 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2960 goto invalid_opc;
8bb6e981 2961 case 0x5:
b5d51029 2962 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2963 goto invalid_opc;
8bb6e981
AJ
2964 break;
2965 case 0x6:
2966                 /* Invalid */
b5d51029 2967 goto invalid_opc;
8bb6e981
AJ
2968 case 0x7:
2969                 /* Invalid */
b5d51029 2970 goto invalid_opc;
8bb6e981 2971 case 0x8:
b5d51029 2972 /* Longword virtual access (hw_ldl) */
2374e73e 2973 goto invalid_opc;
8bb6e981 2974 case 0x9:
b5d51029 2975 /* Quadword virtual access (hw_ldq) */
2374e73e 2976 goto invalid_opc;
8bb6e981 2977 case 0xA:
b5d51029 2978 /* Longword virtual access with protection check (hw_ldl/w) */
f8da40ae 2979 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL);
8bb6e981
AJ
2980 break;
2981 case 0xB:
b5d51029 2982 /* Quadword virtual access with protection check (hw_ldq/w) */
f8da40ae 2983 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ);
8bb6e981
AJ
2984 break;
2985 case 0xC:
b5d51029 2986 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2987 goto invalid_opc;
8bb6e981 2988 case 0xD:
b5d51029 2989 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2990 goto invalid_opc;
8bb6e981
AJ
2991 case 0xE:
2992 /* Longword virtual access with alternate access mode and
2374e73e 2993 protection checks (hw_ldl/wa) */
f8da40ae 2994 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL);
8bb6e981
AJ
2995 break;
2996 case 0xF:
2997 /* Quadword virtual access with alternate access mode and
2374e73e 2998 protection checks (hw_ldq/wa) */
f8da40ae 2999 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ);
8bb6e981
AJ
3000 break;
3001 }
3002 tcg_temp_free(addr);
a18ad893 3003 break;
4c9649a9 3004 }
5238c886 3005#else
a18ad893 3006 goto invalid_opc;
5238c886 3007#endif
4c9649a9
JM
3008 case 0x1C:
3009 switch (fn7) {
3010 case 0x00:
3011 /* SEXTB */
5238c886 3012 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
ae8ecd42 3013 if (likely(rc != 31)) {
67debe3a 3014 if (islit) {
ae8ecd42 3015 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
67debe3a 3016 } else {
dfaa8583 3017 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
67debe3a 3018 }
ae8ecd42 3019 }
4c9649a9
JM
3020 break;
3021 case 0x01:
3022 /* SEXTW */
5238c886
RH
3023 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
3024 if (likely(rc != 31)) {
3025 if (islit) {
3026 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
3027 } else {
3028 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
a18ad893 3029 }
ae8ecd42 3030 }
5238c886 3031 break;
4c9649a9
JM
3032 case 0x30:
3033 /* CTPOP */
5238c886
RH
3034 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
3035 if (likely(rc != 31)) {
3036 if (islit) {
3037 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
3038 } else {
3039 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
a18ad893 3040 }
ae8ecd42 3041 }
5238c886 3042 break;
4c9649a9
JM
3043 case 0x31:
3044 /* PERR */
5238c886
RH
3045 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3046 gen_perr(ra, rb, rc, islit, lit);
3047 break;
4c9649a9
JM
3048 case 0x32:
3049 /* CTLZ */
5238c886
RH
3050 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
3051 if (likely(rc != 31)) {
3052 if (islit) {
3053 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3054 } else {
3055 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
a18ad893 3056 }
ae8ecd42 3057 }
5238c886 3058 break;
4c9649a9
JM
3059 case 0x33:
3060 /* CTTZ */
5238c886
RH
3061 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
3062 if (likely(rc != 31)) {
3063 if (islit) {
3064 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3065 } else {
3066 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
a18ad893 3067 }
ae8ecd42 3068 }
5238c886 3069 break;
4c9649a9
JM
3070 case 0x34:
3071 /* UNPKBW */
5238c886
RH
3072 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3073 if (real_islit || ra != 31) {
3074 goto invalid_opc;
a18ad893 3075 }
5238c886
RH
3076 gen_unpkbw(rb, rc);
3077 break;
4c9649a9 3078 case 0x35:
13e4df99 3079 /* UNPKBL */
5238c886
RH
3080 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3081 if (real_islit || ra != 31) {
3082 goto invalid_opc;
a18ad893 3083 }
5238c886
RH
3084 gen_unpkbl(rb, rc);
3085 break;
4c9649a9
JM
3086 case 0x36:
3087 /* PKWB */
5238c886
RH
3088 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3089 if (real_islit || ra != 31) {
3090 goto invalid_opc;
a18ad893 3091 }
5238c886
RH
3092 gen_pkwb(rb, rc);
3093 break;
4c9649a9
JM
3094 case 0x37:
3095 /* PKLB */
5238c886
RH
3096 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3097 if (real_islit || ra != 31) {
3098 goto invalid_opc;
a18ad893 3099 }
5238c886
RH
3100 gen_pklb(rb, rc);
3101 break;
4c9649a9
JM
3102 case 0x38:
3103 /* MINSB8 */
5238c886
RH
3104 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3105 gen_minsb8(ra, rb, rc, islit, lit);
3106 break;
4c9649a9
JM
3107 case 0x39:
3108 /* MINSW4 */
5238c886
RH
3109 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3110 gen_minsw4(ra, rb, rc, islit, lit);
3111 break;
4c9649a9
JM
3112 case 0x3A:
3113 /* MINUB8 */
5238c886
RH
3114 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3115 gen_minub8(ra, rb, rc, islit, lit);
3116 break;
4c9649a9
JM
3117 case 0x3B:
3118 /* MINUW4 */
5238c886
RH
3119 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3120 gen_minuw4(ra, rb, rc, islit, lit);
3121 break;
4c9649a9
JM
3122 case 0x3C:
3123 /* MAXUB8 */
5238c886
RH
3124 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3125 gen_maxub8(ra, rb, rc, islit, lit);
3126 break;
4c9649a9
JM
3127 case 0x3D:
3128 /* MAXUW4 */
5238c886
RH
3129 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3130 gen_maxuw4(ra, rb, rc, islit, lit);
3131 break;
4c9649a9
JM
3132 case 0x3E:
3133 /* MAXSB8 */
5238c886
RH
3134 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3135 gen_maxsb8(ra, rb, rc, islit, lit);
3136 break;
4c9649a9
JM
3137 case 0x3F:
3138 /* MAXSW4 */
5238c886
RH
3139 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
3140 gen_maxsw4(ra, rb, rc, islit, lit);
3141 break;
4c9649a9
JM
3142 case 0x70:
3143 /* FTOIT */
5238c886 3144 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
f18cd223 3145 if (likely(rc != 31)) {
67debe3a 3146 if (ra != 31) {
f18cd223 3147 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
67debe3a 3148 } else {
f18cd223 3149 tcg_gen_movi_i64(cpu_ir[rc], 0);
67debe3a 3150 }
f18cd223 3151 }
4c9649a9
JM
3152 break;
3153 case 0x78:
3154 /* FTOIS */
5238c886 3155 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
f18cd223 3156 if (rc != 31) {
a7812ae4 3157 TCGv_i32 tmp1 = tcg_temp_new_i32();
67debe3a 3158 if (ra != 31) {
a7812ae4 3159 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
67debe3a 3160 } else {
f18cd223 3161 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3162 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3163 tcg_temp_free(tmp2);
3164 }
3165 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3166 tcg_temp_free_i32(tmp1);
f18cd223 3167 }
4c9649a9
JM
3168 break;
3169 default:
3170 goto invalid_opc;
3171 }
3172 break;
3173 case 0x1D:
3174 /* HW_MTPR (PALcode) */
26b46094 3175#ifndef CONFIG_USER_ONLY
5238c886
RH
3176 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
3177 return gen_mtpr(ctx, rb, insn & 0xffff);
3178#else
4c9649a9 3179 goto invalid_opc;
5238c886 3180#endif
4c9649a9 3181 case 0x1E:
508b43ea 3182 /* HW_RET (PALcode) */
a18ad893 3183#ifndef CONFIG_USER_ONLY
5238c886
RH
3184 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
3185 if (rb == 31) {
3186 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3187 address from EXC_ADDR. This turns out to be useful for our
3188 emulation PALcode, so continue to accept it. */
3189 TCGv tmp = tcg_temp_new();
3190 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3191 gen_helper_hw_ret(cpu_env, tmp);
3192 tcg_temp_free(tmp);
3193 } else {
3194 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
4c9649a9 3195 }
5238c886
RH
3196 ret = EXIT_PC_UPDATED;
3197 break;
3198#else
a18ad893 3199 goto invalid_opc;
5238c886 3200#endif
4c9649a9
JM
3201 case 0x1F:
3202 /* HW_ST (PALcode) */
a18ad893 3203#ifndef CONFIG_USER_ONLY
5238c886
RH
3204 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
3205 {
8bb6e981 3206 TCGv addr, val;
a7812ae4 3207 addr = tcg_temp_new();
67debe3a 3208 if (rb != 31) {
8bb6e981 3209 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
67debe3a 3210 } else {
8bb6e981 3211 tcg_gen_movi_i64(addr, disp12);
67debe3a
PB
3212 }
3213 if (ra != 31) {
8bb6e981 3214 val = cpu_ir[ra];
67debe3a 3215 } else {
a7812ae4 3216 val = tcg_temp_new();
8bb6e981
AJ
3217 tcg_gen_movi_i64(val, 0);
3218 }
3219 switch ((insn >> 12) & 0xF) {
3220 case 0x0:
3221 /* Longword physical access */
ab1da857 3222 gen_helper_stl_phys(cpu_env, addr, val);
8bb6e981
AJ
3223 break;
3224 case 0x1:
3225 /* Quadword physical access */
f606604f 3226 gen_helper_stq_phys(cpu_env, addr, val);
8bb6e981
AJ
3227 break;
3228 case 0x2:
3229 /* Longword physical access with lock */
c3082755 3230 gen_helper_stl_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3231 break;
3232 case 0x3:
3233 /* Quadword physical access with lock */
c3082755 3234 gen_helper_stq_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3235 break;
3236 case 0x4:
3237 /* Longword virtual access */
2374e73e 3238 goto invalid_opc;
8bb6e981
AJ
3239 case 0x5:
3240 /* Quadword virtual access */
2374e73e 3241 goto invalid_opc;
8bb6e981
AJ
3242 case 0x6:
3243 /* Invalid */
3244 goto invalid_opc;
3245 case 0x7:
3246 /* Invalid */
3247 goto invalid_opc;
3248 case 0x8:
3249 /* Invalid */
3250 goto invalid_opc;
3251 case 0x9:
3252 /* Invalid */
3253 goto invalid_opc;
3254 case 0xA:
3255 /* Invalid */
3256 goto invalid_opc;
3257 case 0xB:
3258 /* Invalid */
3259 goto invalid_opc;
3260 case 0xC:
3261 /* Longword virtual access with alternate access mode */
2374e73e 3262 goto invalid_opc;
8bb6e981
AJ
3263 case 0xD:
3264 /* Quadword virtual access with alternate access mode */
2374e73e 3265 goto invalid_opc;
8bb6e981
AJ
3266 case 0xE:
3267 /* Invalid */
3268 goto invalid_opc;
3269 case 0xF:
3270 /* Invalid */
3271 goto invalid_opc;
3272 }
67debe3a 3273 if (ra == 31) {
8bb6e981 3274 tcg_temp_free(val);
67debe3a 3275 }
8bb6e981 3276 tcg_temp_free(addr);
a18ad893 3277 break;
4c9649a9 3278 }
5238c886 3279#else
a18ad893 3280 goto invalid_opc;
5238c886 3281#endif
4c9649a9
JM
3282 case 0x20:
3283 /* LDF */
f18cd223 3284 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3285 break;
3286 case 0x21:
3287 /* LDG */
f18cd223 3288 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3289 break;
3290 case 0x22:
3291 /* LDS */
f18cd223 3292 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3293 break;
3294 case 0x23:
3295 /* LDT */
f18cd223 3296 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3297 break;
3298 case 0x24:
3299 /* STF */
6910b8f6 3300 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3301 break;
3302 case 0x25:
3303 /* STG */
6910b8f6 3304 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3305 break;
3306 case 0x26:
3307 /* STS */
6910b8f6 3308 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3309 break;
3310 case 0x27:
3311 /* STT */
6910b8f6 3312 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3313 break;
3314 case 0x28:
3315 /* LDL */
f18cd223 3316 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3317 break;
3318 case 0x29:
3319 /* LDQ */
f18cd223 3320 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3321 break;
3322 case 0x2A:
3323 /* LDL_L */
f4ed8679 3324 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3325 break;
3326 case 0x2B:
3327 /* LDQ_L */
f4ed8679 3328 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3329 break;
3330 case 0x2C:
3331 /* STL */
6910b8f6 3332 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3333 break;
3334 case 0x2D:
3335 /* STQ */
6910b8f6 3336 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3337 break;
3338 case 0x2E:
3339 /* STL_C */
6910b8f6 3340 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3341 break;
3342 case 0x2F:
3343 /* STQ_C */
6910b8f6 3344 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3345 break;
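    /* LDx_L/STx_C form the load-locked/store-conditional pair: the locked
       loads record the reservation (roughly, address and loaded value, via
       the cpu_lock_* globals), and gen_store_conditional() performs the
       store and writes 1 or 0 back into Ra depending on whether that
       reservation is still intact.  */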
3346 case 0x30:
3347 /* BR */
4af70374 3348 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3349 break;
a7812ae4 3350 case 0x31: /* FBEQ */
4af70374 3351 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3352 break;
a7812ae4 3353 case 0x32: /* FBLT */
4af70374 3354 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3355 break;
a7812ae4 3356 case 0x33: /* FBLE */
4af70374 3357 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3358 break;
3359 case 0x34:
3360 /* BSR */
4af70374 3361 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3362 break;
a7812ae4 3363 case 0x35: /* FBNE */
4af70374 3364 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3365 break;
a7812ae4 3366 case 0x36: /* FBGE */
4af70374 3367 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3368 break;
a7812ae4 3369 case 0x37: /* FBGT */
4af70374 3370 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3371 break;
3372 case 0x38:
3373 /* BLBC */
4af70374 3374 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3375 break;
3376 case 0x39:
3377 /* BEQ */
4af70374 3378 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3379 break;
3380 case 0x3A:
3381 /* BLT */
4af70374 3382 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3383 break;
3384 case 0x3B:
3385 /* BLE */
4af70374 3386 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3387 break;
3388 case 0x3C:
3389 /* BLBS */
4af70374 3390 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3391 break;
3392 case 0x3D:
3393 /* BNE */
4af70374 3394 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3395 break;
3396 case 0x3E:
3397 /* BGE */
4af70374 3398 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3399 break;
3400 case 0x3F:
3401 /* BGT */
4af70374 3402 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3403 break;
3404 invalid_opc:
8aa3fa20 3405 ret = gen_invalid(ctx);
4c9649a9
JM
3406 break;
3407 }
3408
3409 return ret;
3410}
3411
86a35f7c 3412static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
636aa200 3413 TranslationBlock *tb,
86a35f7c 3414 bool search_pc)
4c9649a9 3415{
ed2803da 3416 CPUState *cs = CPU(cpu);
86a35f7c 3417 CPUAlphaState *env = &cpu->env;
4c9649a9
JM
3418 DisasContext ctx, *ctxp = &ctx;
3419 target_ulong pc_start;
b114b68a 3420 target_ulong pc_mask;
4c9649a9
JM
3421 uint32_t insn;
3422 uint16_t *gen_opc_end;
a1d1bb31 3423 CPUBreakpoint *bp;
4c9649a9 3424 int j, lj = -1;
4af70374 3425 ExitStatus ret;
2e70f6ef
PB
3426 int num_insns;
3427 int max_insns;
4c9649a9
JM
3428
3429 pc_start = tb->pc;
92414b31 3430 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3431
3432 ctx.tb = tb;
4c9649a9 3433 ctx.pc = pc_start;
bba9bdce 3434 ctx.mem_idx = cpu_mmu_index(env);
801c4c28 3435 ctx.implver = env->implver;
ed2803da 3436 ctx.singlestep_enabled = cs->singlestep_enabled;
f24518b5
RH
3437
3438 /* ??? Every TB begins with unset rounding mode, to be initialized on
3439        the first fp insn of the TB.  Alternatively we could define a proper
3440 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3441 to reset the FP_STATUS to that default at the end of any TB that
3442        changes the default.  We could even (gasp) dynamically figure out
3443 what default would be most efficient given the running program. */
3444 ctx.tb_rm = -1;
3445 /* Similarly for flush-to-zero. */
3446 ctx.tb_ftz = -1;
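    /* tb_rm and tb_ftz cache the rounding mode and flush-to-zero setting
       most recently programmed within this TB; -1 means "not yet known",
       so the first FP insn with a given qualifier emits the FP_STATUS
       setup and later insns using the same qualifier can skip it.  */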
3447
2e70f6ef
PB
3448 num_insns = 0;
3449 max_insns = tb->cflags & CF_COUNT_MASK;
b114b68a 3450 if (max_insns == 0) {
2e70f6ef 3451 max_insns = CF_COUNT_MASK;
b114b68a
RH
3452 }
3453
3454 if (in_superpage(&ctx, pc_start)) {
3455 pc_mask = (1ULL << 41) - 1;
3456 } else {
3457 pc_mask = ~TARGET_PAGE_MASK;
3458 }
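    /* A TB normally must not cross a target page boundary, so pc_mask
       covers a single page; code running in the identity-mapped superpage
       is not subject to remapping, so a much larger (2^41 byte) window is
       allowed there and TBs may span page boundaries.  */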
2e70f6ef 3459
806f352d 3460 gen_tb_start();
4af70374 3461 do {
f0c3c505
AF
3462 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
3463 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 3464 if (bp->pc == ctx.pc) {
4c9649a9
JM
3465 gen_excp(&ctx, EXCP_DEBUG, 0);
3466 break;
3467 }
3468 }
3469 }
3470 if (search_pc) {
92414b31 3471 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3472 if (lj < j) {
3473 lj++;
3474 while (lj < j)
ab1103de 3475 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9 3476 }
25983cad 3477 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
ab1103de 3478 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 3479 tcg_ctx.gen_opc_icount[lj] = num_insns;
4c9649a9 3480 }
67debe3a 3481 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 3482 gen_io_start();
67debe3a 3483 }
c3082755 3484 insn = cpu_ldl_code(env, ctx.pc);
2e70f6ef 3485 num_insns++;
c4b3be39 3486
fdefe51c 3487 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
c4b3be39
RH
3488 tcg_gen_debug_insn_start(ctx.pc);
3489 }
3490
4c9649a9
JM
3491 ctx.pc += 4;
3492 ret = translate_one(ctxp, insn);
19bf517b 3493
bf1b03fe
RH
3494 /* If we reach a page boundary, are single stepping,
3495 or exhaust instruction count, stop generation. */
3496 if (ret == NO_EXIT
b114b68a 3497 && ((ctx.pc & pc_mask) == 0
efd7f486 3498 || tcg_ctx.gen_opc_ptr >= gen_opc_end
bf1b03fe
RH
3499 || num_insns >= max_insns
3500 || singlestep
ca6862a6 3501 || ctx.singlestep_enabled)) {
bf1b03fe 3502 ret = EXIT_PC_STALE;
1b530a6d 3503 }
4af70374
RH
3504 } while (ret == NO_EXIT);
3505
3506 if (tb->cflags & CF_LAST_IO) {
3507 gen_io_end();
4c9649a9 3508 }
4af70374
RH
3509
3510 switch (ret) {
3511 case EXIT_GOTO_TB:
8aa3fa20 3512 case EXIT_NORETURN:
4af70374
RH
3513 break;
3514 case EXIT_PC_STALE:
496cb5b9 3515 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3516 /* FALLTHRU */
3517 case EXIT_PC_UPDATED:
ca6862a6 3518 if (ctx.singlestep_enabled) {
bf1b03fe
RH
3519 gen_excp_1(EXCP_DEBUG, 0);
3520 } else {
3521 tcg_gen_exit_tb(0);
3522 }
4af70374
RH
3523 break;
3524 default:
3525 abort();
4c9649a9 3526 }
4af70374 3527
806f352d 3528 gen_tb_end(tb, num_insns);
efd7f486 3529 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4c9649a9 3530 if (search_pc) {
92414b31 3531 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3532 lj++;
3533 while (lj <= j)
ab1103de 3534 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3535 } else {
3536 tb->size = ctx.pc - pc_start;
2e70f6ef 3537 tb->icount = num_insns;
4c9649a9 3538 }
4af70374 3539
806991da 3540#ifdef DEBUG_DISAS
8fec2b8c 3541 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39 3542 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 3543 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
93fcfe39 3544 qemu_log("\n");
4c9649a9 3545 }
4c9649a9 3546#endif
4c9649a9
JM
3547}
3548
4d5712f1 3549void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3550{
86a35f7c 3551 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
4c9649a9
JM
3552}
3553
4d5712f1 3554void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3555{
86a35f7c 3556 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
4c9649a9
JM
3557}
3558
4d5712f1 3559void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 3560{
25983cad 3561 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
d2856f1a 3562}