]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
tcg: Use uintptr_t in TCGHelperInfo
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
4c9649a9 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
1de7afc9 22#include "qemu/host-utils.h"
57fec1fe 23#include "tcg-op.h"
4c9649a9 24
a7812ae4
PB
25#include "helper.h"
26#define GEN_HELPER 1
27#include "helper.h"
28
19188121 29#undef ALPHA_DEBUG_DISAS
f24518b5 30#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
31
32#ifdef ALPHA_DEBUG_DISAS
806991da 33# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
34#else
35# define LOG_DISAS(...) do { } while (0)
36#endif
37
4c9649a9
JM
38typedef struct DisasContext DisasContext;
39struct DisasContext {
4af70374 40 struct TranslationBlock *tb;
4c9649a9
JM
41 uint64_t pc;
42 int mem_idx;
f24518b5
RH
43
44 /* Current rounding mode for this TB. */
45 int tb_rm;
46 /* Current flush-to-zero setting for this TB. */
47 int tb_ftz;
ca6862a6 48
801c4c28
RH
49 /* implver value for this CPU. */
50 int implver;
51
ca6862a6 52 bool singlestep_enabled;
4c9649a9
JM
53};
54
4af70374
RH
55/* Return values from translate_one, indicating the state of the TB.
56 Note that zero indicates that we are not exiting the TB. */
57
58typedef enum {
59 NO_EXIT,
60
61 /* We have emitted one or more goto_tb. No fixup required. */
62 EXIT_GOTO_TB,
63
64 /* We are not using a goto_tb (for whatever reason), but have updated
65 the PC (for whatever reason), so there's no need to do it again on
66 exiting the TB. */
67 EXIT_PC_UPDATED,
68
69 /* We are exiting the TB, but have neither emitted a goto_tb, nor
70 updated the PC for the next instruction to be executed. */
8aa3fa20
RH
71 EXIT_PC_STALE,
72
73 /* We are ending the TB with a noreturn function call, e.g. longjmp.
74 No following code will be executed. */
75 EXIT_NORETURN,
4af70374
RH
76} ExitStatus;
77
3761035f 78/* global register indexes */
a7812ae4 79static TCGv_ptr cpu_env;
496cb5b9 80static TCGv cpu_ir[31];
f18cd223 81static TCGv cpu_fir[31];
496cb5b9 82static TCGv cpu_pc;
6910b8f6
RH
83static TCGv cpu_lock_addr;
84static TCGv cpu_lock_st_addr;
85static TCGv cpu_lock_value;
2ace7e55
RH
86static TCGv cpu_unique;
87#ifndef CONFIG_USER_ONLY
88static TCGv cpu_sysval;
89static TCGv cpu_usp;
ab471ade 90#endif
496cb5b9 91
3761035f 92/* register names */
f18cd223 93static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef 94
022c62cb 95#include "exec/gen-icount.h"
2e70f6ef 96
/* One-time allocation of the fixed TCG globals: the env pointer, the
   31 integer and 31 FP register files, pc, the load-locked/store-
   conditional state, and the PALcode scratch registers.  */
void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Walk cpu_reg_names, carving out one NUL-terminated name per
       register; the +4/+5 (+5/+6) steps account for "irN"/"irNN"
       ("firN"/"firNN") plus the terminator.  */
    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
149
/* Emit a call to the exception helper with EXCEPTION and ERROR_CODE.
   Does not update cpu_pc; callers wanting that use gen_excp.  */
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
8aa3fa20 160
bf1b03fe
RH
/* Raise an exception at the current translation PC.  The helper does
   not return, so the TB ends with EXIT_NORETURN.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    /* Make the guest PC visible to the exception helper.  */
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
167
8aa3fa20 168static inline ExitStatus gen_invalid(DisasContext *ctx)
4c9649a9 169{
8aa3fa20 170 return gen_excp(ctx, EXCP_OPCDEC, 0);
4c9649a9
JM
171}
172
/* Load an F-format (VAX single) float: fetch the 32-bit memory word
   and let the helper expand it to the 64-bit register layout.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
183
/* Load a G-format (VAX double) float: fetch the 64-bit word and let
   the helper convert the memory layout to the register layout.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
191
/* Load an S-format (IEEE single) float: fetch the 32-bit memory word
   and let the helper expand it to the 64-bit register layout.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
202
/* Load-locked longword: remember the address and loaded value so a
   later store-conditional (gen_store_conditional) can compare them.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
209
/* Load-locked quadword: as gen_qemu_ldl_l but for a 64-bit access.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
216
636aa200
BS
/* Common code for the memory-format load instructions: compute the
   effective address (base RB + DISP16, or absolute DISP16 when RB is
   $31; CLEAR masks the low 3 bits for LDx_U) and load into integer or
   FP register RA according to FP.  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
250
/* Store an F-format float: helper compresses the 64-bit register
   representation to the 32-bit memory layout, then store it.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
261
/* Store a G-format float: helper converts the register layout to the
   64-bit memory layout, then store it.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
269
/* Store an S-format float: helper compresses the 64-bit register
   representation to the 32-bit memory layout, then store it.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
280
636aa200
BS
/* Common code for the memory-format store instructions: compute the
   effective address as in gen_load_mem and store register RA ($31
   stores a literal zero) from the integer or FP file per FP.  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        /* $31 reads as zero; materialize a constant to store.  */
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        /* Only the constant is ours to free; registers are globals.  */
        tcg_temp_free(va);
    }
}
314
/* Emit STL_C/STQ_C (store-conditional).  QUAD selects 64- vs 32-bit.
   RA receives 1 on success, 0 on failure, per the architecture.  */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if the address no longer matches the lock address.  */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        /* Fail if memory no longer holds the locked value.  */
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* Clear the lock so a retry must re-execute the LDx_L.  */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
381
/* Return true if ADDR lies in the kernel-mode superpage region
   (negative address with bits 41..42 == 2, properly sign-extended).  */
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
389
/* Decide whether a direct branch to DEST may be chained with goto_tb.  */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
dbb30fe6 403
4af70374
RH
404static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
405{
406 uint64_t dest = ctx->pc + (disp << 2);
407
408 if (ra != 31) {
409 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
410 }
411
412 /* Notice branch-to-next; used to initialize RA with the PC. */
413 if (disp == 0) {
414 return 0;
415 } else if (use_goto_tb(ctx, dest)) {
416 tcg_gen_goto_tb(0);
417 tcg_gen_movi_i64(cpu_pc, dest);
4b4a72e5 418 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
4af70374
RH
419 return EXIT_GOTO_TB;
420 } else {
421 tcg_gen_movi_i64(cpu_pc, dest);
422 return EXIT_PC_UPDATED;
423 }
dbb30fe6
RH
424}
425
4af70374
RH
/* Emit a conditional branch to PC + DISP*4, taken when CMP compared
   against zero with COND holds.  Uses paired goto_tb chaining when
   allowed, otherwise a movcond to select the next PC.  */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Not-taken path: fall through to the next instruction.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        /* Taken path: branch to DEST.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        /* cpu_pc = (cmp COND 0) ? dest : next-pc.  */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
458
/* Emit an integer conditional branch testing register RA; with MASK
   set, only RA's low bit is tested (BLBC/BLBS forms).  */
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* $31 reads as zero.  */
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
477
4af70374 478/* Fold -0.0 for comparison with COND. */
dbb30fe6 479
4af70374 480static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 481{
dbb30fe6 482 uint64_t mzero = 1ull << 63;
f18cd223 483
dbb30fe6
RH
484 switch (cond) {
485 case TCG_COND_LE:
486 case TCG_COND_GT:
487 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 488 tcg_gen_mov_i64(dest, src);
a7812ae4 489 break;
dbb30fe6
RH
490
491 case TCG_COND_EQ:
492 case TCG_COND_NE:
493 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 494 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 495 break;
dbb30fe6
RH
496
497 case TCG_COND_GE:
dbb30fe6 498 case TCG_COND_LT:
4af70374
RH
499 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
500 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
501 tcg_gen_neg_i64(dest, dest);
502 tcg_gen_and_i64(dest, dest, src);
a7812ae4 503 break;
dbb30fe6 504
a7812ae4
PB
505 default:
506 abort();
f18cd223 507 }
dbb30fe6
RH
508}
509
4af70374
RH
/* Emit a floating-point conditional branch testing FP register RA,
   after folding -0.0 so an integer compare suffices.  */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
525
/* Emit CMOVxx: RC = (RA cond 0) ? (lit-or-RB) : RC.  With MASK set,
   only RA's low bit participates (CMOVLBC/CMOVLBS forms).  */
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        /* Writes to $31 are discarded.  */
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    /* Free only the temporaries we created; register globals stay.  */
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}
561
/* Emit FCMOVxx: FRC = (FRA cond 0, with -0.0 folded) ? FRB : FRC.  */
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        /* Writes to $f31 are discarded.  */
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
591
f24518b5
RH
592#define QUAL_RM_N 0x080 /* Round mode nearest even */
593#define QUAL_RM_C 0x000 /* Round mode chopped */
594#define QUAL_RM_M 0x040 /* Round mode minus infinity */
595#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
596#define QUAL_RM_MASK 0x0c0
597
598#define QUAL_U 0x100 /* Underflow enable (fp output) */
599#define QUAL_V 0x100 /* Overflow enable (int output) */
600#define QUAL_S 0x400 /* Software completion enable */
601#define QUAL_I 0x200 /* Inexact detection enable */
602
/* Sync softfloat's rounding mode with the instruction's /RM qualifier,
   skipping the store when it already matches the TB-tracked mode.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: take the mode from the FPCR at runtime.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
642
/* Sync softfloat's flush-to-zero flag with the /U qualifier, skipping
   the store when it matches the TB-tracked setting.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
672
/* Return a fresh temporary holding IEEE input register REG ($f31
   yields zero).  Without /S, emit the input-check helper first, which
   can raise on non-finite inputs; the caller frees the result.  */
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
691
/* Clear softfloat's accumulated exception flags before an FP op.  */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
703
/* Read the accumulated FP exception flags, mask off IGNORE, and call
   the raise helper (the /S variant when software completion is
   requested) with destination register RC for EXC_MASK tracking.  */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
741
/* As gen_fp_exc_raise_ignore, but inexact is ignored unless /I.  */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
f24518b5 746
593f17e5
RH
/* CVTLQ: convert the longword-in-FP-register encoding back to a
   sign-extended 64-bit integer, using only shifts and masks.  */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
768
735cf45f
RH
/* CVTQL: convert a 64-bit integer to the longword-in-FP-register
   encoding (inverse of gen_fcvtlq); no overflow check here.  */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* Scatter bits 31:30 to 63:62 and bits 29:0 to 58:29.  */
        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
788
/* CVTQL/V: as gen_fcvtql, but raise an integer-overflow arithmetic
   trap when the source does not fit in 32 signed bits.  */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        /* Overflow iff sign-extending the low 32 bits changes the value.  */
        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
803
4a58aedf
RH
/* Generate gen_fNAME(rb, rc) emitting a one-source VAX FP helper call;
   $f31 as source is replaced by a zero constant, $f31 as destination
   discards the operation.  */
#define FARITH2(name)                                                 \
    static inline void glue(gen_f, name)(int rb, int rc)              \
    {                                                                 \
        if (unlikely(rc == 31)) {                                     \
            return;                                                   \
        }                                                             \
        if (rb != 31) {                                               \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);   \
        } else {                                                      \
            TCGv tmp = tcg_const_i64(0);                              \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);           \
            tcg_temp_free(tmp);                                       \
        }                                                             \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
f24518b5 826
4a58aedf
RH
/* Common code for one-source IEEE operations: apply the rounding and
   flush-to-zero qualifiers, run HELPER, then raise FP exceptions.  */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
849
/* Generate gen_fNAME wrappers around gen_ieee_arith2 for each
   one-source IEEE operation.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
860
/* CVTTQ: T-float to quadword integer, dispatching on the qualifier
   combination to the cropped, software-completion, or general helper.  */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        /* Without /V or /I, the corresponding flags are suppressed.  */
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
900
4a58aedf
RH
/* Common code for integer-to-IEEE conversions (CVTQS/CVTQT).  Only
   inexact can be raised, so exception tracking is done only for /I.  */
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
936
/* Generate gen_fNAME wrappers around gen_ieee_intcvt.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
945
dc96be4b
RH
/* Common code for CPYS/CPYSN/CPYSE: combine the MASK-selected bits of
   RA (inverted when INV_A) with the remaining bits of RB into RC.
   ZA/ZB track operands that are statically zero ($f31) so the OR can
   be simplified; TCGV_UNUSED keeps the unassigned temps well-defined.  */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask; reuse the constant directly.  */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    /* Combine according to which operands are known-zero.  */
    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
1005
/* CPYS: copy RB with the sign bit taken from RA.  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}
1010
/* CPYSN: copy RB with the negated sign bit of RA.  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}
1015
/* CPYSE: copy RB with the sign and exponent field taken from RA.  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1020
4a58aedf
RH
/* Generate gen_fNAME(ra, rb, rc) emitting a two-source VAX FP helper
   call; $f31 sources are replaced by zero constants.  */
#define FARITH3(name)                                                 \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)      \
    {                                                                 \
        TCGv va, vb;                                                  \
                                                                      \
        if (unlikely(rc == 31)) {                                     \
            return;                                                   \
        }                                                             \
        if (ra == 31) {                                               \
            va = tcg_const_i64(0);                                    \
        } else {                                                      \
            va = cpu_fir[ra];                                         \
        }                                                             \
        if (rb == 31) {                                               \
            vb = tcg_const_i64(0);                                    \
        } else {                                                      \
            vb = cpu_fir[rb];                                         \
        }                                                             \
                                                                      \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);            \
                                                                      \
        if (ra == 31) {                                               \
            tcg_temp_free(va);                                        \
        }                                                             \
        if (rb == 31) {                                               \
            tcg_temp_free(vb);                                        \
        }                                                             \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
f24518b5
RH
1062
/* Common code for two-source IEEE arithmetic: apply the rounding and
   flush-to-zero qualifiers, run HELPER, then raise FP exceptions.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1087
/* Generate gen_fNAME wrappers around gen_ieee_arith3.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1102
/* Emit an IEEE comparison via HELPER.  Unlike gen_ieee_arith3, no
   rounding/flush qualifiers apply; inputs use the "compare" flavor of
   gen_ieee_input (third argument 1).  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1125
/* Expand gen_f<name> wrappers around gen_ieee_compare for each IEEE
   comparison helper.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
a7812ae4 1136
248c42f3
RH
/* Expand an 8-bit byte-select literal into a 64-bit mask: bit i of LIT
   selects byte i of the result (0xff when set, 0x00 when clear).  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    /* Build from the most significant byte down, shifting the
       accumulated mask up one byte slot per iteration.  */
    for (byte = 7; byte >= 0; --byte) {
        mask <<= 8;
        if (lit & (1 << byte)) {
            mask |= 0xff;
        }
    }
    return mask;
}
1148
87d98f95
RH
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        /* No bytes selected: result is zero.  */
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        /* Byte 0 only: zero-extend from 8 bits.  */
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        /* Bytes <1:0>: zero-extend from 16 bits.  */
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        /* Bytes <3:0>: zero-extend from 32 bits.  */
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        /* All bytes selected: plain move.  */
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        /* General case: AND with the expanded byte mask.  */
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
1175
1176static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1177{
1178 if (unlikely(rc == 31))
1179 return;
1180 else if (unlikely(ra == 31))
1181 tcg_gen_movi_i64(cpu_ir[rc], 0);
1182 else if (islit)
248c42f3 1183 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1184 else
1185 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1186}
1187
1188static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1189{
1190 if (unlikely(rc == 31))
1191 return;
1192 else if (unlikely(ra == 31))
1193 tcg_gen_movi_i64(cpu_ir[rc], 0);
1194 else if (islit)
248c42f3 1195 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1196 else
1197 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1198}
1199
1200
248c42f3 1201/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1202static void gen_ext_h(int ra, int rb, int rc, int islit,
1203 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1204{
1205 if (unlikely(rc == 31))
1206 return;
377a43b6
RH
1207 else if (unlikely(ra == 31))
1208 tcg_gen_movi_i64(cpu_ir[rc], 0);
1209 else {
dfaa8583 1210 if (islit) {
377a43b6
RH
1211 lit = (64 - (lit & 7) * 8) & 0x3f;
1212 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1213 } else {
377a43b6 1214 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1215 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1216 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1217 tcg_gen_neg_i64(tmp1, tmp1);
1218 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1219 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1220 tcg_temp_free(tmp1);
dfaa8583 1221 }
248c42f3 1222 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1223 }
b3249f63
AJ
1224}
1225
248c42f3 1226/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1227static void gen_ext_l(int ra, int rb, int rc, int islit,
1228 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1229{
1230 if (unlikely(rc == 31))
1231 return;
377a43b6
RH
1232 else if (unlikely(ra == 31))
1233 tcg_gen_movi_i64(cpu_ir[rc], 0);
1234 else {
dfaa8583 1235 if (islit) {
377a43b6 1236 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1237 } else {
a7812ae4 1238 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1239 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1240 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1241 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1242 tcg_temp_free(tmp);
fe2b269a 1243 }
248c42f3
RH
1244 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1245 }
1246}
1247
50eb6e5c
RH
1248/* INSWH, INSLH, INSQH */
1249static void gen_ins_h(int ra, int rb, int rc, int islit,
1250 uint8_t lit, uint8_t byte_mask)
1251{
1252 if (unlikely(rc == 31))
1253 return;
1254 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1255 tcg_gen_movi_i64(cpu_ir[rc], 0);
1256 else {
1257 TCGv tmp = tcg_temp_new();
1258
1259 /* The instruction description has us left-shift the byte mask
1260 and extract bits <15:8> and apply that zap at the end. This
1261 is equivalent to simply performing the zap first and shifting
1262 afterward. */
1263 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1264
1265 if (islit) {
1266 /* Note that we have handled the lit==0 case above. */
1267 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1268 } else {
1269 TCGv shift = tcg_temp_new();
1270
1271 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1272 Do this portably by splitting the shift into two parts:
1273 shift_count-1 and 1. Arrange for the -1 by using
1274 ones-complement instead of twos-complement in the negation:
1275 ~((B & 7) * 8) & 63. */
1276
1277 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1278 tcg_gen_shli_i64(shift, shift, 3);
1279 tcg_gen_not_i64(shift, shift);
1280 tcg_gen_andi_i64(shift, shift, 0x3f);
1281
1282 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1283 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1284 tcg_temp_free(shift);
1285 }
1286 tcg_temp_free(tmp);
1287 }
1288}
1289
248c42f3 1290/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1291static void gen_ins_l(int ra, int rb, int rc, int islit,
1292 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1293{
1294 if (unlikely(rc == 31))
1295 return;
1296 else if (unlikely(ra == 31))
1297 tcg_gen_movi_i64(cpu_ir[rc], 0);
1298 else {
1299 TCGv tmp = tcg_temp_new();
1300
1301 /* The instruction description has us left-shift the byte mask
1302 the same number of byte slots as the data and apply the zap
1303 at the end. This is equivalent to simply performing the zap
1304 first and shifting afterward. */
1305 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1306
1307 if (islit) {
1308 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1309 } else {
1310 TCGv shift = tcg_temp_new();
1311 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1312 tcg_gen_shli_i64(shift, shift, 3);
1313 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1314 tcg_temp_free(shift);
1315 }
1316 tcg_temp_free(tmp);
377a43b6 1317 }
b3249f63
AJ
1318}
1319
ffec44f1
RH
1320/* MSKWH, MSKLH, MSKQH */
1321static void gen_msk_h(int ra, int rb, int rc, int islit,
1322 uint8_t lit, uint8_t byte_mask)
1323{
1324 if (unlikely(rc == 31))
1325 return;
1326 else if (unlikely(ra == 31))
1327 tcg_gen_movi_i64(cpu_ir[rc], 0);
1328 else if (islit) {
1329 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1330 } else {
1331 TCGv shift = tcg_temp_new();
1332 TCGv mask = tcg_temp_new();
1333
1334 /* The instruction description is as above, where the byte_mask
1335 is shifted left, and then we extract bits <15:8>. This can be
1336 emulated with a right-shift on the expanded byte mask. This
1337 requires extra care because for an input <2:0> == 0 we need a
1338 shift of 64 bits in order to generate a zero. This is done by
1339 splitting the shift into two parts, the variable shift - 1
1340 followed by a constant 1 shift. The code we expand below is
1341 equivalent to ~((B & 7) * 8) & 63. */
1342
1343 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1344 tcg_gen_shli_i64(shift, shift, 3);
1345 tcg_gen_not_i64(shift, shift);
1346 tcg_gen_andi_i64(shift, shift, 0x3f);
1347 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1348 tcg_gen_shr_i64(mask, mask, shift);
1349 tcg_gen_shri_i64(mask, mask, 1);
1350
1351 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1352
1353 tcg_temp_free(mask);
1354 tcg_temp_free(shift);
1355 }
1356}
1357
14ab1634 1358/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1359static void gen_msk_l(int ra, int rb, int rc, int islit,
1360 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1361{
1362 if (unlikely(rc == 31))
1363 return;
1364 else if (unlikely(ra == 31))
1365 tcg_gen_movi_i64(cpu_ir[rc], 0);
1366 else if (islit) {
1367 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1368 } else {
1369 TCGv shift = tcg_temp_new();
1370 TCGv mask = tcg_temp_new();
1371
1372 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1373 tcg_gen_shli_i64(shift, shift, 3);
1374 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1375 tcg_gen_shl_i64(mask, mask, shift);
1376
1377 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1378
1379 tcg_temp_free(mask);
1380 tcg_temp_free(shift);
1381 }
1382}
1383
/* Code to call arith3 helpers.  Expands gen_<name> for a two-source
   integer helper with destination Rc; R31 sources read as zero via a
   constant temporary, and rc == 31 makes the insn a no-op.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
1420
2958620f
RH
/* Code to call arith3 helpers.  Like ARITH3, but the helper also takes
   cpu_env because these overflow-checking ("/V") operations may raise
   an arithmetic exception.  */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
1457
13e4df99
RH
/* Expand gen_<name> for a one-source MVI (multimedia) operation:
   Rb is the source (R31 reads as zero, folded to a move of 0),
   Rc is the destination (rc == 31 is a no-op).  */
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
b3249f63 1472
9e05960f
RH
1473static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1474 int islit, uint8_t lit)
01ff9cc8 1475{
9e05960f 1476 TCGv va, vb;
01ff9cc8 1477
9e05960f 1478 if (unlikely(rc == 31)) {
13e4df99 1479 return;
9e05960f 1480 }
01ff9cc8 1481
9e05960f
RH
1482 if (ra == 31) {
1483 va = tcg_const_i64(0);
1484 } else {
1485 va = cpu_ir[ra];
1486 }
1487 if (islit) {
1488 vb = tcg_const_i64(lit);
1489 } else {
1490 vb = cpu_ir[rb];
1491 }
01ff9cc8 1492
9e05960f 1493 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1494
9e05960f
RH
1495 if (ra == 31) {
1496 tcg_temp_free(va);
1497 }
1498 if (islit) {
1499 tcg_temp_free(vb);
1500 }
01ff9cc8
AJ
1501}
1502
ac316ca4
RH
1503static void gen_rx(int ra, int set)
1504{
1505 TCGv_i32 tmp;
1506
1507 if (ra != 31) {
4d5712f1 1508 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1509 }
1510
1511 tmp = tcg_const_i32(set);
4d5712f1 1512 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
ac316ca4
RH
1513 tcg_temp_free_i32(tmp);
1514}
1515
2ace7e55
RH
/* Translate a CALL_PAL instruction.  Trivial OSF/1 PALcode entry
   points are emulated inline; everything else dispatches into the
   emulated PALcode image via do_call_pal.  Returns the TB exit
   status for the translator loop.  */
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            /* Reduce to the entry-point index and enter PALcode.  */
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            /* Read the cpu index out of the enclosing CPUState, which
               sits at a negative offset from our CPUAlphaState.  */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        /* Entry points sit at fixed offsets: unprivileged calls at
           0x2000 + index * 64, privileged at 0x1000 + index * 64
           (presumably relative to the PAL base — see helper).  */
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existance of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((tcg_target_long)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
1640
26b46094
RH
1641#ifndef CONFIG_USER_ONLY
1642
/* Flag bits OR'ed into a processor-register offset to record the
   access size (byte / 32-bit long); no flag means a full 64 bits.
   They live well above any real offsetof() value.  */
#define PR_BYTE 0x100000
#define PR_LONG 0x200000

/* Map an IPR number to its CPUAlphaState offset (possibly tagged with
   PR_BYTE/PR_LONG).  Returns 0 for unknown registers, which callers
   treat as read-zero / write-ignore.  */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
1673
/* MFPR: move from processor register REGNO into Ra.  Timer registers
   get helper calls (with icount bracketing); everything else is a
   plain load from CPUAlphaState.  */
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            /* With icount, reading time is an I/O operation and must
               end the TB so pending interrupts are noticed.  */
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
1714
bc24270e 1715static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1716{
1717 TCGv tmp;
bc24270e 1718 int data;
26b46094
RH
1719
1720 if (rb == 31) {
1721 tmp = tcg_const_i64(0);
1722 } else {
1723 tmp = cpu_ir[rb];
1724 }
1725
bc24270e
RH
1726 switch (regno) {
1727 case 255:
3b4fefd6 1728 /* TBIA */
69163fbb 1729 gen_helper_tbia(cpu_env);
bc24270e
RH
1730 break;
1731
1732 case 254:
3b4fefd6 1733 /* TBIS */
69163fbb 1734 gen_helper_tbis(cpu_env, tmp);
bc24270e
RH
1735 break;
1736
1737 case 253:
1738 /* WAIT */
1739 tmp = tcg_const_i64(1);
259186a7
AF
1740 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1741 offsetof(CPUState, halted));
bc24270e
RH
1742 return gen_excp(ctx, EXCP_HLT, 0);
1743
034ebc27
RH
1744 case 252:
1745 /* HALT */
1746 gen_helper_halt(tmp);
1747 return EXIT_PC_STALE;
1748
c781cf96
RH
1749 case 251:
1750 /* ALARM */
69163fbb 1751 gen_helper_set_alarm(cpu_env, tmp);
c781cf96
RH
1752 break;
1753
a9ead832
RH
1754 case 7:
1755 /* PALBR */
1756 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
1757 /* Changing the PAL base register implies un-chaining all of the TBs
1758 that ended with a CALL_PAL. Since the base register usually only
1759 changes during boot, flushing everything works well. */
1760 gen_helper_tb_flush(cpu_env);
1761 return EXIT_PC_STALE;
1762
bc24270e 1763 default:
3b4fefd6
RH
1764 /* The basic registers are data only, and unknown registers
1765 are read-zero, write-ignore. */
bc24270e 1766 data = cpu_pr_data(regno);
3b4fefd6
RH
1767 if (data != 0) {
1768 if (data & PR_BYTE) {
1769 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1770 } else if (data & PR_LONG) {
1771 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1772 } else {
1773 tcg_gen_st_i64(tmp, cpu_env, data);
1774 }
26b46094 1775 }
bc24270e 1776 break;
26b46094
RH
1777 }
1778
1779 if (rb == 31) {
1780 tcg_temp_free(tmp);
1781 }
bc24270e
RH
1782
1783 return NO_EXIT;
26b46094
RH
1784}
1785#endif /* !USER_ONLY*/
1786
4af70374 1787static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1788{
1789 uint32_t palcode;
efa64351
MT
1790 int32_t disp21, disp16;
1791#ifndef CONFIG_USER_ONLY
1792 int32_t disp12;
1793#endif
f88fe4e3 1794 uint16_t fn11;
b6fb147c 1795 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1796 uint8_t lit;
4af70374 1797 ExitStatus ret;
4c9649a9
JM
1798
1799 /* Decode all instruction fields */
1800 opc = insn >> 26;
1801 ra = (insn >> 21) & 0x1F;
1802 rb = (insn >> 16) & 0x1F;
1803 rc = insn & 0x1F;
13e4df99 1804 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1805 if (rb == 31 && !islit) {
1806 islit = 1;
1807 lit = 0;
1808 } else
1809 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1810 palcode = insn & 0x03FFFFFF;
1811 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1812 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1813#ifndef CONFIG_USER_ONLY
4c9649a9 1814 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1815#endif
4c9649a9
JM
1816 fn11 = (insn >> 5) & 0x000007FF;
1817 fpfn = fn11 & 0x3F;
1818 fn7 = (insn >> 5) & 0x0000007F;
806991da 1819 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1820 opc, ra, rb, rc, disp16);
806991da 1821
4af70374 1822 ret = NO_EXIT;
4c9649a9
JM
1823 switch (opc) {
1824 case 0x00:
1825 /* CALL_PAL */
2ace7e55
RH
1826 ret = gen_call_pal(ctx, palcode);
1827 break;
4c9649a9
JM
1828 case 0x01:
1829 /* OPC01 */
1830 goto invalid_opc;
1831 case 0x02:
1832 /* OPC02 */
1833 goto invalid_opc;
1834 case 0x03:
1835 /* OPC03 */
1836 goto invalid_opc;
1837 case 0x04:
1838 /* OPC04 */
1839 goto invalid_opc;
1840 case 0x05:
1841 /* OPC05 */
1842 goto invalid_opc;
1843 case 0x06:
1844 /* OPC06 */
1845 goto invalid_opc;
1846 case 0x07:
1847 /* OPC07 */
1848 goto invalid_opc;
1849 case 0x08:
1850 /* LDA */
1ef4ef4e 1851 if (likely(ra != 31)) {
496cb5b9 1852 if (rb != 31)
3761035f
AJ
1853 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1854 else
1855 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1856 }
4c9649a9
JM
1857 break;
1858 case 0x09:
1859 /* LDAH */
1ef4ef4e 1860 if (likely(ra != 31)) {
496cb5b9 1861 if (rb != 31)
3761035f
AJ
1862 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1863 else
1864 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1865 }
4c9649a9
JM
1866 break;
1867 case 0x0A:
1868 /* LDBU */
a18ad893
RH
1869 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1870 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1871 break;
1872 }
1873 goto invalid_opc;
4c9649a9
JM
1874 case 0x0B:
1875 /* LDQ_U */
f18cd223 1876 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1877 break;
1878 case 0x0C:
1879 /* LDWU */
a18ad893
RH
1880 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1881 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1882 break;
1883 }
1884 goto invalid_opc;
4c9649a9
JM
1885 case 0x0D:
1886 /* STW */
6910b8f6 1887 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1888 break;
1889 case 0x0E:
1890 /* STB */
6910b8f6 1891 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1892 break;
1893 case 0x0F:
1894 /* STQ_U */
6910b8f6 1895 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1896 break;
1897 case 0x10:
1898 switch (fn7) {
1899 case 0x00:
1900 /* ADDL */
30c7183b
AJ
1901 if (likely(rc != 31)) {
1902 if (ra != 31) {
1903 if (islit) {
1904 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1905 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1906 } else {
30c7183b
AJ
1907 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1908 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1909 }
30c7183b
AJ
1910 } else {
1911 if (islit)
dfaa8583 1912 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1913 else
dfaa8583 1914 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1915 }
1916 }
4c9649a9
JM
1917 break;
1918 case 0x02:
1919 /* S4ADDL */
30c7183b
AJ
1920 if (likely(rc != 31)) {
1921 if (ra != 31) {
a7812ae4 1922 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1923 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1924 if (islit)
1925 tcg_gen_addi_i64(tmp, tmp, lit);
1926 else
1927 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1928 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1929 tcg_temp_free(tmp);
30c7183b
AJ
1930 } else {
1931 if (islit)
1932 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1933 else
dfaa8583 1934 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1935 }
1936 }
4c9649a9
JM
1937 break;
1938 case 0x09:
1939 /* SUBL */
30c7183b
AJ
1940 if (likely(rc != 31)) {
1941 if (ra != 31) {
dfaa8583 1942 if (islit)
30c7183b 1943 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1944 else
30c7183b 1945 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1946 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1947 } else {
1948 if (islit)
1949 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1950 else {
30c7183b
AJ
1951 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1952 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1953 }
1954 }
4c9649a9
JM
1955 break;
1956 case 0x0B:
1957 /* S4SUBL */
30c7183b
AJ
1958 if (likely(rc != 31)) {
1959 if (ra != 31) {
a7812ae4 1960 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1961 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1962 if (islit)
1963 tcg_gen_subi_i64(tmp, tmp, lit);
1964 else
1965 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1966 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1967 tcg_temp_free(tmp);
30c7183b
AJ
1968 } else {
1969 if (islit)
1970 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1971 else {
30c7183b
AJ
1972 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1973 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1974 }
30c7183b
AJ
1975 }
1976 }
4c9649a9
JM
1977 break;
1978 case 0x0F:
1979 /* CMPBGE */
a7812ae4 1980 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1981 break;
1982 case 0x12:
1983 /* S8ADDL */
30c7183b
AJ
1984 if (likely(rc != 31)) {
1985 if (ra != 31) {
a7812ae4 1986 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1987 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1988 if (islit)
1989 tcg_gen_addi_i64(tmp, tmp, lit);
1990 else
1991 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1992 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1993 tcg_temp_free(tmp);
30c7183b
AJ
1994 } else {
1995 if (islit)
1996 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1997 else
dfaa8583 1998 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1999 }
2000 }
4c9649a9
JM
2001 break;
2002 case 0x1B:
2003 /* S8SUBL */
30c7183b
AJ
2004 if (likely(rc != 31)) {
2005 if (ra != 31) {
a7812ae4 2006 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2007 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2008 if (islit)
2009 tcg_gen_subi_i64(tmp, tmp, lit);
2010 else
2011 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
2012 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
2013 tcg_temp_free(tmp);
30c7183b
AJ
2014 } else {
2015 if (islit)
2016 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 2017 else
30c7183b
AJ
2018 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2019 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 2020 }
30c7183b
AJ
2021 }
2022 }
4c9649a9
JM
2023 break;
2024 case 0x1D:
2025 /* CMPULT */
01ff9cc8 2026 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
2027 break;
2028 case 0x20:
2029 /* ADDQ */
30c7183b
AJ
2030 if (likely(rc != 31)) {
2031 if (ra != 31) {
2032 if (islit)
2033 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2034 else
dfaa8583 2035 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2036 } else {
2037 if (islit)
2038 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2039 else
dfaa8583 2040 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2041 }
2042 }
4c9649a9
JM
2043 break;
2044 case 0x22:
2045 /* S4ADDQ */
30c7183b
AJ
2046 if (likely(rc != 31)) {
2047 if (ra != 31) {
a7812ae4 2048 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2049 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2050 if (islit)
2051 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2052 else
2053 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2054 tcg_temp_free(tmp);
30c7183b
AJ
2055 } else {
2056 if (islit)
2057 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2058 else
dfaa8583 2059 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2060 }
2061 }
4c9649a9
JM
2062 break;
2063 case 0x29:
2064 /* SUBQ */
30c7183b
AJ
2065 if (likely(rc != 31)) {
2066 if (ra != 31) {
2067 if (islit)
2068 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2069 else
dfaa8583 2070 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2071 } else {
2072 if (islit)
2073 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2074 else
dfaa8583 2075 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2076 }
2077 }
4c9649a9
JM
2078 break;
2079 case 0x2B:
2080 /* S4SUBQ */
30c7183b
AJ
2081 if (likely(rc != 31)) {
2082 if (ra != 31) {
a7812ae4 2083 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2084 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2085 if (islit)
2086 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2087 else
2088 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2089 tcg_temp_free(tmp);
30c7183b
AJ
2090 } else {
2091 if (islit)
2092 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2093 else
dfaa8583 2094 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2095 }
2096 }
4c9649a9
JM
2097 break;
2098 case 0x2D:
2099 /* CMPEQ */
01ff9cc8 2100 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
2101 break;
2102 case 0x32:
2103 /* S8ADDQ */
30c7183b
AJ
2104 if (likely(rc != 31)) {
2105 if (ra != 31) {
a7812ae4 2106 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2107 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2108 if (islit)
2109 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2110 else
2111 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2112 tcg_temp_free(tmp);
30c7183b
AJ
2113 } else {
2114 if (islit)
2115 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2116 else
dfaa8583 2117 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2118 }
2119 }
4c9649a9
JM
2120 break;
2121 case 0x3B:
2122 /* S8SUBQ */
30c7183b
AJ
2123 if (likely(rc != 31)) {
2124 if (ra != 31) {
a7812ae4 2125 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2126 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2127 if (islit)
2128 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2129 else
2130 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2131 tcg_temp_free(tmp);
30c7183b
AJ
2132 } else {
2133 if (islit)
2134 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2135 else
dfaa8583 2136 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2137 }
2138 }
4c9649a9
JM
2139 break;
2140 case 0x3D:
2141 /* CMPULE */
01ff9cc8 2142 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2143 break;
2144 case 0x40:
2145 /* ADDL/V */
a7812ae4 2146 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2147 break;
2148 case 0x49:
2149 /* SUBL/V */
a7812ae4 2150 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2151 break;
2152 case 0x4D:
2153 /* CMPLT */
01ff9cc8 2154 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2155 break;
2156 case 0x60:
2157 /* ADDQ/V */
a7812ae4 2158 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2159 break;
2160 case 0x69:
2161 /* SUBQ/V */
a7812ae4 2162 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2163 break;
2164 case 0x6D:
2165 /* CMPLE */
01ff9cc8 2166 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2167 break;
2168 default:
2169 goto invalid_opc;
2170 }
2171 break;
2172 case 0x11:
2173 switch (fn7) {
2174 case 0x00:
2175 /* AND */
30c7183b 2176 if (likely(rc != 31)) {
dfaa8583 2177 if (ra == 31)
30c7183b
AJ
2178 tcg_gen_movi_i64(cpu_ir[rc], 0);
2179 else if (islit)
2180 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2181 else
2182 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2183 }
4c9649a9
JM
2184 break;
2185 case 0x08:
2186 /* BIC */
30c7183b
AJ
2187 if (likely(rc != 31)) {
2188 if (ra != 31) {
2189 if (islit)
2190 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2191 else
2192 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2193 } else
2194 tcg_gen_movi_i64(cpu_ir[rc], 0);
2195 }
4c9649a9
JM
2196 break;
2197 case 0x14:
2198 /* CMOVLBS */
bbe1dab4 2199 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2200 break;
2201 case 0x16:
2202 /* CMOVLBC */
bbe1dab4 2203 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2204 break;
2205 case 0x20:
2206 /* BIS */
30c7183b
AJ
2207 if (likely(rc != 31)) {
2208 if (ra != 31) {
2209 if (islit)
2210 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2211 else
30c7183b 2212 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2213 } else {
30c7183b
AJ
2214 if (islit)
2215 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2216 else
dfaa8583 2217 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2218 }
4c9649a9
JM
2219 }
2220 break;
2221 case 0x24:
2222 /* CMOVEQ */
bbe1dab4 2223 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2224 break;
2225 case 0x26:
2226 /* CMOVNE */
bbe1dab4 2227 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2228 break;
2229 case 0x28:
2230 /* ORNOT */
30c7183b 2231 if (likely(rc != 31)) {
dfaa8583 2232 if (ra != 31) {
30c7183b
AJ
2233 if (islit)
2234 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2235 else
2236 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2237 } else {
2238 if (islit)
2239 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2240 else
2241 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2242 }
2243 }
4c9649a9
JM
2244 break;
2245 case 0x40:
2246 /* XOR */
30c7183b
AJ
2247 if (likely(rc != 31)) {
2248 if (ra != 31) {
2249 if (islit)
2250 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2251 else
dfaa8583 2252 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2253 } else {
2254 if (islit)
2255 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2256 else
dfaa8583 2257 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2258 }
2259 }
4c9649a9
JM
2260 break;
2261 case 0x44:
2262 /* CMOVLT */
bbe1dab4 2263 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2264 break;
2265 case 0x46:
2266 /* CMOVGE */
bbe1dab4 2267 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2268 break;
2269 case 0x48:
2270 /* EQV */
30c7183b
AJ
2271 if (likely(rc != 31)) {
2272 if (ra != 31) {
2273 if (islit)
2274 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2275 else
2276 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2277 } else {
2278 if (islit)
2279 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2280 else
dfaa8583 2281 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2282 }
2283 }
4c9649a9
JM
2284 break;
2285 case 0x61:
2286 /* AMASK */
ae8ecd42 2287 if (likely(rc != 31)) {
a18ad893
RH
2288 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2289
2290 if (islit) {
2291 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2292 } else {
2293 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2294 }
ae8ecd42 2295 }
4c9649a9
JM
2296 break;
2297 case 0x64:
2298 /* CMOVLE */
bbe1dab4 2299 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2300 break;
2301 case 0x66:
2302 /* CMOVGT */
bbe1dab4 2303 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2304 break;
2305 case 0x6C:
2306 /* IMPLVER */
801c4c28
RH
2307 if (rc != 31) {
2308 tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
2309 }
4c9649a9
JM
2310 break;
2311 default:
2312 goto invalid_opc;
2313 }
2314 break;
2315 case 0x12:
2316 switch (fn7) {
2317 case 0x02:
2318 /* MSKBL */
14ab1634 2319 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2320 break;
2321 case 0x06:
2322 /* EXTBL */
377a43b6 2323 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2324 break;
2325 case 0x0B:
2326 /* INSBL */
248c42f3 2327 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2328 break;
2329 case 0x12:
2330 /* MSKWL */
14ab1634 2331 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2332 break;
2333 case 0x16:
2334 /* EXTWL */
377a43b6 2335 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2336 break;
2337 case 0x1B:
2338 /* INSWL */
248c42f3 2339 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2340 break;
2341 case 0x22:
2342 /* MSKLL */
14ab1634 2343 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2344 break;
2345 case 0x26:
2346 /* EXTLL */
377a43b6 2347 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2348 break;
2349 case 0x2B:
2350 /* INSLL */
248c42f3 2351 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2352 break;
2353 case 0x30:
2354 /* ZAP */
a7812ae4 2355 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2356 break;
2357 case 0x31:
2358 /* ZAPNOT */
a7812ae4 2359 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2360 break;
2361 case 0x32:
2362 /* MSKQL */
14ab1634 2363 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2364 break;
2365 case 0x34:
2366 /* SRL */
30c7183b
AJ
2367 if (likely(rc != 31)) {
2368 if (ra != 31) {
2369 if (islit)
2370 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2371 else {
a7812ae4 2372 TCGv shift = tcg_temp_new();
30c7183b
AJ
2373 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2374 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2375 tcg_temp_free(shift);
dfaa8583 2376 }
30c7183b
AJ
2377 } else
2378 tcg_gen_movi_i64(cpu_ir[rc], 0);
2379 }
4c9649a9
JM
2380 break;
2381 case 0x36:
2382 /* EXTQL */
377a43b6 2383 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2384 break;
2385 case 0x39:
2386 /* SLL */
30c7183b
AJ
2387 if (likely(rc != 31)) {
2388 if (ra != 31) {
2389 if (islit)
2390 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2391 else {
a7812ae4 2392 TCGv shift = tcg_temp_new();
30c7183b
AJ
2393 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2394 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2395 tcg_temp_free(shift);
dfaa8583 2396 }
30c7183b
AJ
2397 } else
2398 tcg_gen_movi_i64(cpu_ir[rc], 0);
2399 }
4c9649a9
JM
2400 break;
2401 case 0x3B:
2402 /* INSQL */
248c42f3 2403 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2404 break;
2405 case 0x3C:
2406 /* SRA */
30c7183b
AJ
2407 if (likely(rc != 31)) {
2408 if (ra != 31) {
2409 if (islit)
2410 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2411 else {
a7812ae4 2412 TCGv shift = tcg_temp_new();
30c7183b
AJ
2413 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2414 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2415 tcg_temp_free(shift);
dfaa8583 2416 }
30c7183b
AJ
2417 } else
2418 tcg_gen_movi_i64(cpu_ir[rc], 0);
2419 }
4c9649a9
JM
2420 break;
2421 case 0x52:
2422 /* MSKWH */
ffec44f1 2423 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2424 break;
2425 case 0x57:
2426 /* INSWH */
50eb6e5c 2427 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2428 break;
2429 case 0x5A:
2430 /* EXTWH */
377a43b6 2431 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2432 break;
2433 case 0x62:
2434 /* MSKLH */
ffec44f1 2435 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2436 break;
2437 case 0x67:
2438 /* INSLH */
50eb6e5c 2439 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2440 break;
2441 case 0x6A:
2442 /* EXTLH */
377a43b6 2443 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2444 break;
2445 case 0x72:
2446 /* MSKQH */
ffec44f1 2447 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2448 break;
2449 case 0x77:
2450 /* INSQH */
50eb6e5c 2451 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2452 break;
2453 case 0x7A:
2454 /* EXTQH */
377a43b6 2455 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2456 break;
2457 default:
2458 goto invalid_opc;
2459 }
2460 break;
2461 case 0x13:
2462 switch (fn7) {
2463 case 0x00:
2464 /* MULL */
30c7183b 2465 if (likely(rc != 31)) {
dfaa8583 2466 if (ra == 31)
30c7183b
AJ
2467 tcg_gen_movi_i64(cpu_ir[rc], 0);
2468 else {
2469 if (islit)
2470 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2471 else
2472 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2473 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2474 }
2475 }
4c9649a9
JM
2476 break;
2477 case 0x20:
2478 /* MULQ */
30c7183b 2479 if (likely(rc != 31)) {
dfaa8583 2480 if (ra == 31)
30c7183b
AJ
2481 tcg_gen_movi_i64(cpu_ir[rc], 0);
2482 else if (islit)
2483 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2484 else
2485 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2486 }
4c9649a9
JM
2487 break;
2488 case 0x30:
2489 /* UMULH */
962415fc
RH
2490 {
2491 TCGv low;
2492 if (unlikely(rc == 31)){
2493 break;
2494 }
2495 if (ra == 31) {
2496 tcg_gen_movi_i64(cpu_ir[rc], 0);
2497 break;
2498 }
2499 low = tcg_temp_new();
2500 if (islit) {
2501 tcg_gen_movi_tl(low, lit);
2502 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2503 } else {
2504 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2505 }
2506 tcg_temp_free(low);
2507 }
4c9649a9
JM
2508 break;
2509 case 0x40:
2510 /* MULL/V */
a7812ae4 2511 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2512 break;
2513 case 0x60:
2514 /* MULQ/V */
a7812ae4 2515 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2516 break;
2517 default:
2518 goto invalid_opc;
2519 }
2520 break;
2521 case 0x14:
f24518b5 2522 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2523 case 0x04:
2524 /* ITOFS */
a18ad893 2525 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2526 goto invalid_opc;
a18ad893 2527 }
f18cd223
AJ
2528 if (likely(rc != 31)) {
2529 if (ra != 31) {
a7812ae4 2530 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2531 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2532 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2533 tcg_temp_free_i32(tmp);
f18cd223
AJ
2534 } else
2535 tcg_gen_movi_i64(cpu_fir[rc], 0);
2536 }
4c9649a9
JM
2537 break;
2538 case 0x0A:
2539 /* SQRTF */
a18ad893
RH
2540 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2541 gen_fsqrtf(rb, rc);
2542 break;
2543 }
2544 goto invalid_opc;
4c9649a9
JM
2545 case 0x0B:
2546 /* SQRTS */
a18ad893
RH
2547 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2548 gen_fsqrts(ctx, rb, rc, fn11);
2549 break;
2550 }
2551 goto invalid_opc;
4c9649a9
JM
2552 case 0x14:
2553 /* ITOFF */
a18ad893 2554 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2555 goto invalid_opc;
a18ad893 2556 }
f18cd223
AJ
2557 if (likely(rc != 31)) {
2558 if (ra != 31) {
a7812ae4 2559 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2560 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2561 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2562 tcg_temp_free_i32(tmp);
f18cd223
AJ
2563 } else
2564 tcg_gen_movi_i64(cpu_fir[rc], 0);
2565 }
4c9649a9
JM
2566 break;
2567 case 0x24:
2568 /* ITOFT */
a18ad893 2569 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2570 goto invalid_opc;
a18ad893 2571 }
f18cd223
AJ
2572 if (likely(rc != 31)) {
2573 if (ra != 31)
2574 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2575 else
2576 tcg_gen_movi_i64(cpu_fir[rc], 0);
2577 }
4c9649a9
JM
2578 break;
2579 case 0x2A:
2580 /* SQRTG */
a18ad893
RH
2581 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2582 gen_fsqrtg(rb, rc);
2583 break;
2584 }
2585 goto invalid_opc;
4c9649a9
JM
2586 case 0x02B:
2587 /* SQRTT */
a18ad893
RH
2588 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2589 gen_fsqrtt(ctx, rb, rc, fn11);
2590 break;
2591 }
2592 goto invalid_opc;
4c9649a9
JM
2593 default:
2594 goto invalid_opc;
2595 }
2596 break;
2597 case 0x15:
2598 /* VAX floating point */
2599 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2600 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2601 case 0x00:
2602 /* ADDF */
a7812ae4 2603 gen_faddf(ra, rb, rc);
4c9649a9
JM
2604 break;
2605 case 0x01:
2606 /* SUBF */
a7812ae4 2607 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2608 break;
2609 case 0x02:
2610 /* MULF */
a7812ae4 2611 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2612 break;
2613 case 0x03:
2614 /* DIVF */
a7812ae4 2615 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2616 break;
2617 case 0x1E:
2618 /* CVTDG */
2619#if 0 // TODO
a7812ae4 2620 gen_fcvtdg(rb, rc);
4c9649a9
JM
2621#else
2622 goto invalid_opc;
2623#endif
2624 break;
2625 case 0x20:
2626 /* ADDG */
a7812ae4 2627 gen_faddg(ra, rb, rc);
4c9649a9
JM
2628 break;
2629 case 0x21:
2630 /* SUBG */
a7812ae4 2631 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2632 break;
2633 case 0x22:
2634 /* MULG */
a7812ae4 2635 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2636 break;
2637 case 0x23:
2638 /* DIVG */
a7812ae4 2639 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2640 break;
2641 case 0x25:
2642 /* CMPGEQ */
a7812ae4 2643 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2644 break;
2645 case 0x26:
2646 /* CMPGLT */
a7812ae4 2647 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2648 break;
2649 case 0x27:
2650 /* CMPGLE */
a7812ae4 2651 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2652 break;
2653 case 0x2C:
2654 /* CVTGF */
a7812ae4 2655 gen_fcvtgf(rb, rc);
4c9649a9
JM
2656 break;
2657 case 0x2D:
2658 /* CVTGD */
2659#if 0 // TODO
a7812ae4 2660 gen_fcvtgd(rb, rc);
4c9649a9
JM
2661#else
2662 goto invalid_opc;
2663#endif
2664 break;
2665 case 0x2F:
2666 /* CVTGQ */
a7812ae4 2667 gen_fcvtgq(rb, rc);
4c9649a9
JM
2668 break;
2669 case 0x3C:
2670 /* CVTQF */
a7812ae4 2671 gen_fcvtqf(rb, rc);
4c9649a9
JM
2672 break;
2673 case 0x3E:
2674 /* CVTQG */
a7812ae4 2675 gen_fcvtqg(rb, rc);
4c9649a9
JM
2676 break;
2677 default:
2678 goto invalid_opc;
2679 }
2680 break;
2681 case 0x16:
2682 /* IEEE floating-point */
f24518b5 2683 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2684 case 0x00:
2685 /* ADDS */
f24518b5 2686 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2687 break;
2688 case 0x01:
2689 /* SUBS */
f24518b5 2690 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2691 break;
2692 case 0x02:
2693 /* MULS */
f24518b5 2694 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2695 break;
2696 case 0x03:
2697 /* DIVS */
f24518b5 2698 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2699 break;
2700 case 0x20:
2701 /* ADDT */
f24518b5 2702 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2703 break;
2704 case 0x21:
2705 /* SUBT */
f24518b5 2706 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2707 break;
2708 case 0x22:
2709 /* MULT */
f24518b5 2710 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2711 break;
2712 case 0x23:
2713 /* DIVT */
f24518b5 2714 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2715 break;
2716 case 0x24:
2717 /* CMPTUN */
f24518b5 2718 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2719 break;
2720 case 0x25:
2721 /* CMPTEQ */
f24518b5 2722 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2723 break;
2724 case 0x26:
2725 /* CMPTLT */
f24518b5 2726 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2727 break;
2728 case 0x27:
2729 /* CMPTLE */
f24518b5 2730 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2731 break;
2732 case 0x2C:
a74b4d2c 2733 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2734 /* CVTST */
f24518b5 2735 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2736 } else {
2737 /* CVTTS */
f24518b5 2738 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2739 }
2740 break;
2741 case 0x2F:
2742 /* CVTTQ */
f24518b5 2743 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2744 break;
2745 case 0x3C:
2746 /* CVTQS */
f24518b5 2747 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2748 break;
2749 case 0x3E:
2750 /* CVTQT */
f24518b5 2751 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2752 break;
2753 default:
2754 goto invalid_opc;
2755 }
2756 break;
2757 case 0x17:
2758 switch (fn11) {
2759 case 0x010:
2760 /* CVTLQ */
a7812ae4 2761 gen_fcvtlq(rb, rc);
4c9649a9
JM
2762 break;
2763 case 0x020:
f18cd223 2764 if (likely(rc != 31)) {
a06d48d9 2765 if (ra == rb) {
4c9649a9 2766 /* FMOV */
a06d48d9
RH
2767 if (ra == 31)
2768 tcg_gen_movi_i64(cpu_fir[rc], 0);
2769 else
2770 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2771 } else {
f18cd223 2772 /* CPYS */
a7812ae4 2773 gen_fcpys(ra, rb, rc);
a06d48d9 2774 }
4c9649a9
JM
2775 }
2776 break;
2777 case 0x021:
2778 /* CPYSN */
a7812ae4 2779 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2780 break;
2781 case 0x022:
2782 /* CPYSE */
a7812ae4 2783 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2784 break;
2785 case 0x024:
2786 /* MT_FPCR */
f18cd223 2787 if (likely(ra != 31))
a44a2777 2788 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
f18cd223
AJ
2789 else {
2790 TCGv tmp = tcg_const_i64(0);
a44a2777 2791 gen_helper_store_fpcr(cpu_env, tmp);
f18cd223
AJ
2792 tcg_temp_free(tmp);
2793 }
4c9649a9
JM
2794 break;
2795 case 0x025:
2796 /* MF_FPCR */
f18cd223 2797 if (likely(ra != 31))
a44a2777 2798 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
4c9649a9
JM
2799 break;
2800 case 0x02A:
2801 /* FCMOVEQ */
bbe1dab4 2802 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2803 break;
2804 case 0x02B:
2805 /* FCMOVNE */
bbe1dab4 2806 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2807 break;
2808 case 0x02C:
2809 /* FCMOVLT */
bbe1dab4 2810 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2811 break;
2812 case 0x02D:
2813 /* FCMOVGE */
bbe1dab4 2814 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2815 break;
2816 case 0x02E:
2817 /* FCMOVLE */
bbe1dab4 2818 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2819 break;
2820 case 0x02F:
2821 /* FCMOVGT */
bbe1dab4 2822 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2823 break;
2824 case 0x030:
2825 /* CVTQL */
a7812ae4 2826 gen_fcvtql(rb, rc);
4c9649a9
JM
2827 break;
2828 case 0x130:
2829 /* CVTQL/V */
4c9649a9
JM
2830 case 0x530:
2831 /* CVTQL/SV */
735cf45f
RH
2832 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2833 /v doesn't do. The only thing I can think is that /sv is a
2834 valid instruction merely for completeness in the ISA. */
2835 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2836 break;
2837 default:
2838 goto invalid_opc;
2839 }
2840 break;
2841 case 0x18:
2842 switch ((uint16_t)disp16) {
2843 case 0x0000:
2844 /* TRAPB */
4af70374 2845 /* No-op. */
4c9649a9
JM
2846 break;
2847 case 0x0400:
2848 /* EXCB */
4af70374 2849 /* No-op. */
4c9649a9
JM
2850 break;
2851 case 0x4000:
2852 /* MB */
2853 /* No-op */
2854 break;
2855 case 0x4400:
2856 /* WMB */
2857 /* No-op */
2858 break;
2859 case 0x8000:
2860 /* FETCH */
2861 /* No-op */
2862 break;
2863 case 0xA000:
2864 /* FETCH_M */
2865 /* No-op */
2866 break;
2867 case 0xC000:
2868 /* RPCC */
a9406ea1
RH
2869 if (ra != 31) {
2870 if (use_icount) {
2871 gen_io_start();
69163fbb 2872 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2873 gen_io_end();
2874 ret = EXIT_PC_STALE;
2875 } else {
69163fbb 2876 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2877 }
2878 }
4c9649a9
JM
2879 break;
2880 case 0xE000:
2881 /* RC */
ac316ca4 2882 gen_rx(ra, 0);
4c9649a9
JM
2883 break;
2884 case 0xE800:
2885 /* ECB */
4c9649a9
JM
2886 break;
2887 case 0xF000:
2888 /* RS */
ac316ca4 2889 gen_rx(ra, 1);
4c9649a9
JM
2890 break;
2891 case 0xF800:
2892 /* WH64 */
2893 /* No-op */
2894 break;
2895 default:
2896 goto invalid_opc;
2897 }
2898 break;
2899 case 0x19:
2900 /* HW_MFPR (PALcode) */
26b46094 2901#ifndef CONFIG_USER_ONLY
a18ad893 2902 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
c781cf96 2903 return gen_mfpr(ra, insn & 0xffff);
26b46094
RH
2904 }
2905#endif
4c9649a9 2906 goto invalid_opc;
4c9649a9 2907 case 0x1A:
49563a72
RH
2908 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2909 prediction stack action, which of course we don't implement. */
2910 if (rb != 31) {
3761035f 2911 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2912 } else {
3761035f 2913 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2914 }
2915 if (ra != 31) {
1304ca87 2916 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2917 }
4af70374 2918 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2919 break;
2920 case 0x1B:
2921 /* HW_LD (PALcode) */
a18ad893
RH
2922#ifndef CONFIG_USER_ONLY
2923 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2924 TCGv addr;
2925
2926 if (ra == 31) {
2927 break;
2928 }
2929
2930 addr = tcg_temp_new();
8bb6e981
AJ
2931 if (rb != 31)
2932 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2933 else
2934 tcg_gen_movi_i64(addr, disp12);
2935 switch ((insn >> 12) & 0xF) {
2936 case 0x0:
b5d51029 2937 /* Longword physical access (hw_ldl/p) */
2374e73e 2938 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2939 break;
2940 case 0x1:
b5d51029 2941 /* Quadword physical access (hw_ldq/p) */
2374e73e 2942 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2943 break;
2944 case 0x2:
b5d51029 2945 /* Longword physical access with lock (hw_ldl_l/p) */
c3082755 2946 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2947 break;
2948 case 0x3:
b5d51029 2949 /* Quadword physical access with lock (hw_ldq_l/p) */
c3082755 2950 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2951 break;
2952 case 0x4:
b5d51029 2953 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2954 goto invalid_opc;
8bb6e981 2955 case 0x5:
b5d51029 2956 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2957 goto invalid_opc;
8bb6e981
AJ
2958 break;
2959 case 0x6:
2960 /* Incpu_ir[ra]id */
b5d51029 2961 goto invalid_opc;
8bb6e981
AJ
2962 case 0x7:
2963 /* Incpu_ir[ra]id */
b5d51029 2964 goto invalid_opc;
8bb6e981 2965 case 0x8:
b5d51029 2966 /* Longword virtual access (hw_ldl) */
2374e73e 2967 goto invalid_opc;
8bb6e981 2968 case 0x9:
b5d51029 2969 /* Quadword virtual access (hw_ldq) */
2374e73e 2970 goto invalid_opc;
8bb6e981 2971 case 0xA:
b5d51029 2972 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2973 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2974 break;
2975 case 0xB:
b5d51029 2976 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2977 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2978 break;
2979 case 0xC:
b5d51029 2980 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2981 goto invalid_opc;
8bb6e981 2982 case 0xD:
b5d51029 2983 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2984 goto invalid_opc;
8bb6e981
AJ
2985 case 0xE:
2986 /* Longword virtual access with alternate access mode and
2374e73e
RH
2987 protection checks (hw_ldl/wa) */
2988 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2989 break;
2990 case 0xF:
2991 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2992 protection checks (hw_ldq/wa) */
2993 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2994 break;
2995 }
2996 tcg_temp_free(addr);
a18ad893 2997 break;
4c9649a9 2998 }
4c9649a9 2999#endif
a18ad893 3000 goto invalid_opc;
4c9649a9
JM
3001 case 0x1C:
3002 switch (fn7) {
3003 case 0x00:
3004 /* SEXTB */
a18ad893 3005 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 3006 goto invalid_opc;
a18ad893 3007 }
ae8ecd42
AJ
3008 if (likely(rc != 31)) {
3009 if (islit)
3010 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 3011 else
dfaa8583 3012 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 3013 }
4c9649a9
JM
3014 break;
3015 case 0x01:
3016 /* SEXTW */
a18ad893
RH
3017 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
3018 if (likely(rc != 31)) {
3019 if (islit) {
3020 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
3021 } else {
3022 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
3023 }
3024 }
3025 break;
ae8ecd42 3026 }
a18ad893 3027 goto invalid_opc;
4c9649a9
JM
3028 case 0x30:
3029 /* CTPOP */
a18ad893
RH
3030 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3031 if (likely(rc != 31)) {
3032 if (islit) {
3033 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
3034 } else {
3035 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
3036 }
3037 }
3038 break;
ae8ecd42 3039 }
a18ad893 3040 goto invalid_opc;
4c9649a9
JM
3041 case 0x31:
3042 /* PERR */
a18ad893
RH
3043 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3044 gen_perr(ra, rb, rc, islit, lit);
3045 break;
3046 }
3047 goto invalid_opc;
4c9649a9
JM
3048 case 0x32:
3049 /* CTLZ */
a18ad893
RH
3050 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3051 if (likely(rc != 31)) {
3052 if (islit) {
3053 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3054 } else {
3055 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
3056 }
3057 }
3058 break;
ae8ecd42 3059 }
a18ad893 3060 goto invalid_opc;
4c9649a9
JM
3061 case 0x33:
3062 /* CTTZ */
a18ad893
RH
3063 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3064 if (likely(rc != 31)) {
3065 if (islit) {
3066 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3067 } else {
3068 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3069 }
3070 }
3071 break;
ae8ecd42 3072 }
a18ad893 3073 goto invalid_opc;
4c9649a9
JM
3074 case 0x34:
3075 /* UNPKBW */
a18ad893
RH
3076 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3077 if (real_islit || ra != 31) {
3078 goto invalid_opc;
3079 }
3080 gen_unpkbw(rb, rc);
3081 break;
3082 }
3083 goto invalid_opc;
4c9649a9 3084 case 0x35:
13e4df99 3085 /* UNPKBL */
a18ad893
RH
3086 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3087 if (real_islit || ra != 31) {
3088 goto invalid_opc;
3089 }
3090 gen_unpkbl(rb, rc);
3091 break;
3092 }
3093 goto invalid_opc;
4c9649a9
JM
3094 case 0x36:
3095 /* PKWB */
a18ad893
RH
3096 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3097 if (real_islit || ra != 31) {
3098 goto invalid_opc;
3099 }
3100 gen_pkwb(rb, rc);
3101 break;
3102 }
3103 goto invalid_opc;
4c9649a9
JM
3104 case 0x37:
3105 /* PKLB */
a18ad893
RH
3106 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3107 if (real_islit || ra != 31) {
3108 goto invalid_opc;
3109 }
3110 gen_pklb(rb, rc);
3111 break;
3112 }
3113 goto invalid_opc;
4c9649a9
JM
3114 case 0x38:
3115 /* MINSB8 */
a18ad893
RH
3116 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3117 gen_minsb8(ra, rb, rc, islit, lit);
3118 break;
3119 }
3120 goto invalid_opc;
4c9649a9
JM
3121 case 0x39:
3122 /* MINSW4 */
a18ad893
RH
3123 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3124 gen_minsw4(ra, rb, rc, islit, lit);
3125 break;
3126 }
3127 goto invalid_opc;
4c9649a9
JM
3128 case 0x3A:
3129 /* MINUB8 */
a18ad893
RH
3130 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3131 gen_minub8(ra, rb, rc, islit, lit);
3132 break;
3133 }
3134 goto invalid_opc;
4c9649a9
JM
3135 case 0x3B:
3136 /* MINUW4 */
a18ad893
RH
3137 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3138 gen_minuw4(ra, rb, rc, islit, lit);
3139 break;
3140 }
3141 goto invalid_opc;
4c9649a9
JM
3142 case 0x3C:
3143 /* MAXUB8 */
a18ad893
RH
3144 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3145 gen_maxub8(ra, rb, rc, islit, lit);
3146 break;
3147 }
3148 goto invalid_opc;
4c9649a9
JM
3149 case 0x3D:
3150 /* MAXUW4 */
a18ad893
RH
3151 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3152 gen_maxuw4(ra, rb, rc, islit, lit);
3153 break;
3154 }
3155 goto invalid_opc;
4c9649a9
JM
3156 case 0x3E:
3157 /* MAXSB8 */
a18ad893
RH
3158 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3159 gen_maxsb8(ra, rb, rc, islit, lit);
3160 break;
3161 }
3162 goto invalid_opc;
4c9649a9
JM
3163 case 0x3F:
3164 /* MAXSW4 */
a18ad893
RH
3165 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3166 gen_maxsw4(ra, rb, rc, islit, lit);
3167 break;
3168 }
3169 goto invalid_opc;
4c9649a9
JM
3170 case 0x70:
3171 /* FTOIT */
a18ad893 3172 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3173 goto invalid_opc;
a18ad893 3174 }
f18cd223
AJ
3175 if (likely(rc != 31)) {
3176 if (ra != 31)
3177 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3178 else
3179 tcg_gen_movi_i64(cpu_ir[rc], 0);
3180 }
4c9649a9
JM
3181 break;
3182 case 0x78:
3183 /* FTOIS */
a18ad893 3184 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3185 goto invalid_opc;
a18ad893 3186 }
f18cd223 3187 if (rc != 31) {
a7812ae4 3188 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3189 if (ra != 31)
a7812ae4 3190 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3191 else {
3192 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3193 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3194 tcg_temp_free(tmp2);
3195 }
3196 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3197 tcg_temp_free_i32(tmp1);
f18cd223 3198 }
4c9649a9
JM
3199 break;
3200 default:
3201 goto invalid_opc;
3202 }
3203 break;
3204 case 0x1D:
3205 /* HW_MTPR (PALcode) */
26b46094 3206#ifndef CONFIG_USER_ONLY
a18ad893 3207 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
bc24270e 3208 return gen_mtpr(ctx, rb, insn & 0xffff);
26b46094
RH
3209 }
3210#endif
4c9649a9 3211 goto invalid_opc;
4c9649a9 3212 case 0x1E:
508b43ea 3213 /* HW_RET (PALcode) */
a18ad893
RH
3214#ifndef CONFIG_USER_ONLY
3215 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3216 if (rb == 31) {
3217 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3218 address from EXC_ADDR. This turns out to be useful for our
3219 emulation PALcode, so continue to accept it. */
3220 TCGv tmp = tcg_temp_new();
4d5712f1 3221 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
69163fbb 3222 gen_helper_hw_ret(cpu_env, tmp);
a18ad893
RH
3223 tcg_temp_free(tmp);
3224 } else {
69163fbb 3225 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
a18ad893
RH
3226 }
3227 ret = EXIT_PC_UPDATED;
3228 break;
4c9649a9 3229 }
4c9649a9 3230#endif
a18ad893 3231 goto invalid_opc;
4c9649a9
JM
3232 case 0x1F:
3233 /* HW_ST (PALcode) */
a18ad893
RH
3234#ifndef CONFIG_USER_ONLY
3235 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3236 TCGv addr, val;
a7812ae4 3237 addr = tcg_temp_new();
8bb6e981
AJ
3238 if (rb != 31)
3239 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3240 else
3241 tcg_gen_movi_i64(addr, disp12);
3242 if (ra != 31)
3243 val = cpu_ir[ra];
3244 else {
a7812ae4 3245 val = tcg_temp_new();
8bb6e981
AJ
3246 tcg_gen_movi_i64(val, 0);
3247 }
3248 switch ((insn >> 12) & 0xF) {
3249 case 0x0:
3250 /* Longword physical access */
2374e73e 3251 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3252 break;
3253 case 0x1:
3254 /* Quadword physical access */
2374e73e 3255 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3256 break;
3257 case 0x2:
3258 /* Longword physical access with lock */
c3082755 3259 gen_helper_stl_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3260 break;
3261 case 0x3:
3262 /* Quadword physical access with lock */
c3082755 3263 gen_helper_stq_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3264 break;
3265 case 0x4:
3266 /* Longword virtual access */
2374e73e 3267 goto invalid_opc;
8bb6e981
AJ
3268 case 0x5:
3269 /* Quadword virtual access */
2374e73e 3270 goto invalid_opc;
8bb6e981
AJ
3271 case 0x6:
3272 /* Invalid */
3273 goto invalid_opc;
3274 case 0x7:
3275 /* Invalid */
3276 goto invalid_opc;
3277 case 0x8:
3278 /* Invalid */
3279 goto invalid_opc;
3280 case 0x9:
3281 /* Invalid */
3282 goto invalid_opc;
3283 case 0xA:
3284 /* Invalid */
3285 goto invalid_opc;
3286 case 0xB:
3287 /* Invalid */
3288 goto invalid_opc;
3289 case 0xC:
3290 /* Longword virtual access with alternate access mode */
2374e73e 3291 goto invalid_opc;
8bb6e981
AJ
3292 case 0xD:
3293 /* Quadword virtual access with alternate access mode */
2374e73e 3294 goto invalid_opc;
8bb6e981
AJ
3295 case 0xE:
3296 /* Invalid */
3297 goto invalid_opc;
3298 case 0xF:
3299 /* Invalid */
3300 goto invalid_opc;
3301 }
45d46ce8 3302 if (ra == 31)
8bb6e981
AJ
3303 tcg_temp_free(val);
3304 tcg_temp_free(addr);
a18ad893 3305 break;
4c9649a9 3306 }
4c9649a9 3307#endif
a18ad893 3308 goto invalid_opc;
4c9649a9
JM
3309 case 0x20:
3310 /* LDF */
f18cd223 3311 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3312 break;
3313 case 0x21:
3314 /* LDG */
f18cd223 3315 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3316 break;
3317 case 0x22:
3318 /* LDS */
f18cd223 3319 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3320 break;
3321 case 0x23:
3322 /* LDT */
f18cd223 3323 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3324 break;
3325 case 0x24:
3326 /* STF */
6910b8f6 3327 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3328 break;
3329 case 0x25:
3330 /* STG */
6910b8f6 3331 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3332 break;
3333 case 0x26:
3334 /* STS */
6910b8f6 3335 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3336 break;
3337 case 0x27:
3338 /* STT */
6910b8f6 3339 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3340 break;
3341 case 0x28:
3342 /* LDL */
f18cd223 3343 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3344 break;
3345 case 0x29:
3346 /* LDQ */
f18cd223 3347 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3348 break;
3349 case 0x2A:
3350 /* LDL_L */
f4ed8679 3351 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3352 break;
3353 case 0x2B:
3354 /* LDQ_L */
f4ed8679 3355 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3356 break;
3357 case 0x2C:
3358 /* STL */
6910b8f6 3359 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3360 break;
3361 case 0x2D:
3362 /* STQ */
6910b8f6 3363 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3364 break;
3365 case 0x2E:
3366 /* STL_C */
6910b8f6 3367 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3368 break;
3369 case 0x2F:
3370 /* STQ_C */
6910b8f6 3371 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3372 break;
3373 case 0x30:
3374 /* BR */
4af70374 3375 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3376 break;
a7812ae4 3377 case 0x31: /* FBEQ */
4af70374 3378 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3379 break;
a7812ae4 3380 case 0x32: /* FBLT */
4af70374 3381 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3382 break;
a7812ae4 3383 case 0x33: /* FBLE */
4af70374 3384 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3385 break;
3386 case 0x34:
3387 /* BSR */
4af70374 3388 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3389 break;
a7812ae4 3390 case 0x35: /* FBNE */
4af70374 3391 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3392 break;
a7812ae4 3393 case 0x36: /* FBGE */
4af70374 3394 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3395 break;
a7812ae4 3396 case 0x37: /* FBGT */
4af70374 3397 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3398 break;
3399 case 0x38:
3400 /* BLBC */
4af70374 3401 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3402 break;
3403 case 0x39:
3404 /* BEQ */
4af70374 3405 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3406 break;
3407 case 0x3A:
3408 /* BLT */
4af70374 3409 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3410 break;
3411 case 0x3B:
3412 /* BLE */
4af70374 3413 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3414 break;
3415 case 0x3C:
3416 /* BLBS */
4af70374 3417 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3418 break;
3419 case 0x3D:
3420 /* BNE */
4af70374 3421 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3422 break;
3423 case 0x3E:
3424 /* BGE */
4af70374 3425 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3426 break;
3427 case 0x3F:
3428 /* BGT */
4af70374 3429 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3430 break;
3431 invalid_opc:
8aa3fa20 3432 ret = gen_invalid(ctx);
4c9649a9
JM
3433 break;
3434 }
3435
3436 return ret;
3437}
3438
/* Translate a block of guest Alpha code starting at tb->pc into a TCG
   opcode stream.  Decodes instructions one at a time via translate_one()
   until that function reports an exit condition, a page boundary is
   crossed, the opcode buffer fills, the icount budget is exhausted, or
   single-stepping is requested.  When SEARCH_PC is true, also fills the
   tcg_ctx.gen_opc_* side tables that map opcode indices back to guest
   PCs (used to restore CPU state after a fault mid-TB).  */
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;          /* lj: last gen_opc_* slot written for search_pc */
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamiclly figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        /* No explicit instruction budget: allow the maximum.  */
        max_insns = CF_COUNT_MASK;
    }

    /* Superpages span 41 bits of virtual address; within one, a TB may
       run past a normal page boundary since the mapping is linear.  */
    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start();
    do {
        /* Emit a debug exception in place of any instruction that has a
           breakpoint set on it.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record the guest PC and icount for each generated opcode
               index, zero-filling any indices skipped since last time.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* The last instruction of an icount TB may perform I/O.  */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        /* Alpha instructions are fixed-width 32-bit.  */
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation stopped.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* Exit already generated (goto_tb or exception helper).  */
        break;
    case EXIT_PC_STALE:
        /* cpu_pc was not kept up to date during the TB; store it now.  */
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the remainder of the gen_opc_* side tables.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
3574
/* Public entry point: translate a TB without building the PC search
   tables (search_pc = false).  */
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}
3579
/* Public entry point: re-translate a TB while recording the opcode-to-PC
   mapping (search_pc = true), used when restoring state after a fault.  */
void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}
3584
/* Restore the guest PC from the gen_opc_pc table recorded by
   gen_intermediate_code_pc(); pc_pos indexes the faulting opcode.
   On Alpha only the PC needs restoring — all other state is written
   back eagerly during translation.  */
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}