]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
Open up 1.7 development branch
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
4c9649a9 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
1de7afc9 22#include "qemu/host-utils.h"
57fec1fe 23#include "tcg-op.h"
4c9649a9 24
a7812ae4
PB
25#include "helper.h"
26#define GEN_HELPER 1
27#include "helper.h"
28
19188121 29#undef ALPHA_DEBUG_DISAS
f24518b5 30#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
31
32#ifdef ALPHA_DEBUG_DISAS
806991da 33# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
34#else
35# define LOG_DISAS(...) do { } while (0)
36#endif
37
4c9649a9
JM
typedef struct DisasContext DisasContext;
/* Per-translation-block state carried through the Alpha front end. */
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;              /* virtual PC of the insn being translated */
    int mem_idx;              /* MMU index used for qemu_ld/st ops */

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    bool singlestep_enabled;  /* suppresses goto_tb when single-stepping */
};
54
4af70374
RH
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
77
3761035f 78/* global register indexes */
a7812ae4 79static TCGv_ptr cpu_env;
496cb5b9 80static TCGv cpu_ir[31];
f18cd223 81static TCGv cpu_fir[31];
496cb5b9 82static TCGv cpu_pc;
6910b8f6
RH
83static TCGv cpu_lock_addr;
84static TCGv cpu_lock_st_addr;
85static TCGv cpu_lock_value;
2ace7e55
RH
86static TCGv cpu_unique;
87#ifndef CONFIG_USER_ONLY
88static TCGv cpu_sysval;
89static TCGv cpu_usp;
ab471ade 90#endif
496cb5b9 91
3761035f 92/* register names */
f18cd223 93static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef 94
022c62cb 95#include "exec/gen-icount.h"
2e70f6ef 96
/* One-time registration of the TCG globals (integer/FP register files,
   PC, lock state, PALcode scratch values) that back CPUAlphaState fields.
   Idempotent: guarded by done_init so repeated calls are no-ops. */
void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Build the "irN"/"firN" debug names in cpu_reg_names; the pointer
       increments match the string lengths incl. NUL (e.g. "ir9\0" = 4). */
    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    /* Load-locked / store-conditional tracking state. */
    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
149
/* Emit a call to the exception helper with constant exception number
   and error code; PC must already have been synced by the caller. */
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
8aa3fa20 160
bf1b03fe
RH
/* Sync cpu_pc to the current insn and raise an exception.  The helper
   longjmps out, so no code after it executes: EXIT_NORETURN. */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
167
/* Raise the illegal-opcode exception (OPCDEC) for the current insn. */
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
172
/* Load an F-float: 32-bit memory word converted to the 64-bit
   register representation via the memory_to_f helper. */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
183
/* Load a G-float: 64-bit memory word remapped to register format. */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
191
/* Load an S-float: 32-bit memory word widened to the 64-bit
   register representation via the memory_to_s helper. */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
202
/* LDL_L: 32-bit load-locked.  Records the address and loaded value
   for the later store-conditional comparison. */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
209
/* LDQ_L: 64-bit load-locked.  Records the address and loaded value
   for the later store-conditional comparison. */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
216
/* Common translation for memory loads: compute rb + disp16 (optionally
   cleared to an 8-byte boundary for LDx_U) and load into integer or FP
   register ra via the supplied load emitter. */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* rb == $31 reads as zero, so the address is just the constant. */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
250
/* Store an F-float: convert register format back to the 32-bit
   memory representation and store it. */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
261
/* Store a G-float: remap register format to memory format and store. */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
269
/* Store an S-float: narrow register format to the 32-bit memory
   representation and store it. */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
280
/* Common translation for memory stores: compute rb + disp16 (optionally
   cleared to an 8-byte boundary for STx_U) and store integer or FP
   register ra ($31 stores zero) via the supplied store emitter. */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        /* The zero register: store a constant 0 (needs its own temp). */
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
314
/* STL_C / STQ_C translation.  quad selects 64-bit vs 32-bit.  In user
   mode the CAS is punted to cpu_loop via an exception; in system mode
   it is a non-atomic load/compare/store sequence (single-threaded). */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    /* local temp: must survive the branches emitted below. */
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if the reservation address does not match. */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        /* Fail if the memory value changed since the load-locked. */
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* Clear the reservation in either case. */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
381
/* Whether a direct goto_tb to DEST is permitted. */
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-steping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
dbb30fe6 390
4af70374
RH
391static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
392{
393 uint64_t dest = ctx->pc + (disp << 2);
394
395 if (ra != 31) {
396 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
397 }
398
399 /* Notice branch-to-next; used to initialize RA with the PC. */
400 if (disp == 0) {
401 return 0;
402 } else if (use_goto_tb(ctx, dest)) {
403 tcg_gen_goto_tb(0);
404 tcg_gen_movi_i64(cpu_pc, dest);
4b4a72e5 405 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
4af70374
RH
406 return EXIT_GOTO_TB;
407 } else {
408 tcg_gen_movi_i64(cpu_pc, dest);
409 return EXIT_PC_UPDATED;
410 }
dbb30fe6
RH
411}
412
4af70374
RH
/* Conditional branch on (cmp COND 0) to pc + disp*4.  Emits a two-way
   goto_tb pair when chaining is allowed, otherwise selects the new PC
   with a movcond. */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Fall-through path: branch not taken. */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        /* cpu_pc = (cmp COND 0) ? dest : next-pc */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
445
/* Integer conditional branch.  mask selects testing only bit 0 of ra
   (BLBC/BLBS); otherwise the whole register is compared against zero. */
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
464
4af70374 465/* Fold -0.0 for comparison with COND. */
dbb30fe6 466
/* Fold -0.0 for comparison with COND. */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;  /* IEEE bit pattern of -0.0 */

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
496
4af70374
RH
/* Floating-point conditional branch: fold -0.0 to compare fir[ra]
   against zero as a signed integer, then branch. */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
512
/* Integer conditional move: ir[rc] = (test(ra) COND 0) ? rb/lit : ir[rc].
   mask selects testing only bit 0 of ra (CMOVLBC/CMOVLBS). */
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    /* Only free what we allocated; c1/v1 may alias the register file. */
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}
548
/* FP conditional move: fir[rc] = (fold(fir[ra]) COND 0) ? fir[rb] : fir[rc],
   with -0.0 folded per gen_fold_mzero. */
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
578
f24518b5
RH
579#define QUAL_RM_N 0x080 /* Round mode nearest even */
580#define QUAL_RM_C 0x000 /* Round mode chopped */
581#define QUAL_RM_M 0x040 /* Round mode minus infinity */
582#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
583#define QUAL_RM_MASK 0x0c0
584
585#define QUAL_U 0x100 /* Underflow enable (fp output) */
586#define QUAL_V 0x100 /* Overflow enable (int output) */
587#define QUAL_S 0x400 /* Software completion enable */
588#define QUAL_I 0x200 /* Inexact detection enable */
589
/* Emit code to switch the softfloat rounding mode per the insn's RM
   qualifier bits, caching the last-set mode in ctx->tb_rm so repeated
   insns with the same qualifier emit nothing. */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: read the mode from the FPCR at runtime. */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
629
/* Emit code to switch the softfloat flush-to-zero setting per the
   insn's underflow qualifier, cached in ctx->tb_ftz like tb_rm. */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
659
/* Fetch an IEEE input operand into a fresh temp (caller frees).
   Without /S, emit the input-check helper that raises on non-finite
   values the hardware cannot complete; is_cmp selects the laxer check
   used for comparisons. */
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
678
/* Clear the accumulated softfloat exception flags before an FP op. */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
690
/* Read the softfloat exception flags, mask out IGNORE, and call the
   raise helper (the /S variant when software completion is enabled). */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
728
/* Raise FP exceptions, ignoring inexact unless the /I qualifier asked. */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
f24518b5 733
593f17e5
RH
/* CVTLQ: unpack the 32-bit longword stored in FP-register format
   (bits scattered at 62..61 and 58..29) into a sign-extended 64-bit
   integer. */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
755
735cf45f
RH
/* CVTQL: pack the low 32 bits of a quadword into the scattered
   FP-register longword format (inverse of gen_fcvtlq). */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
775
/* CVTQL/V: as gen_fcvtql, but raise an integer-overflow arithmetic
   trap when the source does not fit in 32 signed bits. */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        /* Overflow iff sign-extending the low 32 bits changes the value. */
        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
790
4a58aedf
RH
/* Expand a two-operand VAX FP helper call (fir[rc] = name(fir[rb])),
   substituting a zero constant when rb is $31. */
#define FARITH2(name)                                       \
    static inline void glue(gen_f, name)(int rb, int rc)    \
    {                                                       \
        if (unlikely(rc == 31)) {                           \
            return;                                         \
        }                                                   \
        if (rb != 31) {                                     \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
        } else {                                            \
            TCGv tmp = tcg_const_i64(0);                    \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
            tcg_temp_free(tmp);                             \
        }                                                   \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
f24518b5 813
4a58aedf
RH
/* Common expansion for two-operand IEEE ops: apply rounding/FTZ
   qualifiers, clear flags, call HELPER, then raise any exceptions. */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
836
/* Instantiate gen_fNAME wrappers around gen_ieee_arith2. */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
847
/* CVTTQ: T-float to quadword conversion, special-casing the common
   cropped-rounding qualifier combinations onto dedicated helpers. */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        /* Suppress flags the qualifiers did not ask to trap on. */
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
887
4a58aedf
RH
/* Common expansion for integer-to-float conversions (CVTQS/CVTQT).
   Only inexact can be raised, so flag handling is emitted only when
   the /I qualifier is present. */
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
923
/* Instantiate gen_fNAME wrappers around gen_ieee_intcvt. */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
932
dc96be4b
RH
/* CPYS/CPYSN/CPYSE core: combine (ra & mask, optionally inverted) with
   (rb & ~mask) into rc.  za/zb note statically-zero halves so the
   combine can be simplified. */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask: reuse the mask constant directly. */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    /* Combine the two masked halves, skipping known-zero inputs. */
    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
992
/* CPYS: copy rb with the sign bit taken from ra. */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN: as CPYS but with ra's sign bit negated. */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE: copy rb with sign AND exponent taken from ra. */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1007
4a58aedf
RH
/* Expand a three-operand VAX FP helper call
   (fir[rc] = name(fir[ra], fir[rb])), with $31 reading as zero. */
#define FARITH3(name)                                               \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
    {                                                               \
        TCGv va, vb;                                                \
                                                                    \
        if (unlikely(rc == 31)) {                                   \
            return;                                                 \
        }                                                           \
        if (ra == 31) {                                             \
            va = tcg_const_i64(0);                                  \
        } else {                                                    \
            va = cpu_fir[ra];                                       \
        }                                                           \
        if (rb == 31) {                                             \
            vb = tcg_const_i64(0);                                  \
        } else {                                                    \
            vb = cpu_fir[rb];                                       \
        }                                                           \
                                                                    \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);          \
                                                                    \
        if (ra == 31) {                                             \
            tcg_temp_free(va);                                      \
        }                                                           \
        if (rb == 31) {                                             \
            tcg_temp_free(vb);                                      \
        }                                                           \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
f24518b5
RH
1049
/* Common expansion for three-operand IEEE arithmetic: qualifiers,
   flag clear, input checks, helper call, exception raise. */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1074
/* Instantiate gen_fNAME wrappers around gen_ieee_arith3. */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1089
/* Common expansion for IEEE comparisons: like gen_ieee_arith3 but no
   rounding/FTZ qualifiers, and the laxer cmp-style input check. */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1112
/* Expand a thin per-instruction wrapper gen_fNAME() that forwards to
   gen_ieee_compare with the matching TCG helper.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
a7812ae4 1123
248c42f3
RH
/* Expand the 8-bit ZAPNOT byte-select LIT into a 64-bit mask in which
   byte lane I is all-ones iff bit I of LIT is set.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    /* Walk the select bits from the top so each shift makes room for
       the next (lower) byte lane.  */
    for (i = 7; i >= 0; --i) {
        mask <<= 8;
        if (lit & (1 << i)) {
            mask |= 0xff;
        }
    }
    return mask;
}
1135
87d98f95
RH
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        /* No bytes selected: the result is zero.  */
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        /* Low byte only: zero-extend from 8 bits.  */
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        /* Low two bytes: zero-extend from 16 bits.  */
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        /* Low four bytes: zero-extend from 32 bits.  */
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        /* All bytes selected: plain move.  */
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        /* General case: AND with the expanded 64-bit byte mask.  */
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
1162
1163static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1164{
1165 if (unlikely(rc == 31))
1166 return;
1167 else if (unlikely(ra == 31))
1168 tcg_gen_movi_i64(cpu_ir[rc], 0);
1169 else if (islit)
248c42f3 1170 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1171 else
1172 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1173}
1174
1175static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1176{
1177 if (unlikely(rc == 31))
1178 return;
1179 else if (unlikely(ra == 31))
1180 tcg_gen_movi_i64(cpu_ir[rc], 0);
1181 else if (islit)
248c42f3 1182 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1183 else
1184 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1185}
1186
1187
/* EXTWH, EXTLH, EXTQH */
/* Extract high: left-shift Ra by (64 - 8 * (offset & 7)) & 63 bits,
   where the byte offset comes from the literal or from Rb, then zap
   the result to BYTE_MASK.  */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            /* Fold the shift count at translate time; the & 0x3f maps
               the offset-0 case to a shift of zero.  */
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            /* Compute (-8 * (Rb & 7)) & 63, which equals
               (64 - 8 * (Rb & 7)) & 63 without needing the constant.  */
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        /* Finally keep only the bytes selected by the instruction.  */
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1212
/* EXTBL, EXTWL, EXTLL, EXTQL */
/* Extract low: right-shift Ra by 8 * (offset & 7) bits, where the byte
   offset comes from the literal or from Rb, then zap the result to
   BYTE_MASK.  */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            /* Constant offset: fold the shift count at translate time.  */
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            /* Variable offset: shift count is 8 * (Rb & 7).  */
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        /* Finally keep only the bytes selected by the instruction.  */
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1234
50eb6e5c
RH
/* INSWH, INSLH, INSQH */
/* Insert high: position the BYTE_MASK bytes of Ra so that the portion
   which would spill past bit 63 of the corresponding insert-low lands
   in the low bytes of Rc.  An offset of zero produces zero.  */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above. */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63. */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            /* Variable part of the shift, then the final constant 1.  */
            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1276
/* INSBL, INSWL, INSLL, INSQL */
/* Insert low: zap Ra to BYTE_MASK, then left-shift the result by
   8 * (offset & 7) bits, with the offset taken from the literal or
   from Rb.  */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Constant offset: fold the shift count at translate time.  */
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            /* Variable offset: shift count is 8 * (Rb & 7).  */
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1306
ffec44f1
RH
/* MSKWH, MSKLH, MSKQH */
/* Mask high: clear in Ra the bytes corresponding to the high part of
   the byte mask shifted by the (variable or literal) byte offset.  */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Constant offset: compute the <15:8> slice of the shifted
           byte mask at translate time and reuse the zap expansion.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63. */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        /* Clear the masked bytes of Ra.  */
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1344
/* MSKBL, MSKWL, MSKLL, MSKQL */
/* Mask low: clear in Ra the BYTE_MASK bytes at byte offset (Rb & 7)
   or the literal offset, i.e. Rc = Ra & ~(expanded_mask << 8*offset).  */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Constant offset: shift the 8-bit mask at translate time and
           reuse the zap expansion.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* Shift the expanded 64-bit mask left by 8 * (Rb & 7).  */
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        /* Clear the masked bytes of Ra.  */
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1370
/* Code to call arith3 helpers */
/* Expand gen_NAME(ra, rb, rc, islit, lit): call the two-operand helper
   gen_helper_NAME with Ra (or a constant zero when Ra == 31) and either
   Rb or the literal.  Rc == 31 makes the instruction a nop.  All
   constant temporaries are freed after the call.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
1407
2958620f
RH
/* Code to call arith3 helpers */
/* As ARITH3, but for helpers that additionally take cpu_env as their
   first argument (the overflow-trapping /V arithmetic variants).  */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
1444
13e4df99
RH
/* Expand gen_NAME(rb, rc) for the two-operand MVI byte pack/unpack
   helpers.  Rc == 31 is a nop; Rb == 31 yields zero.  */
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
b3249f63 1459
9e05960f
RH
/* Emit an integer compare: set Rc to the 0/1 result of COND applied
   to Ra and the second operand (literal or Rb).  Constant temporaries
   are created only for the Ra == 31 / literal cases and are freed
   symmetrically after the setcond.  */
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    /* Only free what we allocated above; cpu_ir[] entries are globals.  */
    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
1489
ac316ca4
RH
/* RC/RS: read the per-cpu interrupt flag into Ra (unless Ra == 31),
   then overwrite the flag with SET.  The flag is stored in env as a
   single byte, so a 32-bit temporary suffices for the store.  */
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
1502
2ace7e55
RH
/* Emit code for CALL_PAL.  The trivial OSF/1 PALcode entry points are
   inlined here; anything else is routed through the EXCP_CALL_PAL
   exception so the emulated PALcode can handle it.  Returns the TB
   exit status (NO_EXIT for the inlined cases).  */
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers. */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU. */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            /* Let the emulated PALcode handle everything else.  */
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU. */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU. */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits. */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user. */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            /* cpu_index lives in the enclosing CPUState, reached with a
               negative offset from env.  */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
1597
26b46094
RH
1598#ifndef CONFIG_USER_ONLY
1599
/* Tag bits OR'd into cpu_pr_data's result to mark fields that are
   stored in CPUAlphaState as a byte or a 32-bit value rather than a
   full uint64_t.  Callers strip the tag before using the offset.  */
#define PR_BYTE 0x100000
#define PR_LONG 0x200000

/* Map an OSF/1 PALcode processor-register number PR to the offsetof
   its backing field in CPUAlphaState, possibly tagged with PR_BYTE or
   PR_LONG.  Returns 0 for unimplemented registers, which the callers
   treat as read-zero / write-ignore.  */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
1630
/* Emit code for HW_MFPR: read processor register REGNO into Ra.
   Returns EXIT_PC_STALE when the read must end the TB (icount clock
   reads), otherwise NO_EXIT.  */
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading. */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME. */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            /* With icount enabled, reading the clock counts as I/O:
               bracket it and end the TB so icount stays consistent.  */
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore. */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
1671
bc24270e 1672static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1673{
1674 TCGv tmp;
bc24270e 1675 int data;
26b46094
RH
1676
1677 if (rb == 31) {
1678 tmp = tcg_const_i64(0);
1679 } else {
1680 tmp = cpu_ir[rb];
1681 }
1682
bc24270e
RH
1683 switch (regno) {
1684 case 255:
3b4fefd6 1685 /* TBIA */
69163fbb 1686 gen_helper_tbia(cpu_env);
bc24270e
RH
1687 break;
1688
1689 case 254:
3b4fefd6 1690 /* TBIS */
69163fbb 1691 gen_helper_tbis(cpu_env, tmp);
bc24270e
RH
1692 break;
1693
1694 case 253:
1695 /* WAIT */
1696 tmp = tcg_const_i64(1);
259186a7
AF
1697 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1698 offsetof(CPUState, halted));
bc24270e
RH
1699 return gen_excp(ctx, EXCP_HLT, 0);
1700
034ebc27
RH
1701 case 252:
1702 /* HALT */
1703 gen_helper_halt(tmp);
1704 return EXIT_PC_STALE;
1705
c781cf96
RH
1706 case 251:
1707 /* ALARM */
69163fbb 1708 gen_helper_set_alarm(cpu_env, tmp);
c781cf96
RH
1709 break;
1710
bc24270e 1711 default:
3b4fefd6
RH
1712 /* The basic registers are data only, and unknown registers
1713 are read-zero, write-ignore. */
bc24270e 1714 data = cpu_pr_data(regno);
3b4fefd6
RH
1715 if (data != 0) {
1716 if (data & PR_BYTE) {
1717 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1718 } else if (data & PR_LONG) {
1719 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1720 } else {
1721 tcg_gen_st_i64(tmp, cpu_env, data);
1722 }
26b46094 1723 }
bc24270e 1724 break;
26b46094
RH
1725 }
1726
1727 if (rb == 31) {
1728 tcg_temp_free(tmp);
1729 }
bc24270e
RH
1730
1731 return NO_EXIT;
26b46094
RH
1732}
1733#endif /* !USER_ONLY*/
1734
4af70374 1735static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1736{
1737 uint32_t palcode;
efa64351
MT
1738 int32_t disp21, disp16;
1739#ifndef CONFIG_USER_ONLY
1740 int32_t disp12;
1741#endif
f88fe4e3 1742 uint16_t fn11;
b6fb147c 1743 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1744 uint8_t lit;
4af70374 1745 ExitStatus ret;
4c9649a9
JM
1746
1747 /* Decode all instruction fields */
1748 opc = insn >> 26;
1749 ra = (insn >> 21) & 0x1F;
1750 rb = (insn >> 16) & 0x1F;
1751 rc = insn & 0x1F;
13e4df99 1752 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1753 if (rb == 31 && !islit) {
1754 islit = 1;
1755 lit = 0;
1756 } else
1757 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1758 palcode = insn & 0x03FFFFFF;
1759 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1760 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1761#ifndef CONFIG_USER_ONLY
4c9649a9 1762 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1763#endif
4c9649a9
JM
1764 fn11 = (insn >> 5) & 0x000007FF;
1765 fpfn = fn11 & 0x3F;
1766 fn7 = (insn >> 5) & 0x0000007F;
806991da 1767 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1768 opc, ra, rb, rc, disp16);
806991da 1769
4af70374 1770 ret = NO_EXIT;
4c9649a9
JM
1771 switch (opc) {
1772 case 0x00:
1773 /* CALL_PAL */
2ace7e55
RH
1774 ret = gen_call_pal(ctx, palcode);
1775 break;
4c9649a9
JM
1776 case 0x01:
1777 /* OPC01 */
1778 goto invalid_opc;
1779 case 0x02:
1780 /* OPC02 */
1781 goto invalid_opc;
1782 case 0x03:
1783 /* OPC03 */
1784 goto invalid_opc;
1785 case 0x04:
1786 /* OPC04 */
1787 goto invalid_opc;
1788 case 0x05:
1789 /* OPC05 */
1790 goto invalid_opc;
1791 case 0x06:
1792 /* OPC06 */
1793 goto invalid_opc;
1794 case 0x07:
1795 /* OPC07 */
1796 goto invalid_opc;
1797 case 0x08:
1798 /* LDA */
1ef4ef4e 1799 if (likely(ra != 31)) {
496cb5b9 1800 if (rb != 31)
3761035f
AJ
1801 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1802 else
1803 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1804 }
4c9649a9
JM
1805 break;
1806 case 0x09:
1807 /* LDAH */
1ef4ef4e 1808 if (likely(ra != 31)) {
496cb5b9 1809 if (rb != 31)
3761035f
AJ
1810 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1811 else
1812 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1813 }
4c9649a9
JM
1814 break;
1815 case 0x0A:
1816 /* LDBU */
a18ad893
RH
1817 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1818 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1819 break;
1820 }
1821 goto invalid_opc;
4c9649a9
JM
1822 case 0x0B:
1823 /* LDQ_U */
f18cd223 1824 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1825 break;
1826 case 0x0C:
1827 /* LDWU */
a18ad893
RH
1828 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1829 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1830 break;
1831 }
1832 goto invalid_opc;
4c9649a9
JM
1833 case 0x0D:
1834 /* STW */
6910b8f6 1835 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1836 break;
1837 case 0x0E:
1838 /* STB */
6910b8f6 1839 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1840 break;
1841 case 0x0F:
1842 /* STQ_U */
6910b8f6 1843 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1844 break;
1845 case 0x10:
1846 switch (fn7) {
1847 case 0x00:
1848 /* ADDL */
30c7183b
AJ
1849 if (likely(rc != 31)) {
1850 if (ra != 31) {
1851 if (islit) {
1852 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1853 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1854 } else {
30c7183b
AJ
1855 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1856 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1857 }
30c7183b
AJ
1858 } else {
1859 if (islit)
dfaa8583 1860 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1861 else
dfaa8583 1862 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1863 }
1864 }
4c9649a9
JM
1865 break;
1866 case 0x02:
1867 /* S4ADDL */
30c7183b
AJ
1868 if (likely(rc != 31)) {
1869 if (ra != 31) {
a7812ae4 1870 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1871 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1872 if (islit)
1873 tcg_gen_addi_i64(tmp, tmp, lit);
1874 else
1875 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1876 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1877 tcg_temp_free(tmp);
30c7183b
AJ
1878 } else {
1879 if (islit)
1880 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1881 else
dfaa8583 1882 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1883 }
1884 }
4c9649a9
JM
1885 break;
1886 case 0x09:
1887 /* SUBL */
30c7183b
AJ
1888 if (likely(rc != 31)) {
1889 if (ra != 31) {
dfaa8583 1890 if (islit)
30c7183b 1891 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1892 else
30c7183b 1893 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1894 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1895 } else {
1896 if (islit)
1897 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1898 else {
30c7183b
AJ
1899 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1900 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1901 }
1902 }
4c9649a9
JM
1903 break;
1904 case 0x0B:
1905 /* S4SUBL */
30c7183b
AJ
1906 if (likely(rc != 31)) {
1907 if (ra != 31) {
a7812ae4 1908 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1909 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1910 if (islit)
1911 tcg_gen_subi_i64(tmp, tmp, lit);
1912 else
1913 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1914 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1915 tcg_temp_free(tmp);
30c7183b
AJ
1916 } else {
1917 if (islit)
1918 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1919 else {
30c7183b
AJ
1920 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1921 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1922 }
30c7183b
AJ
1923 }
1924 }
4c9649a9
JM
1925 break;
1926 case 0x0F:
1927 /* CMPBGE */
a7812ae4 1928 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1929 break;
1930 case 0x12:
1931 /* S8ADDL */
30c7183b
AJ
1932 if (likely(rc != 31)) {
1933 if (ra != 31) {
a7812ae4 1934 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1935 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1936 if (islit)
1937 tcg_gen_addi_i64(tmp, tmp, lit);
1938 else
1939 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1940 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1941 tcg_temp_free(tmp);
30c7183b
AJ
1942 } else {
1943 if (islit)
1944 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1945 else
dfaa8583 1946 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1947 }
1948 }
4c9649a9
JM
1949 break;
1950 case 0x1B:
1951 /* S8SUBL */
30c7183b
AJ
1952 if (likely(rc != 31)) {
1953 if (ra != 31) {
a7812ae4 1954 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1955 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1956 if (islit)
1957 tcg_gen_subi_i64(tmp, tmp, lit);
1958 else
1959 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1960 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1961 tcg_temp_free(tmp);
30c7183b
AJ
1962 } else {
1963 if (islit)
1964 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1965 else
30c7183b
AJ
1966 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1967 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1968 }
30c7183b
AJ
1969 }
1970 }
4c9649a9
JM
1971 break;
1972 case 0x1D:
1973 /* CMPULT */
01ff9cc8 1974 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1975 break;
1976 case 0x20:
1977 /* ADDQ */
30c7183b
AJ
1978 if (likely(rc != 31)) {
1979 if (ra != 31) {
1980 if (islit)
1981 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1982 else
dfaa8583 1983 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1984 } else {
1985 if (islit)
1986 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1987 else
dfaa8583 1988 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1989 }
1990 }
4c9649a9
JM
1991 break;
1992 case 0x22:
1993 /* S4ADDQ */
30c7183b
AJ
1994 if (likely(rc != 31)) {
1995 if (ra != 31) {
a7812ae4 1996 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1997 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1998 if (islit)
1999 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2000 else
2001 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2002 tcg_temp_free(tmp);
30c7183b
AJ
2003 } else {
2004 if (islit)
2005 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2006 else
dfaa8583 2007 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2008 }
2009 }
4c9649a9
JM
2010 break;
2011 case 0x29:
2012 /* SUBQ */
30c7183b
AJ
2013 if (likely(rc != 31)) {
2014 if (ra != 31) {
2015 if (islit)
2016 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2017 else
dfaa8583 2018 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2019 } else {
2020 if (islit)
2021 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2022 else
dfaa8583 2023 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2024 }
2025 }
4c9649a9
JM
2026 break;
2027 case 0x2B:
2028 /* S4SUBQ */
30c7183b
AJ
2029 if (likely(rc != 31)) {
2030 if (ra != 31) {
a7812ae4 2031 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2032 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2033 if (islit)
2034 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2035 else
2036 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2037 tcg_temp_free(tmp);
30c7183b
AJ
2038 } else {
2039 if (islit)
2040 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2041 else
dfaa8583 2042 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2043 }
2044 }
4c9649a9
JM
2045 break;
2046 case 0x2D:
2047 /* CMPEQ */
01ff9cc8 2048 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
2049 break;
2050 case 0x32:
2051 /* S8ADDQ */
30c7183b
AJ
2052 if (likely(rc != 31)) {
2053 if (ra != 31) {
a7812ae4 2054 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2055 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2056 if (islit)
2057 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2058 else
2059 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2060 tcg_temp_free(tmp);
30c7183b
AJ
2061 } else {
2062 if (islit)
2063 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2064 else
dfaa8583 2065 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2066 }
2067 }
4c9649a9
JM
2068 break;
2069 case 0x3B:
2070 /* S8SUBQ */
30c7183b
AJ
2071 if (likely(rc != 31)) {
2072 if (ra != 31) {
a7812ae4 2073 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2074 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2075 if (islit)
2076 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2077 else
2078 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2079 tcg_temp_free(tmp);
30c7183b
AJ
2080 } else {
2081 if (islit)
2082 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2083 else
dfaa8583 2084 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2085 }
2086 }
4c9649a9
JM
2087 break;
2088 case 0x3D:
2089 /* CMPULE */
01ff9cc8 2090 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2091 break;
2092 case 0x40:
2093 /* ADDL/V */
a7812ae4 2094 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2095 break;
2096 case 0x49:
2097 /* SUBL/V */
a7812ae4 2098 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2099 break;
2100 case 0x4D:
2101 /* CMPLT */
01ff9cc8 2102 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2103 break;
2104 case 0x60:
2105 /* ADDQ/V */
a7812ae4 2106 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2107 break;
2108 case 0x69:
2109 /* SUBQ/V */
a7812ae4 2110 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2111 break;
2112 case 0x6D:
2113 /* CMPLE */
01ff9cc8 2114 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2115 break;
2116 default:
2117 goto invalid_opc;
2118 }
2119 break;
2120 case 0x11:
2121 switch (fn7) {
2122 case 0x00:
2123 /* AND */
30c7183b 2124 if (likely(rc != 31)) {
dfaa8583 2125 if (ra == 31)
30c7183b
AJ
2126 tcg_gen_movi_i64(cpu_ir[rc], 0);
2127 else if (islit)
2128 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2129 else
2130 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2131 }
4c9649a9
JM
2132 break;
2133 case 0x08:
2134 /* BIC */
30c7183b
AJ
2135 if (likely(rc != 31)) {
2136 if (ra != 31) {
2137 if (islit)
2138 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2139 else
2140 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2141 } else
2142 tcg_gen_movi_i64(cpu_ir[rc], 0);
2143 }
4c9649a9
JM
2144 break;
2145 case 0x14:
2146 /* CMOVLBS */
bbe1dab4 2147 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2148 break;
2149 case 0x16:
2150 /* CMOVLBC */
bbe1dab4 2151 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2152 break;
2153 case 0x20:
2154 /* BIS */
30c7183b
AJ
2155 if (likely(rc != 31)) {
2156 if (ra != 31) {
2157 if (islit)
2158 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2159 else
30c7183b 2160 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2161 } else {
30c7183b
AJ
2162 if (islit)
2163 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2164 else
dfaa8583 2165 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2166 }
4c9649a9
JM
2167 }
2168 break;
2169 case 0x24:
2170 /* CMOVEQ */
bbe1dab4 2171 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2172 break;
2173 case 0x26:
2174 /* CMOVNE */
bbe1dab4 2175 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2176 break;
2177 case 0x28:
2178 /* ORNOT */
30c7183b 2179 if (likely(rc != 31)) {
dfaa8583 2180 if (ra != 31) {
30c7183b
AJ
2181 if (islit)
2182 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2183 else
2184 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2185 } else {
2186 if (islit)
2187 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2188 else
2189 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2190 }
2191 }
4c9649a9
JM
2192 break;
2193 case 0x40:
2194 /* XOR */
30c7183b
AJ
2195 if (likely(rc != 31)) {
2196 if (ra != 31) {
2197 if (islit)
2198 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2199 else
dfaa8583 2200 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2201 } else {
2202 if (islit)
2203 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2204 else
dfaa8583 2205 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2206 }
2207 }
4c9649a9
JM
2208 break;
2209 case 0x44:
2210 /* CMOVLT */
bbe1dab4 2211 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2212 break;
2213 case 0x46:
2214 /* CMOVGE */
bbe1dab4 2215 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2216 break;
2217 case 0x48:
2218 /* EQV */
30c7183b
AJ
2219 if (likely(rc != 31)) {
2220 if (ra != 31) {
2221 if (islit)
2222 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2223 else
2224 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2225 } else {
2226 if (islit)
2227 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2228 else
dfaa8583 2229 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2230 }
2231 }
4c9649a9
JM
2232 break;
2233 case 0x61:
2234 /* AMASK */
ae8ecd42 2235 if (likely(rc != 31)) {
a18ad893
RH
2236 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2237
2238 if (islit) {
2239 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2240 } else {
2241 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2242 }
ae8ecd42 2243 }
4c9649a9
JM
2244 break;
2245 case 0x64:
2246 /* CMOVLE */
bbe1dab4 2247 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2248 break;
2249 case 0x66:
2250 /* CMOVGT */
bbe1dab4 2251 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2252 break;
2253 case 0x6C:
2254 /* IMPLVER */
801c4c28
RH
2255 if (rc != 31) {
2256 tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
2257 }
4c9649a9
JM
2258 break;
2259 default:
2260 goto invalid_opc;
2261 }
2262 break;
2263 case 0x12:
2264 switch (fn7) {
2265 case 0x02:
2266 /* MSKBL */
14ab1634 2267 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2268 break;
2269 case 0x06:
2270 /* EXTBL */
377a43b6 2271 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2272 break;
2273 case 0x0B:
2274 /* INSBL */
248c42f3 2275 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2276 break;
2277 case 0x12:
2278 /* MSKWL */
14ab1634 2279 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2280 break;
2281 case 0x16:
2282 /* EXTWL */
377a43b6 2283 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2284 break;
2285 case 0x1B:
2286 /* INSWL */
248c42f3 2287 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2288 break;
2289 case 0x22:
2290 /* MSKLL */
14ab1634 2291 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2292 break;
2293 case 0x26:
2294 /* EXTLL */
377a43b6 2295 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2296 break;
2297 case 0x2B:
2298 /* INSLL */
248c42f3 2299 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2300 break;
2301 case 0x30:
2302 /* ZAP */
a7812ae4 2303 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2304 break;
2305 case 0x31:
2306 /* ZAPNOT */
a7812ae4 2307 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2308 break;
2309 case 0x32:
2310 /* MSKQL */
14ab1634 2311 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2312 break;
2313 case 0x34:
2314 /* SRL */
30c7183b
AJ
2315 if (likely(rc != 31)) {
2316 if (ra != 31) {
2317 if (islit)
2318 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2319 else {
a7812ae4 2320 TCGv shift = tcg_temp_new();
30c7183b
AJ
2321 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2322 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2323 tcg_temp_free(shift);
dfaa8583 2324 }
30c7183b
AJ
2325 } else
2326 tcg_gen_movi_i64(cpu_ir[rc], 0);
2327 }
4c9649a9
JM
2328 break;
2329 case 0x36:
2330 /* EXTQL */
377a43b6 2331 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2332 break;
2333 case 0x39:
2334 /* SLL */
30c7183b
AJ
2335 if (likely(rc != 31)) {
2336 if (ra != 31) {
2337 if (islit)
2338 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2339 else {
a7812ae4 2340 TCGv shift = tcg_temp_new();
30c7183b
AJ
2341 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2342 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2343 tcg_temp_free(shift);
dfaa8583 2344 }
30c7183b
AJ
2345 } else
2346 tcg_gen_movi_i64(cpu_ir[rc], 0);
2347 }
4c9649a9
JM
2348 break;
2349 case 0x3B:
2350 /* INSQL */
248c42f3 2351 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2352 break;
2353 case 0x3C:
2354 /* SRA */
30c7183b
AJ
2355 if (likely(rc != 31)) {
2356 if (ra != 31) {
2357 if (islit)
2358 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2359 else {
a7812ae4 2360 TCGv shift = tcg_temp_new();
30c7183b
AJ
2361 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2362 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2363 tcg_temp_free(shift);
dfaa8583 2364 }
30c7183b
AJ
2365 } else
2366 tcg_gen_movi_i64(cpu_ir[rc], 0);
2367 }
4c9649a9
JM
2368 break;
2369 case 0x52:
2370 /* MSKWH */
ffec44f1 2371 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2372 break;
2373 case 0x57:
2374 /* INSWH */
50eb6e5c 2375 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2376 break;
2377 case 0x5A:
2378 /* EXTWH */
377a43b6 2379 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2380 break;
2381 case 0x62:
2382 /* MSKLH */
ffec44f1 2383 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2384 break;
2385 case 0x67:
2386 /* INSLH */
50eb6e5c 2387 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2388 break;
2389 case 0x6A:
2390 /* EXTLH */
377a43b6 2391 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2392 break;
2393 case 0x72:
2394 /* MSKQH */
ffec44f1 2395 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2396 break;
2397 case 0x77:
2398 /* INSQH */
50eb6e5c 2399 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2400 break;
2401 case 0x7A:
2402 /* EXTQH */
377a43b6 2403 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2404 break;
2405 default:
2406 goto invalid_opc;
2407 }
2408 break;
2409 case 0x13:
2410 switch (fn7) {
2411 case 0x00:
2412 /* MULL */
30c7183b 2413 if (likely(rc != 31)) {
dfaa8583 2414 if (ra == 31)
30c7183b
AJ
2415 tcg_gen_movi_i64(cpu_ir[rc], 0);
2416 else {
2417 if (islit)
2418 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2419 else
2420 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2421 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2422 }
2423 }
4c9649a9
JM
2424 break;
2425 case 0x20:
2426 /* MULQ */
30c7183b 2427 if (likely(rc != 31)) {
dfaa8583 2428 if (ra == 31)
30c7183b
AJ
2429 tcg_gen_movi_i64(cpu_ir[rc], 0);
2430 else if (islit)
2431 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2432 else
2433 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2434 }
4c9649a9
JM
2435 break;
2436 case 0x30:
2437 /* UMULH */
962415fc
RH
2438 {
2439 TCGv low;
2440 if (unlikely(rc == 31)){
2441 break;
2442 }
2443 if (ra == 31) {
2444 tcg_gen_movi_i64(cpu_ir[rc], 0);
2445 break;
2446 }
2447 low = tcg_temp_new();
2448 if (islit) {
2449 tcg_gen_movi_tl(low, lit);
2450 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2451 } else {
2452 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2453 }
2454 tcg_temp_free(low);
2455 }
4c9649a9
JM
2456 break;
2457 case 0x40:
2458 /* MULL/V */
a7812ae4 2459 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2460 break;
2461 case 0x60:
2462 /* MULQ/V */
a7812ae4 2463 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2464 break;
2465 default:
2466 goto invalid_opc;
2467 }
2468 break;
2469 case 0x14:
f24518b5 2470 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2471 case 0x04:
2472 /* ITOFS */
a18ad893 2473 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2474 goto invalid_opc;
a18ad893 2475 }
f18cd223
AJ
2476 if (likely(rc != 31)) {
2477 if (ra != 31) {
a7812ae4 2478 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2479 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2480 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2481 tcg_temp_free_i32(tmp);
f18cd223
AJ
2482 } else
2483 tcg_gen_movi_i64(cpu_fir[rc], 0);
2484 }
4c9649a9
JM
2485 break;
2486 case 0x0A:
2487 /* SQRTF */
a18ad893
RH
2488 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2489 gen_fsqrtf(rb, rc);
2490 break;
2491 }
2492 goto invalid_opc;
4c9649a9
JM
2493 case 0x0B:
2494 /* SQRTS */
a18ad893
RH
2495 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2496 gen_fsqrts(ctx, rb, rc, fn11);
2497 break;
2498 }
2499 goto invalid_opc;
4c9649a9
JM
2500 case 0x14:
2501 /* ITOFF */
a18ad893 2502 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2503 goto invalid_opc;
a18ad893 2504 }
f18cd223
AJ
2505 if (likely(rc != 31)) {
2506 if (ra != 31) {
a7812ae4 2507 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2508 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2509 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2510 tcg_temp_free_i32(tmp);
f18cd223
AJ
2511 } else
2512 tcg_gen_movi_i64(cpu_fir[rc], 0);
2513 }
4c9649a9
JM
2514 break;
2515 case 0x24:
2516 /* ITOFT */
a18ad893 2517 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2518 goto invalid_opc;
a18ad893 2519 }
f18cd223
AJ
2520 if (likely(rc != 31)) {
2521 if (ra != 31)
2522 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2523 else
2524 tcg_gen_movi_i64(cpu_fir[rc], 0);
2525 }
4c9649a9
JM
2526 break;
2527 case 0x2A:
2528 /* SQRTG */
a18ad893
RH
2529 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2530 gen_fsqrtg(rb, rc);
2531 break;
2532 }
2533 goto invalid_opc;
4c9649a9
JM
2534 case 0x02B:
2535 /* SQRTT */
a18ad893
RH
2536 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2537 gen_fsqrtt(ctx, rb, rc, fn11);
2538 break;
2539 }
2540 goto invalid_opc;
4c9649a9
JM
2541 default:
2542 goto invalid_opc;
2543 }
2544 break;
2545 case 0x15:
2546 /* VAX floating point */
2547 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2548 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2549 case 0x00:
2550 /* ADDF */
a7812ae4 2551 gen_faddf(ra, rb, rc);
4c9649a9
JM
2552 break;
2553 case 0x01:
2554 /* SUBF */
a7812ae4 2555 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2556 break;
2557 case 0x02:
2558 /* MULF */
a7812ae4 2559 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2560 break;
2561 case 0x03:
2562 /* DIVF */
a7812ae4 2563 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2564 break;
2565 case 0x1E:
2566 /* CVTDG */
2567#if 0 // TODO
a7812ae4 2568 gen_fcvtdg(rb, rc);
4c9649a9
JM
2569#else
2570 goto invalid_opc;
2571#endif
2572 break;
2573 case 0x20:
2574 /* ADDG */
a7812ae4 2575 gen_faddg(ra, rb, rc);
4c9649a9
JM
2576 break;
2577 case 0x21:
2578 /* SUBG */
a7812ae4 2579 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2580 break;
2581 case 0x22:
2582 /* MULG */
a7812ae4 2583 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2584 break;
2585 case 0x23:
2586 /* DIVG */
a7812ae4 2587 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2588 break;
2589 case 0x25:
2590 /* CMPGEQ */
a7812ae4 2591 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2592 break;
2593 case 0x26:
2594 /* CMPGLT */
a7812ae4 2595 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2596 break;
2597 case 0x27:
2598 /* CMPGLE */
a7812ae4 2599 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2600 break;
2601 case 0x2C:
2602 /* CVTGF */
a7812ae4 2603 gen_fcvtgf(rb, rc);
4c9649a9
JM
2604 break;
2605 case 0x2D:
2606 /* CVTGD */
2607#if 0 // TODO
a7812ae4 2608 gen_fcvtgd(rb, rc);
4c9649a9
JM
2609#else
2610 goto invalid_opc;
2611#endif
2612 break;
2613 case 0x2F:
2614 /* CVTGQ */
a7812ae4 2615 gen_fcvtgq(rb, rc);
4c9649a9
JM
2616 break;
2617 case 0x3C:
2618 /* CVTQF */
a7812ae4 2619 gen_fcvtqf(rb, rc);
4c9649a9
JM
2620 break;
2621 case 0x3E:
2622 /* CVTQG */
a7812ae4 2623 gen_fcvtqg(rb, rc);
4c9649a9
JM
2624 break;
2625 default:
2626 goto invalid_opc;
2627 }
2628 break;
2629 case 0x16:
2630 /* IEEE floating-point */
f24518b5 2631 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2632 case 0x00:
2633 /* ADDS */
f24518b5 2634 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2635 break;
2636 case 0x01:
2637 /* SUBS */
f24518b5 2638 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2639 break;
2640 case 0x02:
2641 /* MULS */
f24518b5 2642 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2643 break;
2644 case 0x03:
2645 /* DIVS */
f24518b5 2646 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2647 break;
2648 case 0x20:
2649 /* ADDT */
f24518b5 2650 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2651 break;
2652 case 0x21:
2653 /* SUBT */
f24518b5 2654 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2655 break;
2656 case 0x22:
2657 /* MULT */
f24518b5 2658 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2659 break;
2660 case 0x23:
2661 /* DIVT */
f24518b5 2662 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2663 break;
2664 case 0x24:
2665 /* CMPTUN */
f24518b5 2666 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2667 break;
2668 case 0x25:
2669 /* CMPTEQ */
f24518b5 2670 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2671 break;
2672 case 0x26:
2673 /* CMPTLT */
f24518b5 2674 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2675 break;
2676 case 0x27:
2677 /* CMPTLE */
f24518b5 2678 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2679 break;
2680 case 0x2C:
a74b4d2c 2681 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2682 /* CVTST */
f24518b5 2683 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2684 } else {
2685 /* CVTTS */
f24518b5 2686 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2687 }
2688 break;
2689 case 0x2F:
2690 /* CVTTQ */
f24518b5 2691 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2692 break;
2693 case 0x3C:
2694 /* CVTQS */
f24518b5 2695 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2696 break;
2697 case 0x3E:
2698 /* CVTQT */
f24518b5 2699 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2700 break;
2701 default:
2702 goto invalid_opc;
2703 }
2704 break;
2705 case 0x17:
2706 switch (fn11) {
2707 case 0x010:
2708 /* CVTLQ */
a7812ae4 2709 gen_fcvtlq(rb, rc);
4c9649a9
JM
2710 break;
2711 case 0x020:
f18cd223 2712 if (likely(rc != 31)) {
a06d48d9 2713 if (ra == rb) {
4c9649a9 2714 /* FMOV */
a06d48d9
RH
2715 if (ra == 31)
2716 tcg_gen_movi_i64(cpu_fir[rc], 0);
2717 else
2718 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2719 } else {
f18cd223 2720 /* CPYS */
a7812ae4 2721 gen_fcpys(ra, rb, rc);
a06d48d9 2722 }
4c9649a9
JM
2723 }
2724 break;
2725 case 0x021:
2726 /* CPYSN */
a7812ae4 2727 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2728 break;
2729 case 0x022:
2730 /* CPYSE */
a7812ae4 2731 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2732 break;
2733 case 0x024:
2734 /* MT_FPCR */
f18cd223 2735 if (likely(ra != 31))
a44a2777 2736 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
f18cd223
AJ
2737 else {
2738 TCGv tmp = tcg_const_i64(0);
a44a2777 2739 gen_helper_store_fpcr(cpu_env, tmp);
f18cd223
AJ
2740 tcg_temp_free(tmp);
2741 }
4c9649a9
JM
2742 break;
2743 case 0x025:
2744 /* MF_FPCR */
f18cd223 2745 if (likely(ra != 31))
a44a2777 2746 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
4c9649a9
JM
2747 break;
2748 case 0x02A:
2749 /* FCMOVEQ */
bbe1dab4 2750 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2751 break;
2752 case 0x02B:
2753 /* FCMOVNE */
bbe1dab4 2754 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2755 break;
2756 case 0x02C:
2757 /* FCMOVLT */
bbe1dab4 2758 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2759 break;
2760 case 0x02D:
2761 /* FCMOVGE */
bbe1dab4 2762 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2763 break;
2764 case 0x02E:
2765 /* FCMOVLE */
bbe1dab4 2766 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2767 break;
2768 case 0x02F:
2769 /* FCMOVGT */
bbe1dab4 2770 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2771 break;
2772 case 0x030:
2773 /* CVTQL */
a7812ae4 2774 gen_fcvtql(rb, rc);
4c9649a9
JM
2775 break;
2776 case 0x130:
2777 /* CVTQL/V */
4c9649a9
JM
2778 case 0x530:
2779 /* CVTQL/SV */
735cf45f
RH
2780 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2781 /v doesn't do. The only thing I can think is that /sv is a
2782 valid instruction merely for completeness in the ISA. */
2783 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2784 break;
2785 default:
2786 goto invalid_opc;
2787 }
2788 break;
2789 case 0x18:
2790 switch ((uint16_t)disp16) {
2791 case 0x0000:
2792 /* TRAPB */
4af70374 2793 /* No-op. */
4c9649a9
JM
2794 break;
2795 case 0x0400:
2796 /* EXCB */
4af70374 2797 /* No-op. */
4c9649a9
JM
2798 break;
2799 case 0x4000:
2800 /* MB */
2801 /* No-op */
2802 break;
2803 case 0x4400:
2804 /* WMB */
2805 /* No-op */
2806 break;
2807 case 0x8000:
2808 /* FETCH */
2809 /* No-op */
2810 break;
2811 case 0xA000:
2812 /* FETCH_M */
2813 /* No-op */
2814 break;
2815 case 0xC000:
2816 /* RPCC */
a9406ea1
RH
2817 if (ra != 31) {
2818 if (use_icount) {
2819 gen_io_start();
69163fbb 2820 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2821 gen_io_end();
2822 ret = EXIT_PC_STALE;
2823 } else {
69163fbb 2824 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2825 }
2826 }
4c9649a9
JM
2827 break;
2828 case 0xE000:
2829 /* RC */
ac316ca4 2830 gen_rx(ra, 0);
4c9649a9
JM
2831 break;
2832 case 0xE800:
2833 /* ECB */
4c9649a9
JM
2834 break;
2835 case 0xF000:
2836 /* RS */
ac316ca4 2837 gen_rx(ra, 1);
4c9649a9
JM
2838 break;
2839 case 0xF800:
2840 /* WH64 */
2841 /* No-op */
2842 break;
2843 default:
2844 goto invalid_opc;
2845 }
2846 break;
2847 case 0x19:
2848 /* HW_MFPR (PALcode) */
26b46094 2849#ifndef CONFIG_USER_ONLY
a18ad893 2850 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
c781cf96 2851 return gen_mfpr(ra, insn & 0xffff);
26b46094
RH
2852 }
2853#endif
4c9649a9 2854 goto invalid_opc;
4c9649a9 2855 case 0x1A:
49563a72
RH
2856 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2857 prediction stack action, which of course we don't implement. */
2858 if (rb != 31) {
3761035f 2859 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2860 } else {
3761035f 2861 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2862 }
2863 if (ra != 31) {
1304ca87 2864 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2865 }
4af70374 2866 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2867 break;
2868 case 0x1B:
2869 /* HW_LD (PALcode) */
a18ad893
RH
2870#ifndef CONFIG_USER_ONLY
2871 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2872 TCGv addr;
2873
2874 if (ra == 31) {
2875 break;
2876 }
2877
2878 addr = tcg_temp_new();
8bb6e981
AJ
2879 if (rb != 31)
2880 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2881 else
2882 tcg_gen_movi_i64(addr, disp12);
2883 switch ((insn >> 12) & 0xF) {
2884 case 0x0:
b5d51029 2885 /* Longword physical access (hw_ldl/p) */
2374e73e 2886 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2887 break;
2888 case 0x1:
b5d51029 2889 /* Quadword physical access (hw_ldq/p) */
2374e73e 2890 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2891 break;
2892 case 0x2:
b5d51029 2893 /* Longword physical access with lock (hw_ldl_l/p) */
c3082755 2894 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2895 break;
2896 case 0x3:
b5d51029 2897 /* Quadword physical access with lock (hw_ldq_l/p) */
c3082755 2898 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2899 break;
2900 case 0x4:
b5d51029 2901 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2902 goto invalid_opc;
8bb6e981 2903 case 0x5:
b5d51029 2904 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2905 goto invalid_opc;
8bb6e981
AJ
2906 break;
2907 case 0x6:
2908 /* Incpu_ir[ra]id */
b5d51029 2909 goto invalid_opc;
8bb6e981
AJ
2910 case 0x7:
2911 /* Incpu_ir[ra]id */
b5d51029 2912 goto invalid_opc;
8bb6e981 2913 case 0x8:
b5d51029 2914 /* Longword virtual access (hw_ldl) */
2374e73e 2915 goto invalid_opc;
8bb6e981 2916 case 0x9:
b5d51029 2917 /* Quadword virtual access (hw_ldq) */
2374e73e 2918 goto invalid_opc;
8bb6e981 2919 case 0xA:
b5d51029 2920 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2921 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2922 break;
2923 case 0xB:
b5d51029 2924 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2925 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2926 break;
2927 case 0xC:
b5d51029 2928 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2929 goto invalid_opc;
8bb6e981 2930 case 0xD:
b5d51029 2931 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2932 goto invalid_opc;
8bb6e981
AJ
2933 case 0xE:
2934 /* Longword virtual access with alternate access mode and
2374e73e
RH
2935 protection checks (hw_ldl/wa) */
2936 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2937 break;
2938 case 0xF:
2939 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2940 protection checks (hw_ldq/wa) */
2941 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2942 break;
2943 }
2944 tcg_temp_free(addr);
a18ad893 2945 break;
4c9649a9 2946 }
4c9649a9 2947#endif
a18ad893 2948 goto invalid_opc;
4c9649a9
JM
2949 case 0x1C:
2950 switch (fn7) {
2951 case 0x00:
2952 /* SEXTB */
a18ad893 2953 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 2954 goto invalid_opc;
a18ad893 2955 }
ae8ecd42
AJ
2956 if (likely(rc != 31)) {
2957 if (islit)
2958 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2959 else
dfaa8583 2960 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2961 }
4c9649a9
JM
2962 break;
2963 case 0x01:
2964 /* SEXTW */
a18ad893
RH
2965 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2966 if (likely(rc != 31)) {
2967 if (islit) {
2968 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2969 } else {
2970 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2971 }
2972 }
2973 break;
ae8ecd42 2974 }
a18ad893 2975 goto invalid_opc;
4c9649a9
JM
2976 case 0x30:
2977 /* CTPOP */
a18ad893
RH
2978 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2979 if (likely(rc != 31)) {
2980 if (islit) {
2981 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2982 } else {
2983 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2984 }
2985 }
2986 break;
ae8ecd42 2987 }
a18ad893 2988 goto invalid_opc;
4c9649a9
JM
2989 case 0x31:
2990 /* PERR */
a18ad893
RH
2991 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2992 gen_perr(ra, rb, rc, islit, lit);
2993 break;
2994 }
2995 goto invalid_opc;
4c9649a9
JM
2996 case 0x32:
2997 /* CTLZ */
a18ad893
RH
2998 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2999 if (likely(rc != 31)) {
3000 if (islit) {
3001 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3002 } else {
3003 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
3004 }
3005 }
3006 break;
ae8ecd42 3007 }
a18ad893 3008 goto invalid_opc;
4c9649a9
JM
3009 case 0x33:
3010 /* CTTZ */
a18ad893
RH
3011 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3012 if (likely(rc != 31)) {
3013 if (islit) {
3014 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3015 } else {
3016 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3017 }
3018 }
3019 break;
ae8ecd42 3020 }
a18ad893 3021 goto invalid_opc;
4c9649a9
JM
3022 case 0x34:
3023 /* UNPKBW */
a18ad893
RH
3024 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3025 if (real_islit || ra != 31) {
3026 goto invalid_opc;
3027 }
3028 gen_unpkbw(rb, rc);
3029 break;
3030 }
3031 goto invalid_opc;
4c9649a9 3032 case 0x35:
13e4df99 3033 /* UNPKBL */
a18ad893
RH
3034 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3035 if (real_islit || ra != 31) {
3036 goto invalid_opc;
3037 }
3038 gen_unpkbl(rb, rc);
3039 break;
3040 }
3041 goto invalid_opc;
4c9649a9
JM
3042 case 0x36:
3043 /* PKWB */
a18ad893
RH
3044 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3045 if (real_islit || ra != 31) {
3046 goto invalid_opc;
3047 }
3048 gen_pkwb(rb, rc);
3049 break;
3050 }
3051 goto invalid_opc;
4c9649a9
JM
3052 case 0x37:
3053 /* PKLB */
a18ad893
RH
3054 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3055 if (real_islit || ra != 31) {
3056 goto invalid_opc;
3057 }
3058 gen_pklb(rb, rc);
3059 break;
3060 }
3061 goto invalid_opc;
4c9649a9
JM
3062 case 0x38:
3063 /* MINSB8 */
a18ad893
RH
3064 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3065 gen_minsb8(ra, rb, rc, islit, lit);
3066 break;
3067 }
3068 goto invalid_opc;
4c9649a9
JM
3069 case 0x39:
3070 /* MINSW4 */
a18ad893
RH
3071 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3072 gen_minsw4(ra, rb, rc, islit, lit);
3073 break;
3074 }
3075 goto invalid_opc;
4c9649a9
JM
3076 case 0x3A:
3077 /* MINUB8 */
a18ad893
RH
3078 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3079 gen_minub8(ra, rb, rc, islit, lit);
3080 break;
3081 }
3082 goto invalid_opc;
4c9649a9
JM
3083 case 0x3B:
3084 /* MINUW4 */
a18ad893
RH
3085 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3086 gen_minuw4(ra, rb, rc, islit, lit);
3087 break;
3088 }
3089 goto invalid_opc;
4c9649a9
JM
3090 case 0x3C:
3091 /* MAXUB8 */
a18ad893
RH
3092 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3093 gen_maxub8(ra, rb, rc, islit, lit);
3094 break;
3095 }
3096 goto invalid_opc;
4c9649a9
JM
3097 case 0x3D:
3098 /* MAXUW4 */
a18ad893
RH
3099 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3100 gen_maxuw4(ra, rb, rc, islit, lit);
3101 break;
3102 }
3103 goto invalid_opc;
4c9649a9
JM
3104 case 0x3E:
3105 /* MAXSB8 */
a18ad893
RH
3106 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3107 gen_maxsb8(ra, rb, rc, islit, lit);
3108 break;
3109 }
3110 goto invalid_opc;
4c9649a9
JM
3111 case 0x3F:
3112 /* MAXSW4 */
a18ad893
RH
3113 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3114 gen_maxsw4(ra, rb, rc, islit, lit);
3115 break;
3116 }
3117 goto invalid_opc;
4c9649a9
JM
3118 case 0x70:
3119 /* FTOIT */
a18ad893 3120 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3121 goto invalid_opc;
a18ad893 3122 }
f18cd223
AJ
3123 if (likely(rc != 31)) {
3124 if (ra != 31)
3125 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3126 else
3127 tcg_gen_movi_i64(cpu_ir[rc], 0);
3128 }
4c9649a9
JM
3129 break;
3130 case 0x78:
3131 /* FTOIS */
a18ad893 3132 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3133 goto invalid_opc;
a18ad893 3134 }
f18cd223 3135 if (rc != 31) {
a7812ae4 3136 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3137 if (ra != 31)
a7812ae4 3138 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3139 else {
3140 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3141 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3142 tcg_temp_free(tmp2);
3143 }
3144 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3145 tcg_temp_free_i32(tmp1);
f18cd223 3146 }
4c9649a9
JM
3147 break;
3148 default:
3149 goto invalid_opc;
3150 }
3151 break;
3152 case 0x1D:
3153 /* HW_MTPR (PALcode) */
26b46094 3154#ifndef CONFIG_USER_ONLY
a18ad893 3155 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
bc24270e 3156 return gen_mtpr(ctx, rb, insn & 0xffff);
26b46094
RH
3157 }
3158#endif
4c9649a9 3159 goto invalid_opc;
4c9649a9 3160 case 0x1E:
508b43ea 3161 /* HW_RET (PALcode) */
a18ad893
RH
3162#ifndef CONFIG_USER_ONLY
3163 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3164 if (rb == 31) {
3165 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3166 address from EXC_ADDR. This turns out to be useful for our
3167 emulation PALcode, so continue to accept it. */
3168 TCGv tmp = tcg_temp_new();
4d5712f1 3169 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
69163fbb 3170 gen_helper_hw_ret(cpu_env, tmp);
a18ad893
RH
3171 tcg_temp_free(tmp);
3172 } else {
69163fbb 3173 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
a18ad893
RH
3174 }
3175 ret = EXIT_PC_UPDATED;
3176 break;
4c9649a9 3177 }
4c9649a9 3178#endif
a18ad893 3179 goto invalid_opc;
4c9649a9
JM
3180 case 0x1F:
3181 /* HW_ST (PALcode) */
a18ad893
RH
3182#ifndef CONFIG_USER_ONLY
3183 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3184 TCGv addr, val;
a7812ae4 3185 addr = tcg_temp_new();
8bb6e981
AJ
3186 if (rb != 31)
3187 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3188 else
3189 tcg_gen_movi_i64(addr, disp12);
3190 if (ra != 31)
3191 val = cpu_ir[ra];
3192 else {
a7812ae4 3193 val = tcg_temp_new();
8bb6e981
AJ
3194 tcg_gen_movi_i64(val, 0);
3195 }
3196 switch ((insn >> 12) & 0xF) {
3197 case 0x0:
3198 /* Longword physical access */
2374e73e 3199 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3200 break;
3201 case 0x1:
3202 /* Quadword physical access */
2374e73e 3203 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3204 break;
3205 case 0x2:
3206 /* Longword physical access with lock */
c3082755 3207 gen_helper_stl_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3208 break;
3209 case 0x3:
3210 /* Quadword physical access with lock */
c3082755 3211 gen_helper_stq_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3212 break;
3213 case 0x4:
3214 /* Longword virtual access */
2374e73e 3215 goto invalid_opc;
8bb6e981
AJ
3216 case 0x5:
3217 /* Quadword virtual access */
2374e73e 3218 goto invalid_opc;
8bb6e981
AJ
3219 case 0x6:
3220 /* Invalid */
3221 goto invalid_opc;
3222 case 0x7:
3223 /* Invalid */
3224 goto invalid_opc;
3225 case 0x8:
3226 /* Invalid */
3227 goto invalid_opc;
3228 case 0x9:
3229 /* Invalid */
3230 goto invalid_opc;
3231 case 0xA:
3232 /* Invalid */
3233 goto invalid_opc;
3234 case 0xB:
3235 /* Invalid */
3236 goto invalid_opc;
3237 case 0xC:
3238 /* Longword virtual access with alternate access mode */
2374e73e 3239 goto invalid_opc;
8bb6e981
AJ
3240 case 0xD:
3241 /* Quadword virtual access with alternate access mode */
2374e73e 3242 goto invalid_opc;
8bb6e981
AJ
3243 case 0xE:
3244 /* Invalid */
3245 goto invalid_opc;
3246 case 0xF:
3247 /* Invalid */
3248 goto invalid_opc;
3249 }
45d46ce8 3250 if (ra == 31)
8bb6e981
AJ
3251 tcg_temp_free(val);
3252 tcg_temp_free(addr);
a18ad893 3253 break;
4c9649a9 3254 }
4c9649a9 3255#endif
a18ad893 3256 goto invalid_opc;
4c9649a9
JM
3257 case 0x20:
3258 /* LDF */
f18cd223 3259 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3260 break;
3261 case 0x21:
3262 /* LDG */
f18cd223 3263 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3264 break;
3265 case 0x22:
3266 /* LDS */
f18cd223 3267 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3268 break;
3269 case 0x23:
3270 /* LDT */
f18cd223 3271 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3272 break;
3273 case 0x24:
3274 /* STF */
6910b8f6 3275 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3276 break;
3277 case 0x25:
3278 /* STG */
6910b8f6 3279 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3280 break;
3281 case 0x26:
3282 /* STS */
6910b8f6 3283 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3284 break;
3285 case 0x27:
3286 /* STT */
6910b8f6 3287 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3288 break;
3289 case 0x28:
3290 /* LDL */
f18cd223 3291 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3292 break;
3293 case 0x29:
3294 /* LDQ */
f18cd223 3295 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3296 break;
3297 case 0x2A:
3298 /* LDL_L */
f4ed8679 3299 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3300 break;
3301 case 0x2B:
3302 /* LDQ_L */
f4ed8679 3303 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3304 break;
3305 case 0x2C:
3306 /* STL */
6910b8f6 3307 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3308 break;
3309 case 0x2D:
3310 /* STQ */
6910b8f6 3311 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3312 break;
3313 case 0x2E:
3314 /* STL_C */
6910b8f6 3315 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3316 break;
3317 case 0x2F:
3318 /* STQ_C */
6910b8f6 3319 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3320 break;
3321 case 0x30:
3322 /* BR */
4af70374 3323 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3324 break;
a7812ae4 3325 case 0x31: /* FBEQ */
4af70374 3326 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3327 break;
a7812ae4 3328 case 0x32: /* FBLT */
4af70374 3329 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3330 break;
a7812ae4 3331 case 0x33: /* FBLE */
4af70374 3332 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3333 break;
3334 case 0x34:
3335 /* BSR */
4af70374 3336 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3337 break;
a7812ae4 3338 case 0x35: /* FBNE */
4af70374 3339 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3340 break;
a7812ae4 3341 case 0x36: /* FBGE */
4af70374 3342 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3343 break;
a7812ae4 3344 case 0x37: /* FBGT */
4af70374 3345 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3346 break;
3347 case 0x38:
3348 /* BLBC */
4af70374 3349 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3350 break;
3351 case 0x39:
3352 /* BEQ */
4af70374 3353 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3354 break;
3355 case 0x3A:
3356 /* BLT */
4af70374 3357 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3358 break;
3359 case 0x3B:
3360 /* BLE */
4af70374 3361 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3362 break;
3363 case 0x3C:
3364 /* BLBS */
4af70374 3365 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3366 break;
3367 case 0x3D:
3368 /* BNE */
4af70374 3369 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3370 break;
3371 case 0x3E:
3372 /* BGE */
4af70374 3373 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3374 break;
3375 case 0x3F:
3376 /* BGT */
4af70374 3377 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3378 break;
3379 invalid_opc:
8aa3fa20 3380 ret = gen_invalid(ctx);
4c9649a9
JM
3381 break;
3382 }
3383
3384 return ret;
3385}
3386
86a35f7c 3387static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
636aa200 3388 TranslationBlock *tb,
86a35f7c 3389 bool search_pc)
4c9649a9 3390{
ed2803da 3391 CPUState *cs = CPU(cpu);
86a35f7c 3392 CPUAlphaState *env = &cpu->env;
4c9649a9
JM
3393 DisasContext ctx, *ctxp = &ctx;
3394 target_ulong pc_start;
3395 uint32_t insn;
3396 uint16_t *gen_opc_end;
a1d1bb31 3397 CPUBreakpoint *bp;
4c9649a9 3398 int j, lj = -1;
4af70374 3399 ExitStatus ret;
2e70f6ef
PB
3400 int num_insns;
3401 int max_insns;
4c9649a9
JM
3402
3403 pc_start = tb->pc;
92414b31 3404 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3405
3406 ctx.tb = tb;
4c9649a9 3407 ctx.pc = pc_start;
bba9bdce 3408 ctx.mem_idx = cpu_mmu_index(env);
801c4c28 3409 ctx.implver = env->implver;
ed2803da 3410 ctx.singlestep_enabled = cs->singlestep_enabled;
f24518b5
RH
3411
3412 /* ??? Every TB begins with unset rounding mode, to be initialized on
3413 the first fp insn of the TB. Alternately we could define a proper
3414 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3415 to reset the FP_STATUS to that default at the end of any TB that
3416 changes the default. We could even (gasp) dynamiclly figure out
3417 what default would be most efficient given the running program. */
3418 ctx.tb_rm = -1;
3419 /* Similarly for flush-to-zero. */
3420 ctx.tb_ftz = -1;
3421
2e70f6ef
PB
3422 num_insns = 0;
3423 max_insns = tb->cflags & CF_COUNT_MASK;
3424 if (max_insns == 0)
3425 max_insns = CF_COUNT_MASK;
3426
806f352d 3427 gen_tb_start();
4af70374 3428 do {
72cf2d4f
BS
3429 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3430 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3431 if (bp->pc == ctx.pc) {
4c9649a9
JM
3432 gen_excp(&ctx, EXCP_DEBUG, 0);
3433 break;
3434 }
3435 }
3436 }
3437 if (search_pc) {
92414b31 3438 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3439 if (lj < j) {
3440 lj++;
3441 while (lj < j)
ab1103de 3442 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9 3443 }
25983cad 3444 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
ab1103de 3445 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 3446 tcg_ctx.gen_opc_icount[lj] = num_insns;
4c9649a9 3447 }
2e70f6ef
PB
3448 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3449 gen_io_start();
c3082755 3450 insn = cpu_ldl_code(env, ctx.pc);
2e70f6ef 3451 num_insns++;
c4b3be39 3452
fdefe51c 3453 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
c4b3be39
RH
3454 tcg_gen_debug_insn_start(ctx.pc);
3455 }
3456
4c9649a9
JM
3457 ctx.pc += 4;
3458 ret = translate_one(ctxp, insn);
19bf517b 3459
bf1b03fe
RH
3460 /* If we reach a page boundary, are single stepping,
3461 or exhaust instruction count, stop generation. */
3462 if (ret == NO_EXIT
3463 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
efd7f486 3464 || tcg_ctx.gen_opc_ptr >= gen_opc_end
bf1b03fe
RH
3465 || num_insns >= max_insns
3466 || singlestep
ca6862a6 3467 || ctx.singlestep_enabled)) {
bf1b03fe 3468 ret = EXIT_PC_STALE;
1b530a6d 3469 }
4af70374
RH
3470 } while (ret == NO_EXIT);
3471
3472 if (tb->cflags & CF_LAST_IO) {
3473 gen_io_end();
4c9649a9 3474 }
4af70374
RH
3475
3476 switch (ret) {
3477 case EXIT_GOTO_TB:
8aa3fa20 3478 case EXIT_NORETURN:
4af70374
RH
3479 break;
3480 case EXIT_PC_STALE:
496cb5b9 3481 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3482 /* FALLTHRU */
3483 case EXIT_PC_UPDATED:
ca6862a6 3484 if (ctx.singlestep_enabled) {
bf1b03fe
RH
3485 gen_excp_1(EXCP_DEBUG, 0);
3486 } else {
3487 tcg_gen_exit_tb(0);
3488 }
4af70374
RH
3489 break;
3490 default:
3491 abort();
4c9649a9 3492 }
4af70374 3493
806f352d 3494 gen_tb_end(tb, num_insns);
efd7f486 3495 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4c9649a9 3496 if (search_pc) {
92414b31 3497 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4c9649a9
JM
3498 lj++;
3499 while (lj <= j)
ab1103de 3500 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3501 } else {
3502 tb->size = ctx.pc - pc_start;
2e70f6ef 3503 tb->icount = num_insns;
4c9649a9 3504 }
4af70374 3505
806991da 3506#ifdef DEBUG_DISAS
8fec2b8c 3507 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39 3508 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 3509 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
93fcfe39 3510 qemu_log("\n");
4c9649a9 3511 }
4c9649a9 3512#endif
4c9649a9
JM
3513}
3514
4d5712f1 3515void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3516{
86a35f7c 3517 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
4c9649a9
JM
3518}
3519
4d5712f1 3520void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3521{
86a35f7c 3522 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
4c9649a9
JM
3523}
3524
4d5712f1 3525void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 3526{
25983cad 3527 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
d2856f1a 3528}