]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
tcg: Move helper registration into tcg_context_init
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
4c9649a9 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
1de7afc9 22#include "qemu/host-utils.h"
57fec1fe 23#include "tcg-op.h"
4c9649a9 24
a7812ae4
PB
25#include "helper.h"
26#define GEN_HELPER 1
27#include "helper.h"
28
19188121 29#undef ALPHA_DEBUG_DISAS
f24518b5 30#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
31
32#ifdef ALPHA_DEBUG_DISAS
806991da 33# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
34#else
35# define LOG_DISAS(...) do { } while (0)
36#endif
37
4c9649a9
JM
38typedef struct DisasContext DisasContext;
39struct DisasContext {
4af70374 40 struct TranslationBlock *tb;
4c9649a9
JM
41 uint64_t pc;
42 int mem_idx;
f24518b5
RH
43
44 /* Current rounding mode for this TB. */
45 int tb_rm;
46 /* Current flush-to-zero setting for this TB. */
47 int tb_ftz;
ca6862a6 48
801c4c28
RH
49 /* implver value for this CPU. */
50 int implver;
51
ca6862a6 52 bool singlestep_enabled;
4c9649a9
JM
53};
54
4af70374
RH
55/* Return values from translate_one, indicating the state of the TB.
56 Note that zero indicates that we are not exiting the TB. */
57
58typedef enum {
59 NO_EXIT,
60
61 /* We have emitted one or more goto_tb. No fixup required. */
62 EXIT_GOTO_TB,
63
64 /* We are not using a goto_tb (for whatever reason), but have updated
65 the PC (for whatever reason), so there's no need to do it again on
66 exiting the TB. */
67 EXIT_PC_UPDATED,
68
69 /* We are exiting the TB, but have neither emitted a goto_tb, nor
70 updated the PC for the next instruction to be executed. */
8aa3fa20
RH
71 EXIT_PC_STALE,
72
73 /* We are ending the TB with a noreturn function call, e.g. longjmp.
74 No following code will be executed. */
75 EXIT_NORETURN,
4af70374
RH
76} ExitStatus;
77
3761035f 78/* global register indexes */
a7812ae4 79static TCGv_ptr cpu_env;
496cb5b9 80static TCGv cpu_ir[31];
f18cd223 81static TCGv cpu_fir[31];
496cb5b9 82static TCGv cpu_pc;
6910b8f6
RH
83static TCGv cpu_lock_addr;
84static TCGv cpu_lock_st_addr;
85static TCGv cpu_lock_value;
2ace7e55
RH
86static TCGv cpu_unique;
87#ifndef CONFIG_USER_ONLY
88static TCGv cpu_sysval;
89static TCGv cpu_usp;
ab471ade 90#endif
496cb5b9 91
3761035f 92/* register names */
f18cd223 93static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef 94
022c62cb 95#include "exec/gen-icount.h"
2e70f6ef 96
0c28246f 97void alpha_translate_init(void)
2e70f6ef 98{
496cb5b9
AJ
99 int i;
100 char *p;
2e70f6ef 101 static int done_init = 0;
496cb5b9 102
2e70f6ef
PB
103 if (done_init)
104 return;
496cb5b9 105
a7812ae4 106 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
496cb5b9
AJ
107
108 p = cpu_reg_names;
109 for (i = 0; i < 31; i++) {
110 sprintf(p, "ir%d", i);
a7812ae4 111 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 112 offsetof(CPUAlphaState, ir[i]), p);
6ba8dcd7 113 p += (i < 10) ? 4 : 5;
f18cd223
AJ
114
115 sprintf(p, "fir%d", i);
a7812ae4 116 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 117 offsetof(CPUAlphaState, fir[i]), p);
f18cd223 118 p += (i < 10) ? 5 : 6;
496cb5b9
AJ
119 }
120
a7812ae4 121 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 122 offsetof(CPUAlphaState, pc), "pc");
496cb5b9 123
6910b8f6 124 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 125 offsetof(CPUAlphaState, lock_addr),
6910b8f6
RH
126 "lock_addr");
127 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 128 offsetof(CPUAlphaState, lock_st_addr),
6910b8f6
RH
129 "lock_st_addr");
130 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 131 offsetof(CPUAlphaState, lock_value),
6910b8f6 132 "lock_value");
f4ed8679 133
2ace7e55 134 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 135 offsetof(CPUAlphaState, unique), "unique");
2ace7e55
RH
136#ifndef CONFIG_USER_ONLY
137 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 138 offsetof(CPUAlphaState, sysval), "sysval");
2ace7e55 139 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
4d5712f1 140 offsetof(CPUAlphaState, usp), "usp");
ab471ade
RH
141#endif
142
2e70f6ef
PB
143 done_init = 1;
144}
145
bf1b03fe 146static void gen_excp_1(int exception, int error_code)
4c9649a9 147{
a7812ae4 148 TCGv_i32 tmp1, tmp2;
6ad02592 149
6ad02592
AJ
150 tmp1 = tcg_const_i32(exception);
151 tmp2 = tcg_const_i32(error_code);
b9f0923e 152 gen_helper_excp(cpu_env, tmp1, tmp2);
a7812ae4
PB
153 tcg_temp_free_i32(tmp2);
154 tcg_temp_free_i32(tmp1);
bf1b03fe 155}
8aa3fa20 156
bf1b03fe
RH
157static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
158{
159 tcg_gen_movi_i64(cpu_pc, ctx->pc);
160 gen_excp_1(exception, error_code);
8aa3fa20 161 return EXIT_NORETURN;
4c9649a9
JM
162}
163
8aa3fa20 164static inline ExitStatus gen_invalid(DisasContext *ctx)
4c9649a9 165{
8aa3fa20 166 return gen_excp(ctx, EXCP_OPCDEC, 0);
4c9649a9
JM
167}
168
636aa200 169static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 170{
a7812ae4
PB
171 TCGv tmp = tcg_temp_new();
172 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 173 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
174 tcg_gen_trunc_i64_i32(tmp32, tmp);
175 gen_helper_memory_to_f(t0, tmp32);
176 tcg_temp_free_i32(tmp32);
f18cd223
AJ
177 tcg_temp_free(tmp);
178}
179
636aa200 180static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 181{
a7812ae4 182 TCGv tmp = tcg_temp_new();
f18cd223 183 tcg_gen_qemu_ld64(tmp, t1, flags);
a7812ae4 184 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
185 tcg_temp_free(tmp);
186}
187
636aa200 188static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 189{
a7812ae4
PB
190 TCGv tmp = tcg_temp_new();
191 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 192 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
193 tcg_gen_trunc_i64_i32(tmp32, tmp);
194 gen_helper_memory_to_s(t0, tmp32);
195 tcg_temp_free_i32(tmp32);
f18cd223
AJ
196 tcg_temp_free(tmp);
197}
198
636aa200 199static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679 200{
f4ed8679 201 tcg_gen_qemu_ld32s(t0, t1, flags);
6910b8f6
RH
202 tcg_gen_mov_i64(cpu_lock_addr, t1);
203 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
204}
205
636aa200 206static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679 207{
f4ed8679 208 tcg_gen_qemu_ld64(t0, t1, flags);
6910b8f6
RH
209 tcg_gen_mov_i64(cpu_lock_addr, t1);
210 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
211}
212
636aa200
BS
213static inline void gen_load_mem(DisasContext *ctx,
214 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
215 int flags),
216 int ra, int rb, int32_t disp16, int fp,
217 int clear)
023d8ca2 218{
6910b8f6 219 TCGv addr, va;
023d8ca2 220
6910b8f6
RH
221 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
222 prefetches, which we can treat as nops. No worries about
223 missed exceptions here. */
224 if (unlikely(ra == 31)) {
023d8ca2 225 return;
6910b8f6 226 }
023d8ca2 227
a7812ae4 228 addr = tcg_temp_new();
023d8ca2
AJ
229 if (rb != 31) {
230 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 231 if (clear) {
023d8ca2 232 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 233 }
023d8ca2 234 } else {
6910b8f6 235 if (clear) {
023d8ca2 236 disp16 &= ~0x7;
6910b8f6 237 }
023d8ca2
AJ
238 tcg_gen_movi_i64(addr, disp16);
239 }
6910b8f6
RH
240
241 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
242 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
243
023d8ca2
AJ
244 tcg_temp_free(addr);
245}
246
636aa200 247static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 248{
a7812ae4
PB
249 TCGv_i32 tmp32 = tcg_temp_new_i32();
250 TCGv tmp = tcg_temp_new();
251 gen_helper_f_to_memory(tmp32, t0);
252 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
253 tcg_gen_qemu_st32(tmp, t1, flags);
254 tcg_temp_free(tmp);
a7812ae4 255 tcg_temp_free_i32(tmp32);
f18cd223
AJ
256}
257
636aa200 258static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 259{
a7812ae4
PB
260 TCGv tmp = tcg_temp_new();
261 gen_helper_g_to_memory(tmp, t0);
f18cd223
AJ
262 tcg_gen_qemu_st64(tmp, t1, flags);
263 tcg_temp_free(tmp);
264}
265
636aa200 266static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 267{
a7812ae4
PB
268 TCGv_i32 tmp32 = tcg_temp_new_i32();
269 TCGv tmp = tcg_temp_new();
270 gen_helper_s_to_memory(tmp32, t0);
271 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
272 tcg_gen_qemu_st32(tmp, t1, flags);
273 tcg_temp_free(tmp);
a7812ae4 274 tcg_temp_free_i32(tmp32);
f18cd223
AJ
275}
276
636aa200
BS
277static inline void gen_store_mem(DisasContext *ctx,
278 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
279 int flags),
280 int ra, int rb, int32_t disp16, int fp,
6910b8f6 281 int clear)
023d8ca2 282{
6910b8f6
RH
283 TCGv addr, va;
284
285 addr = tcg_temp_new();
023d8ca2
AJ
286 if (rb != 31) {
287 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 288 if (clear) {
023d8ca2 289 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 290 }
023d8ca2 291 } else {
6910b8f6 292 if (clear) {
023d8ca2 293 disp16 &= ~0x7;
6910b8f6 294 }
023d8ca2
AJ
295 tcg_gen_movi_i64(addr, disp16);
296 }
6910b8f6
RH
297
298 if (ra == 31) {
299 va = tcg_const_i64(0);
f18cd223 300 } else {
6910b8f6 301 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
023d8ca2 302 }
6910b8f6
RH
303 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
304
023d8ca2 305 tcg_temp_free(addr);
6910b8f6
RH
306 if (ra == 31) {
307 tcg_temp_free(va);
308 }
309}
310
311static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
312 int32_t disp16, int quad)
313{
314 TCGv addr;
315
316 if (ra == 31) {
317 /* ??? Don't bother storing anything. The user can't tell
318 the difference, since the zero register always reads zero. */
319 return NO_EXIT;
320 }
321
322#if defined(CONFIG_USER_ONLY)
323 addr = cpu_lock_st_addr;
324#else
e52458fe 325 addr = tcg_temp_local_new();
6910b8f6
RH
326#endif
327
328 if (rb != 31) {
329 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
330 } else {
331 tcg_gen_movi_i64(addr, disp16);
332 }
333
334#if defined(CONFIG_USER_ONLY)
335 /* ??? This is handled via a complicated version of compare-and-swap
336 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
337 in TCG so that this isn't necessary. */
338 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
339#else
340 /* ??? In system mode we are never multi-threaded, so CAS can be
341 implemented via a non-atomic load-compare-store sequence. */
342 {
343 int lab_fail, lab_done;
344 TCGv val;
345
346 lab_fail = gen_new_label();
347 lab_done = gen_new_label();
e52458fe 348 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
6910b8f6
RH
349
350 val = tcg_temp_new();
351 if (quad) {
352 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
353 } else {
354 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
355 }
e52458fe 356 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
6910b8f6
RH
357
358 if (quad) {
359 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
360 } else {
361 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
362 }
363 tcg_gen_movi_i64(cpu_ir[ra], 1);
364 tcg_gen_br(lab_done);
365
366 gen_set_label(lab_fail);
367 tcg_gen_movi_i64(cpu_ir[ra], 0);
368
369 gen_set_label(lab_done);
370 tcg_gen_movi_i64(cpu_lock_addr, -1);
371
372 tcg_temp_free(addr);
373 return NO_EXIT;
374 }
375#endif
023d8ca2
AJ
376}
377
b114b68a 378static bool in_superpage(DisasContext *ctx, int64_t addr)
4c9649a9 379{
b114b68a
RH
380 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
381 && addr < 0
382 && ((addr >> 41) & 3) == 2
383 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
384}
385
386static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
387{
388 /* Suppress goto_tb in the case of single-steping and IO. */
389 if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
390 return false;
391 }
392 /* If the destination is in the superpage, the page perms can't change. */
393 if (in_superpage(ctx, dest)) {
394 return true;
395 }
396 /* Check for the dest on the same page as the start of the TB. */
397 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
4af70374 398}
dbb30fe6 399
4af70374
RH
400static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
401{
402 uint64_t dest = ctx->pc + (disp << 2);
403
404 if (ra != 31) {
405 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
406 }
407
408 /* Notice branch-to-next; used to initialize RA with the PC. */
409 if (disp == 0) {
410 return 0;
411 } else if (use_goto_tb(ctx, dest)) {
412 tcg_gen_goto_tb(0);
413 tcg_gen_movi_i64(cpu_pc, dest);
8cfd0495 414 tcg_gen_exit_tb((uintptr_t)ctx->tb);
4af70374
RH
415 return EXIT_GOTO_TB;
416 } else {
417 tcg_gen_movi_i64(cpu_pc, dest);
418 return EXIT_PC_UPDATED;
419 }
dbb30fe6
RH
420}
421
4af70374
RH
422static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
423 TCGv cmp, int32_t disp)
dbb30fe6 424{
4af70374 425 uint64_t dest = ctx->pc + (disp << 2);
dbb30fe6 426 int lab_true = gen_new_label();
9c29504e 427
4af70374
RH
428 if (use_goto_tb(ctx, dest)) {
429 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
430
431 tcg_gen_goto_tb(0);
432 tcg_gen_movi_i64(cpu_pc, ctx->pc);
8cfd0495 433 tcg_gen_exit_tb((uintptr_t)ctx->tb);
4af70374
RH
434
435 gen_set_label(lab_true);
436 tcg_gen_goto_tb(1);
437 tcg_gen_movi_i64(cpu_pc, dest);
8cfd0495 438 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
4af70374
RH
439
440 return EXIT_GOTO_TB;
441 } else {
57e289de
RH
442 TCGv_i64 z = tcg_const_i64(0);
443 TCGv_i64 d = tcg_const_i64(dest);
444 TCGv_i64 p = tcg_const_i64(ctx->pc);
4af70374 445
57e289de 446 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
4af70374 447
57e289de
RH
448 tcg_temp_free_i64(z);
449 tcg_temp_free_i64(d);
450 tcg_temp_free_i64(p);
4af70374
RH
451 return EXIT_PC_UPDATED;
452 }
453}
454
455static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
456 int32_t disp, int mask)
457{
458 TCGv cmp_tmp;
459
460 if (unlikely(ra == 31)) {
461 cmp_tmp = tcg_const_i64(0);
462 } else {
463 cmp_tmp = tcg_temp_new();
9c29504e 464 if (mask) {
4af70374 465 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
dbb30fe6 466 } else {
4af70374 467 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
dbb30fe6 468 }
9c29504e 469 }
4af70374
RH
470
471 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
472}
473
4af70374 474/* Fold -0.0 for comparison with COND. */
dbb30fe6 475
4af70374 476static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 477{
dbb30fe6 478 uint64_t mzero = 1ull << 63;
f18cd223 479
dbb30fe6
RH
480 switch (cond) {
481 case TCG_COND_LE:
482 case TCG_COND_GT:
483 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 484 tcg_gen_mov_i64(dest, src);
a7812ae4 485 break;
dbb30fe6
RH
486
487 case TCG_COND_EQ:
488 case TCG_COND_NE:
489 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 490 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 491 break;
dbb30fe6
RH
492
493 case TCG_COND_GE:
dbb30fe6 494 case TCG_COND_LT:
4af70374
RH
495 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
496 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
497 tcg_gen_neg_i64(dest, dest);
498 tcg_gen_and_i64(dest, dest, src);
a7812ae4 499 break;
dbb30fe6 500
a7812ae4
PB
501 default:
502 abort();
f18cd223 503 }
dbb30fe6
RH
504}
505
4af70374
RH
506static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
507 int32_t disp)
dbb30fe6 508{
4af70374 509 TCGv cmp_tmp;
dbb30fe6
RH
510
511 if (unlikely(ra == 31)) {
512 /* Very uncommon case, but easier to optimize it to an integer
513 comparison than continuing with the floating point comparison. */
4af70374 514 return gen_bcond(ctx, cond, ra, disp, 0);
dbb30fe6
RH
515 }
516
4af70374
RH
517 cmp_tmp = tcg_temp_new();
518 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
519 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
520}
521
bbe1dab4 522static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 523 int islit, uint8_t lit, int mask)
4c9649a9 524{
57e289de 525 TCGv_i64 c1, z, v1;
9c29504e 526
57e289de 527 if (unlikely(rc == 31)) {
9c29504e 528 return;
57e289de 529 }
9c29504e 530
57e289de 531 if (ra == 31) {
9c29504e 532 /* Very uncommon case - Do not bother to optimize. */
57e289de
RH
533 c1 = tcg_const_i64(0);
534 } else if (mask) {
535 c1 = tcg_const_i64(1);
536 tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
537 } else {
538 c1 = cpu_ir[ra];
539 }
540 if (islit) {
541 v1 = tcg_const_i64(lit);
542 } else {
543 v1 = cpu_ir[rb];
9c29504e 544 }
57e289de 545 z = tcg_const_i64(0);
9c29504e 546
57e289de
RH
547 tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);
548
549 tcg_temp_free_i64(z);
550 if (ra == 31 || mask) {
551 tcg_temp_free_i64(c1);
552 }
553 if (islit) {
554 tcg_temp_free_i64(v1);
555 }
4c9649a9
JM
556}
557
bbe1dab4 558static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 559{
57e289de 560 TCGv_i64 c1, z, v1;
dbb30fe6 561
4af70374 562 if (unlikely(rc == 31)) {
dbb30fe6 563 return;
4af70374
RH
564 }
565
57e289de 566 c1 = tcg_temp_new_i64();
dbb30fe6 567 if (unlikely(ra == 31)) {
57e289de 568 tcg_gen_movi_i64(c1, 0);
4af70374 569 } else {
57e289de 570 gen_fold_mzero(cond, c1, cpu_fir[ra]);
dbb30fe6 571 }
57e289de
RH
572 if (rb == 31) {
573 v1 = tcg_const_i64(0);
574 } else {
575 v1 = cpu_fir[rb];
576 }
577 z = tcg_const_i64(0);
dbb30fe6 578
57e289de 579 tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
dbb30fe6 580
57e289de
RH
581 tcg_temp_free_i64(z);
582 tcg_temp_free_i64(c1);
583 if (rb == 31) {
584 tcg_temp_free_i64(v1);
585 }
dbb30fe6
RH
586}
587
f24518b5
RH
588#define QUAL_RM_N 0x080 /* Round mode nearest even */
589#define QUAL_RM_C 0x000 /* Round mode chopped */
590#define QUAL_RM_M 0x040 /* Round mode minus infinity */
591#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
592#define QUAL_RM_MASK 0x0c0
593
594#define QUAL_U 0x100 /* Underflow enable (fp output) */
595#define QUAL_V 0x100 /* Overflow enable (int output) */
596#define QUAL_S 0x400 /* Software completion enable */
597#define QUAL_I 0x200 /* Inexact detection enable */
598
599static void gen_qual_roundmode(DisasContext *ctx, int fn11)
600{
601 TCGv_i32 tmp;
602
603 fn11 &= QUAL_RM_MASK;
604 if (fn11 == ctx->tb_rm) {
605 return;
606 }
607 ctx->tb_rm = fn11;
608
609 tmp = tcg_temp_new_i32();
610 switch (fn11) {
611 case QUAL_RM_N:
612 tcg_gen_movi_i32(tmp, float_round_nearest_even);
613 break;
614 case QUAL_RM_C:
615 tcg_gen_movi_i32(tmp, float_round_to_zero);
616 break;
617 case QUAL_RM_M:
618 tcg_gen_movi_i32(tmp, float_round_down);
619 break;
620 case QUAL_RM_D:
4a58aedf
RH
621 tcg_gen_ld8u_i32(tmp, cpu_env,
622 offsetof(CPUAlphaState, fpcr_dyn_round));
f24518b5
RH
623 break;
624 }
625
626#if defined(CONFIG_SOFTFLOAT_INLINE)
6b4c305c 627 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
f24518b5
RH
628 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
629 sets the one field. */
630 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 631 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
f24518b5
RH
632#else
633 gen_helper_setroundmode(tmp);
634#endif
635
636 tcg_temp_free_i32(tmp);
637}
638
639static void gen_qual_flushzero(DisasContext *ctx, int fn11)
640{
641 TCGv_i32 tmp;
642
643 fn11 &= QUAL_U;
644 if (fn11 == ctx->tb_ftz) {
645 return;
646 }
647 ctx->tb_ftz = fn11;
648
649 tmp = tcg_temp_new_i32();
650 if (fn11) {
651 /* Underflow is enabled, use the FPCR setting. */
4a58aedf
RH
652 tcg_gen_ld8u_i32(tmp, cpu_env,
653 offsetof(CPUAlphaState, fpcr_flush_to_zero));
f24518b5
RH
654 } else {
655 /* Underflow is disabled, force flush-to-zero. */
656 tcg_gen_movi_i32(tmp, 1);
657 }
658
659#if defined(CONFIG_SOFTFLOAT_INLINE)
660 tcg_gen_st8_i32(tmp, cpu_env,
4d5712f1 661 offsetof(CPUAlphaState, fp_status.flush_to_zero));
f24518b5
RH
662#else
663 gen_helper_setflushzero(tmp);
664#endif
665
666 tcg_temp_free_i32(tmp);
667}
668
669static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
670{
74343409 671 TCGv val;
f24518b5 672 if (reg == 31) {
74343409 673 val = tcg_const_i64(0);
f24518b5 674 } else {
74343409
RH
675 if ((fn11 & QUAL_S) == 0) {
676 if (is_cmp) {
677 gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
678 } else {
679 gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
680 }
681 }
682 val = tcg_temp_new();
683 tcg_gen_mov_i64(val, cpu_fir[reg]);
f24518b5
RH
684 }
685 return val;
686}
687
688static void gen_fp_exc_clear(void)
689{
690#if defined(CONFIG_SOFTFLOAT_INLINE)
691 TCGv_i32 zero = tcg_const_i32(0);
692 tcg_gen_st8_i32(zero, cpu_env,
4d5712f1 693 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5
RH
694 tcg_temp_free_i32(zero);
695#else
4a58aedf 696 gen_helper_fp_exc_clear(cpu_env);
f24518b5
RH
697#endif
698}
699
700static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
701{
702 /* ??? We ought to be able to do something with imprecise exceptions.
703 E.g. notice we're still in the trap shadow of something within the
704 TB and do not generate the code to signal the exception; end the TB
705 when an exception is forced to arrive, either by consumption of a
706 register value or TRAPB or EXCB. */
707 TCGv_i32 exc = tcg_temp_new_i32();
708 TCGv_i32 reg;
709
710#if defined(CONFIG_SOFTFLOAT_INLINE)
711 tcg_gen_ld8u_i32(exc, cpu_env,
4d5712f1 712 offsetof(CPUAlphaState, fp_status.float_exception_flags));
f24518b5 713#else
4a58aedf 714 gen_helper_fp_exc_get(exc, cpu_env);
f24518b5
RH
715#endif
716
717 if (ignore) {
718 tcg_gen_andi_i32(exc, exc, ~ignore);
719 }
720
721 /* ??? Pass in the regno of the destination so that the helper can
722 set EXC_MASK, which contains a bitmask of destination registers
723 that have caused arithmetic traps. A simple userspace emulation
724 does not require this. We do need it for a guest kernel's entArith,
725 or if we were to do something clever with imprecise exceptions. */
726 reg = tcg_const_i32(rc + 32);
727
728 if (fn11 & QUAL_S) {
4a58aedf 729 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
f24518b5 730 } else {
4a58aedf 731 gen_helper_fp_exc_raise(cpu_env, exc, reg);
f24518b5
RH
732 }
733
734 tcg_temp_free_i32(reg);
735 tcg_temp_free_i32(exc);
736}
737
738static inline void gen_fp_exc_raise(int rc, int fn11)
739{
740 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 741}
f24518b5 742
593f17e5
RH
743static void gen_fcvtlq(int rb, int rc)
744{
745 if (unlikely(rc == 31)) {
746 return;
747 }
748 if (unlikely(rb == 31)) {
749 tcg_gen_movi_i64(cpu_fir[rc], 0);
750 } else {
751 TCGv tmp = tcg_temp_new();
752
753 /* The arithmetic right shift here, plus the sign-extended mask below
754 yields a sign-extended result without an explicit ext32s_i64. */
755 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
756 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
757 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
758 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
759 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
760
761 tcg_temp_free(tmp);
762 }
763}
764
735cf45f
RH
765static void gen_fcvtql(int rb, int rc)
766{
767 if (unlikely(rc == 31)) {
768 return;
769 }
770 if (unlikely(rb == 31)) {
771 tcg_gen_movi_i64(cpu_fir[rc], 0);
772 } else {
773 TCGv tmp = tcg_temp_new();
774
775 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
776 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
777 tcg_gen_shli_i64(tmp, tmp, 32);
778 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
779 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
780
781 tcg_temp_free(tmp);
782 }
783}
784
785static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
786{
787 if (rb != 31) {
788 int lab = gen_new_label();
789 TCGv tmp = tcg_temp_new();
790
791 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
792 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
793 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
794
795 gen_set_label(lab);
796 }
797 gen_fcvtql(rb, rc);
798}
799
4a58aedf
RH
800#define FARITH2(name) \
801 static inline void glue(gen_f, name)(int rb, int rc) \
802 { \
803 if (unlikely(rc == 31)) { \
804 return; \
805 } \
806 if (rb != 31) { \
807 gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
808 } else { \
809 TCGv tmp = tcg_const_i64(0); \
810 gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
811 tcg_temp_free(tmp); \
812 } \
813 }
f24518b5
RH
814
815/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
816FARITH2(sqrtf)
817FARITH2(sqrtg)
a7812ae4
PB
818FARITH2(cvtgf)
819FARITH2(cvtgq)
820FARITH2(cvtqf)
821FARITH2(cvtqg)
f24518b5 822
4a58aedf
RH
823static void gen_ieee_arith2(DisasContext *ctx,
824 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
825 int rb, int rc, int fn11)
826{
827 TCGv vb;
828
829 /* ??? This is wrong: the instruction is not a nop, it still may
830 raise exceptions. */
831 if (unlikely(rc == 31)) {
832 return;
833 }
834
835 gen_qual_roundmode(ctx, fn11);
836 gen_qual_flushzero(ctx, fn11);
837 gen_fp_exc_clear();
838
839 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 840 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
841 tcg_temp_free(vb);
842
843 gen_fp_exc_raise(rc, fn11);
844}
845
846#define IEEE_ARITH2(name) \
847static inline void glue(gen_f, name)(DisasContext *ctx, \
848 int rb, int rc, int fn11) \
849{ \
850 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
851}
852IEEE_ARITH2(sqrts)
853IEEE_ARITH2(sqrtt)
854IEEE_ARITH2(cvtst)
855IEEE_ARITH2(cvtts)
856
857static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
858{
859 TCGv vb;
860 int ignore = 0;
861
862 /* ??? This is wrong: the instruction is not a nop, it still may
863 raise exceptions. */
864 if (unlikely(rc == 31)) {
865 return;
866 }
867
868 /* No need to set flushzero, since we have an integer output. */
869 gen_fp_exc_clear();
870 vb = gen_ieee_input(rb, fn11, 0);
871
872 /* Almost all integer conversions use cropped rounding, and most
873 also do not have integer overflow enabled. Special case that. */
874 switch (fn11) {
875 case QUAL_RM_C:
4a58aedf 876 gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
877 break;
878 case QUAL_V | QUAL_RM_C:
879 case QUAL_S | QUAL_V | QUAL_RM_C:
880 ignore = float_flag_inexact;
881 /* FALLTHRU */
882 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
4a58aedf 883 gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
884 break;
885 default:
886 gen_qual_roundmode(ctx, fn11);
4a58aedf 887 gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
888 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
889 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
890 break;
891 }
892 tcg_temp_free(vb);
893
894 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
895}
896
4a58aedf
RH
897static void gen_ieee_intcvt(DisasContext *ctx,
898 void (*helper)(TCGv, TCGv_ptr, TCGv),
f24518b5
RH
899 int rb, int rc, int fn11)
900{
901 TCGv vb;
902
903 /* ??? This is wrong: the instruction is not a nop, it still may
904 raise exceptions. */
905 if (unlikely(rc == 31)) {
906 return;
907 }
908
909 gen_qual_roundmode(ctx, fn11);
910
911 if (rb == 31) {
912 vb = tcg_const_i64(0);
913 } else {
914 vb = cpu_fir[rb];
915 }
916
917 /* The only exception that can be raised by integer conversion
918 is inexact. Thus we only need to worry about exceptions when
919 inexact handling is requested. */
920 if (fn11 & QUAL_I) {
921 gen_fp_exc_clear();
4a58aedf 922 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
923 gen_fp_exc_raise(rc, fn11);
924 } else {
4a58aedf 925 helper(cpu_fir[rc], cpu_env, vb);
f24518b5
RH
926 }
927
928 if (rb == 31) {
929 tcg_temp_free(vb);
930 }
931}
932
933#define IEEE_INTCVT(name) \
934static inline void glue(gen_f, name)(DisasContext *ctx, \
935 int rb, int rc, int fn11) \
936{ \
937 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
938}
939IEEE_INTCVT(cvtqs)
940IEEE_INTCVT(cvtqt)
941
dc96be4b
RH
942static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
943{
944 TCGv va, vb, vmask;
945 int za = 0, zb = 0;
946
947 if (unlikely(rc == 31)) {
948 return;
949 }
950
951 vmask = tcg_const_i64(mask);
952
953 TCGV_UNUSED_I64(va);
954 if (ra == 31) {
955 if (inv_a) {
956 va = vmask;
957 } else {
958 za = 1;
959 }
960 } else {
961 va = tcg_temp_new_i64();
962 tcg_gen_mov_i64(va, cpu_fir[ra]);
963 if (inv_a) {
964 tcg_gen_andc_i64(va, vmask, va);
965 } else {
966 tcg_gen_and_i64(va, va, vmask);
967 }
968 }
969
970 TCGV_UNUSED_I64(vb);
971 if (rb == 31) {
972 zb = 1;
973 } else {
974 vb = tcg_temp_new_i64();
975 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
976 }
977
978 switch (za << 1 | zb) {
979 case 0 | 0:
980 tcg_gen_or_i64(cpu_fir[rc], va, vb);
981 break;
982 case 0 | 1:
983 tcg_gen_mov_i64(cpu_fir[rc], va);
984 break;
985 case 2 | 0:
986 tcg_gen_mov_i64(cpu_fir[rc], vb);
987 break;
988 case 2 | 1:
989 tcg_gen_movi_i64(cpu_fir[rc], 0);
990 break;
991 }
992
993 tcg_temp_free(vmask);
994 if (ra != 31) {
995 tcg_temp_free(va);
996 }
997 if (rb != 31) {
998 tcg_temp_free(vb);
999 }
1000}
1001
1002static inline void gen_fcpys(int ra, int rb, int rc)
1003{
1004 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
1005}
1006
1007static inline void gen_fcpysn(int ra, int rb, int rc)
1008{
1009 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
1010}
1011
1012static inline void gen_fcpyse(int ra, int rb, int rc)
1013{
1014 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
1015}
1016
4a58aedf
RH
1017#define FARITH3(name) \
1018 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1019 { \
1020 TCGv va, vb; \
1021 \
1022 if (unlikely(rc == 31)) { \
1023 return; \
1024 } \
1025 if (ra == 31) { \
1026 va = tcg_const_i64(0); \
1027 } else { \
1028 va = cpu_fir[ra]; \
1029 } \
1030 if (rb == 31) { \
1031 vb = tcg_const_i64(0); \
1032 } else { \
1033 vb = cpu_fir[rb]; \
1034 } \
1035 \
1036 gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
1037 \
1038 if (ra == 31) { \
1039 tcg_temp_free(va); \
1040 } \
1041 if (rb == 31) { \
1042 tcg_temp_free(vb); \
1043 } \
1044 }
f24518b5
RH
1045
1046/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
1047FARITH3(addf)
1048FARITH3(subf)
1049FARITH3(mulf)
1050FARITH3(divf)
1051FARITH3(addg)
1052FARITH3(subg)
1053FARITH3(mulg)
1054FARITH3(divg)
1055FARITH3(cmpgeq)
1056FARITH3(cmpglt)
1057FARITH3(cmpgle)
f24518b5
RH
1058
1059static void gen_ieee_arith3(DisasContext *ctx,
4a58aedf 1060 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
f24518b5
RH
1061 int ra, int rb, int rc, int fn11)
1062{
1063 TCGv va, vb;
1064
1065 /* ??? This is wrong: the instruction is not a nop, it still may
1066 raise exceptions. */
1067 if (unlikely(rc == 31)) {
1068 return;
1069 }
1070
1071 gen_qual_roundmode(ctx, fn11);
1072 gen_qual_flushzero(ctx, fn11);
1073 gen_fp_exc_clear();
1074
1075 va = gen_ieee_input(ra, fn11, 0);
1076 vb = gen_ieee_input(rb, fn11, 0);
4a58aedf 1077 helper(cpu_fir[rc], cpu_env, va, vb);
f24518b5
RH
1078 tcg_temp_free(va);
1079 tcg_temp_free(vb);
1080
1081 gen_fp_exc_raise(rc, fn11);
1082}
1083
1084#define IEEE_ARITH3(name) \
1085static inline void glue(gen_f, name)(DisasContext *ctx, \
1086 int ra, int rb, int rc, int fn11) \
1087{ \
1088 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1089}
1090IEEE_ARITH3(adds)
1091IEEE_ARITH3(subs)
1092IEEE_ARITH3(muls)
1093IEEE_ARITH3(divs)
1094IEEE_ARITH3(addt)
1095IEEE_ARITH3(subt)
1096IEEE_ARITH3(mult)
1097IEEE_ARITH3(divt)
1098
/* Emit an IEEE comparison via HELPER.  Unlike gen_ieee_arith3, no
   rounding/flush qualifiers apply to compares, and the inputs are
   checked as compare operands (third argument 1 to gen_ieee_input). */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1121
/* Wrap gen_ieee_compare for each IEEE compare helper (CMPTxx). */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
a7812ae4 1132
248c42f3
RH
/* Expand an 8-bit ZAPNOT byte selector into the corresponding 64-bit
   byte mask: bit i of LIT set means byte i of the result is 0xff. */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t result = 0;
    int byte;

    /* Walk the selector from the top byte down, shifting previously
       accumulated bytes up as we go. */
    for (byte = 7; byte >= 0; --byte) {
        result <<= 8;
        if (lit & (1 << byte)) {
            result |= 0xff;
        }
    }
    return result;
}
1144
87d98f95
RH
1145/* Implement zapnot with an immediate operand, which expands to some
1146 form of immediate AND. This is a basic building block in the
1147 definition of many of the other byte manipulation instructions. */
248c42f3 1148static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1149{
87d98f95
RH
1150 switch (lit) {
1151 case 0x00:
248c42f3 1152 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1153 break;
1154 case 0x01:
248c42f3 1155 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1156 break;
1157 case 0x03:
248c42f3 1158 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1159 break;
1160 case 0x0f:
248c42f3 1161 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1162 break;
1163 case 0xff:
248c42f3 1164 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1165 break;
1166 default:
248c42f3 1167 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1168 break;
1169 }
1170}
1171
1172static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1173{
1174 if (unlikely(rc == 31))
1175 return;
1176 else if (unlikely(ra == 31))
1177 tcg_gen_movi_i64(cpu_ir[rc], 0);
1178 else if (islit)
248c42f3 1179 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1180 else
1181 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1182}
1183
1184static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1185{
1186 if (unlikely(rc == 31))
1187 return;
1188 else if (unlikely(ra == 31))
1189 tcg_gen_movi_i64(cpu_ir[rc], 0);
1190 else if (islit)
248c42f3 1191 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1192 else
1193 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1194}
1195
1196
248c42f3 1197/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1198static void gen_ext_h(int ra, int rb, int rc, int islit,
1199 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1200{
1201 if (unlikely(rc == 31))
1202 return;
377a43b6
RH
1203 else if (unlikely(ra == 31))
1204 tcg_gen_movi_i64(cpu_ir[rc], 0);
1205 else {
dfaa8583 1206 if (islit) {
377a43b6
RH
1207 lit = (64 - (lit & 7) * 8) & 0x3f;
1208 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1209 } else {
377a43b6 1210 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1211 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1212 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1213 tcg_gen_neg_i64(tmp1, tmp1);
1214 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1215 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1216 tcg_temp_free(tmp1);
dfaa8583 1217 }
248c42f3 1218 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1219 }
b3249f63
AJ
1220}
1221
248c42f3 1222/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1223static void gen_ext_l(int ra, int rb, int rc, int islit,
1224 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1225{
1226 if (unlikely(rc == 31))
1227 return;
377a43b6
RH
1228 else if (unlikely(ra == 31))
1229 tcg_gen_movi_i64(cpu_ir[rc], 0);
1230 else {
dfaa8583 1231 if (islit) {
377a43b6 1232 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1233 } else {
a7812ae4 1234 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1235 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1236 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1237 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1238 tcg_temp_free(tmp);
fe2b269a 1239 }
248c42f3
RH
1240 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1241 }
1242}
1243
50eb6e5c
RH
/* INSWH, INSLH, INSQH: position the high part of Ra for insertion into
   an unaligned field, shifting right by 64 - 8 * (Rb & 7). */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        /* Source is zero, or an aligned insert has no high part. */
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above. */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63. */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1285
248c42f3 1286/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1287static void gen_ins_l(int ra, int rb, int rc, int islit,
1288 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1289{
1290 if (unlikely(rc == 31))
1291 return;
1292 else if (unlikely(ra == 31))
1293 tcg_gen_movi_i64(cpu_ir[rc], 0);
1294 else {
1295 TCGv tmp = tcg_temp_new();
1296
1297 /* The instruction description has us left-shift the byte mask
1298 the same number of byte slots as the data and apply the zap
1299 at the end. This is equivalent to simply performing the zap
1300 first and shifting afterward. */
1301 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1302
1303 if (islit) {
1304 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1305 } else {
1306 TCGv shift = tcg_temp_new();
1307 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1308 tcg_gen_shli_i64(shift, shift, 3);
1309 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1310 tcg_temp_free(shift);
1311 }
1312 tcg_temp_free(tmp);
377a43b6 1313 }
b3249f63
AJ
1314}
1315
ffec44f1
RH
/* MSKWH, MSKLH, MSKQH: clear the bytes of Ra corresponding to the high
   part of an unaligned field at byte offset Rb & 7. */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Constant offset: the mask folds to a compile-time zapnot.
           byte_mask is promoted to int, so the <<,>>8 cannot overflow. */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63. */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        /* Clear the masked bytes: Rc = Ra & ~mask. */
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1353
/* MSKBL, MSKWL, MSKLL, MSKQL: clear the bytes of Ra corresponding to
   the low part of an unaligned field at byte offset Rb & 7. */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Constant offset: fold the shifted selector into a zapnot. */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* Expand the byte selector to a bit mask and slide it up by
           8 * (Rb & 7) bits; the shift is always in [0, 56]. */
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        /* Clear the masked bytes: Rc = Ra & ~mask. */
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1379
/* Build gen_<name> emitters that call two-operand integer helpers
   taking no env pointer (these helpers cannot raise exceptions).
   $31 / literal operands are materialized in constant temporaries. */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
1416
2958620f
RH
/* Like ARITH3, but for the overflow-checking (/V) helpers, which take
   cpu_env so they can raise an arithmetic exception at runtime. */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
1453
13e4df99
RH
/* Build gen_<name> emitters for the single-source MVI pack/unpack
   operations (Rb -> Rc).  $31 as source yields zero; $31 as
   destination is a no-op. */
#define MVIOP2(name)                                        \
static inline void glue(gen_, name)(int rb, int rc)         \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
    if (unlikely(rb == 31))                                 \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                    \
    else                                                    \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);       \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
b3249f63 1468
9e05960f
RH
1469static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1470 int islit, uint8_t lit)
01ff9cc8 1471{
9e05960f 1472 TCGv va, vb;
01ff9cc8 1473
9e05960f 1474 if (unlikely(rc == 31)) {
13e4df99 1475 return;
9e05960f 1476 }
01ff9cc8 1477
9e05960f
RH
1478 if (ra == 31) {
1479 va = tcg_const_i64(0);
1480 } else {
1481 va = cpu_ir[ra];
1482 }
1483 if (islit) {
1484 vb = tcg_const_i64(lit);
1485 } else {
1486 vb = cpu_ir[rb];
1487 }
01ff9cc8 1488
9e05960f 1489 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1490
9e05960f
RH
1491 if (ra == 31) {
1492 tcg_temp_free(va);
1493 }
1494 if (islit) {
1495 tcg_temp_free(vb);
1496 }
01ff9cc8
AJ
1497}
1498
ac316ca4
RH
/* RS/RC: read the current interrupt flag into Ra (unless Ra is $31),
   then overwrite the flag with SET (1 for RS, 0 for RC). */
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    /* Store the new flag value as a single byte. */
    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
1511
2ace7e55
RH
/* Translate a CALL_PAL instruction.  Trivial OSF/1 PALcode entry points
   are inlined directly; anything else falls through to do_call_pal,
   which either raises EXCP_CALL_PAL (user emulation) or jumps to the
   PALcode entry point via the call_pal helper (system emulation).
   Returns the resulting TB exit status. */
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            /* Mask to the unprivileged entry-point range. */
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            /* Read cpu_index from the CPUState that precedes env.  */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            /* Mask to the privileged entry-point range. */
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        /* Unprivileged entry points live at PALBR+0x2000, privileged
           ones at PALBR+0x1000, each 64 bytes apart. */
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existance of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
1636
26b46094
RH
#ifndef CONFIG_USER_ONLY

/* Width tags or'd into the cpu_pr_data result; no tag means the field
   is a full 64-bit quadword. */
#define PR_BYTE 0x100000
#define PR_LONG 0x200000

/* Map a processor-register number (as used by our PALcode's mfpr/mtpr)
   to the offset of its backing field in CPUAlphaState, possibly tagged
   with PR_BYTE/PR_LONG.  Returns 0 for registers with no backing field
   (which read as zero and ignore writes). */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
1669
/* Emit a read of processor register REGNO into Ra.  Timer registers
   (249/VMTIME, 250/WALLTIME) go through helpers and interact with
   icount; everything else is a plain load from CPUAlphaState. */
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            /* Bracket the I/O access and end the TB so icount stays
               accurate. */
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
1710
bc24270e 1711static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1712{
1713 TCGv tmp;
bc24270e 1714 int data;
26b46094
RH
1715
1716 if (rb == 31) {
1717 tmp = tcg_const_i64(0);
1718 } else {
1719 tmp = cpu_ir[rb];
1720 }
1721
bc24270e
RH
1722 switch (regno) {
1723 case 255:
3b4fefd6 1724 /* TBIA */
69163fbb 1725 gen_helper_tbia(cpu_env);
bc24270e
RH
1726 break;
1727
1728 case 254:
3b4fefd6 1729 /* TBIS */
69163fbb 1730 gen_helper_tbis(cpu_env, tmp);
bc24270e
RH
1731 break;
1732
1733 case 253:
1734 /* WAIT */
1735 tmp = tcg_const_i64(1);
259186a7
AF
1736 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1737 offsetof(CPUState, halted));
bc24270e
RH
1738 return gen_excp(ctx, EXCP_HLT, 0);
1739
034ebc27
RH
1740 case 252:
1741 /* HALT */
1742 gen_helper_halt(tmp);
1743 return EXIT_PC_STALE;
1744
c781cf96
RH
1745 case 251:
1746 /* ALARM */
69163fbb 1747 gen_helper_set_alarm(cpu_env, tmp);
c781cf96
RH
1748 break;
1749
a9ead832
RH
1750 case 7:
1751 /* PALBR */
1752 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
1753 /* Changing the PAL base register implies un-chaining all of the TBs
1754 that ended with a CALL_PAL. Since the base register usually only
1755 changes during boot, flushing everything works well. */
1756 gen_helper_tb_flush(cpu_env);
1757 return EXIT_PC_STALE;
1758
bc24270e 1759 default:
3b4fefd6
RH
1760 /* The basic registers are data only, and unknown registers
1761 are read-zero, write-ignore. */
bc24270e 1762 data = cpu_pr_data(regno);
3b4fefd6
RH
1763 if (data != 0) {
1764 if (data & PR_BYTE) {
1765 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1766 } else if (data & PR_LONG) {
1767 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1768 } else {
1769 tcg_gen_st_i64(tmp, cpu_env, data);
1770 }
26b46094 1771 }
bc24270e 1772 break;
26b46094
RH
1773 }
1774
1775 if (rb == 31) {
1776 tcg_temp_free(tmp);
1777 }
bc24270e
RH
1778
1779 return NO_EXIT;
26b46094
RH
1780}
1781#endif /* !USER_ONLY*/
1782
4af70374 1783static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1784{
1785 uint32_t palcode;
efa64351
MT
1786 int32_t disp21, disp16;
1787#ifndef CONFIG_USER_ONLY
1788 int32_t disp12;
1789#endif
f88fe4e3 1790 uint16_t fn11;
b6fb147c 1791 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1792 uint8_t lit;
4af70374 1793 ExitStatus ret;
4c9649a9
JM
1794
1795 /* Decode all instruction fields */
1796 opc = insn >> 26;
1797 ra = (insn >> 21) & 0x1F;
1798 rb = (insn >> 16) & 0x1F;
1799 rc = insn & 0x1F;
13e4df99 1800 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1801 if (rb == 31 && !islit) {
1802 islit = 1;
1803 lit = 0;
1804 } else
1805 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1806 palcode = insn & 0x03FFFFFF;
1807 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1808 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1809#ifndef CONFIG_USER_ONLY
4c9649a9 1810 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1811#endif
4c9649a9
JM
1812 fn11 = (insn >> 5) & 0x000007FF;
1813 fpfn = fn11 & 0x3F;
1814 fn7 = (insn >> 5) & 0x0000007F;
806991da 1815 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1816 opc, ra, rb, rc, disp16);
806991da 1817
4af70374 1818 ret = NO_EXIT;
4c9649a9
JM
1819 switch (opc) {
1820 case 0x00:
1821 /* CALL_PAL */
2ace7e55
RH
1822 ret = gen_call_pal(ctx, palcode);
1823 break;
4c9649a9
JM
1824 case 0x01:
1825 /* OPC01 */
1826 goto invalid_opc;
1827 case 0x02:
1828 /* OPC02 */
1829 goto invalid_opc;
1830 case 0x03:
1831 /* OPC03 */
1832 goto invalid_opc;
1833 case 0x04:
1834 /* OPC04 */
1835 goto invalid_opc;
1836 case 0x05:
1837 /* OPC05 */
1838 goto invalid_opc;
1839 case 0x06:
1840 /* OPC06 */
1841 goto invalid_opc;
1842 case 0x07:
1843 /* OPC07 */
1844 goto invalid_opc;
1845 case 0x08:
1846 /* LDA */
1ef4ef4e 1847 if (likely(ra != 31)) {
496cb5b9 1848 if (rb != 31)
3761035f
AJ
1849 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1850 else
1851 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1852 }
4c9649a9
JM
1853 break;
1854 case 0x09:
1855 /* LDAH */
1ef4ef4e 1856 if (likely(ra != 31)) {
496cb5b9 1857 if (rb != 31)
3761035f
AJ
1858 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1859 else
1860 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1861 }
4c9649a9
JM
1862 break;
1863 case 0x0A:
1864 /* LDBU */
a18ad893
RH
1865 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1866 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1867 break;
1868 }
1869 goto invalid_opc;
4c9649a9
JM
1870 case 0x0B:
1871 /* LDQ_U */
f18cd223 1872 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1873 break;
1874 case 0x0C:
1875 /* LDWU */
a18ad893
RH
1876 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1877 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1878 break;
1879 }
1880 goto invalid_opc;
4c9649a9
JM
1881 case 0x0D:
1882 /* STW */
6910b8f6 1883 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1884 break;
1885 case 0x0E:
1886 /* STB */
6910b8f6 1887 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1888 break;
1889 case 0x0F:
1890 /* STQ_U */
6910b8f6 1891 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1892 break;
1893 case 0x10:
1894 switch (fn7) {
1895 case 0x00:
1896 /* ADDL */
30c7183b
AJ
1897 if (likely(rc != 31)) {
1898 if (ra != 31) {
1899 if (islit) {
1900 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1901 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1902 } else {
30c7183b
AJ
1903 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1904 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1905 }
30c7183b
AJ
1906 } else {
1907 if (islit)
dfaa8583 1908 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1909 else
dfaa8583 1910 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1911 }
1912 }
4c9649a9
JM
1913 break;
1914 case 0x02:
1915 /* S4ADDL */
30c7183b
AJ
1916 if (likely(rc != 31)) {
1917 if (ra != 31) {
a7812ae4 1918 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1919 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1920 if (islit)
1921 tcg_gen_addi_i64(tmp, tmp, lit);
1922 else
1923 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1924 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1925 tcg_temp_free(tmp);
30c7183b
AJ
1926 } else {
1927 if (islit)
1928 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1929 else
dfaa8583 1930 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1931 }
1932 }
4c9649a9
JM
1933 break;
1934 case 0x09:
1935 /* SUBL */
30c7183b
AJ
1936 if (likely(rc != 31)) {
1937 if (ra != 31) {
dfaa8583 1938 if (islit)
30c7183b 1939 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1940 else
30c7183b 1941 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1942 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1943 } else {
1944 if (islit)
1945 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1946 else {
30c7183b
AJ
1947 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1948 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1949 }
1950 }
4c9649a9
JM
1951 break;
1952 case 0x0B:
1953 /* S4SUBL */
30c7183b
AJ
1954 if (likely(rc != 31)) {
1955 if (ra != 31) {
a7812ae4 1956 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1957 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1958 if (islit)
1959 tcg_gen_subi_i64(tmp, tmp, lit);
1960 else
1961 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1962 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1963 tcg_temp_free(tmp);
30c7183b
AJ
1964 } else {
1965 if (islit)
1966 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1967 else {
30c7183b
AJ
1968 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1969 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1970 }
30c7183b
AJ
1971 }
1972 }
4c9649a9
JM
1973 break;
1974 case 0x0F:
1975 /* CMPBGE */
a7812ae4 1976 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1977 break;
1978 case 0x12:
1979 /* S8ADDL */
30c7183b
AJ
1980 if (likely(rc != 31)) {
1981 if (ra != 31) {
a7812ae4 1982 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1983 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1984 if (islit)
1985 tcg_gen_addi_i64(tmp, tmp, lit);
1986 else
1987 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1988 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1989 tcg_temp_free(tmp);
30c7183b
AJ
1990 } else {
1991 if (islit)
1992 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1993 else
dfaa8583 1994 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1995 }
1996 }
4c9649a9
JM
1997 break;
1998 case 0x1B:
1999 /* S8SUBL */
30c7183b
AJ
2000 if (likely(rc != 31)) {
2001 if (ra != 31) {
a7812ae4 2002 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2003 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2004 if (islit)
2005 tcg_gen_subi_i64(tmp, tmp, lit);
2006 else
2007 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
2008 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
2009 tcg_temp_free(tmp);
30c7183b
AJ
2010 } else {
2011 if (islit)
2012 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 2013 else
30c7183b
AJ
2014 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2015 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 2016 }
30c7183b
AJ
2017 }
2018 }
4c9649a9
JM
2019 break;
2020 case 0x1D:
2021 /* CMPULT */
01ff9cc8 2022 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
2023 break;
2024 case 0x20:
2025 /* ADDQ */
30c7183b
AJ
2026 if (likely(rc != 31)) {
2027 if (ra != 31) {
2028 if (islit)
2029 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2030 else
dfaa8583 2031 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2032 } else {
2033 if (islit)
2034 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2035 else
dfaa8583 2036 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2037 }
2038 }
4c9649a9
JM
2039 break;
2040 case 0x22:
2041 /* S4ADDQ */
30c7183b
AJ
2042 if (likely(rc != 31)) {
2043 if (ra != 31) {
a7812ae4 2044 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2045 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2046 if (islit)
2047 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2048 else
2049 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2050 tcg_temp_free(tmp);
30c7183b
AJ
2051 } else {
2052 if (islit)
2053 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2054 else
dfaa8583 2055 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2056 }
2057 }
4c9649a9
JM
2058 break;
2059 case 0x29:
2060 /* SUBQ */
30c7183b
AJ
2061 if (likely(rc != 31)) {
2062 if (ra != 31) {
2063 if (islit)
2064 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2065 else
dfaa8583 2066 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2067 } else {
2068 if (islit)
2069 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2070 else
dfaa8583 2071 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2072 }
2073 }
4c9649a9
JM
2074 break;
2075 case 0x2B:
2076 /* S4SUBQ */
30c7183b
AJ
2077 if (likely(rc != 31)) {
2078 if (ra != 31) {
a7812ae4 2079 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2080 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2081 if (islit)
2082 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2083 else
2084 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2085 tcg_temp_free(tmp);
30c7183b
AJ
2086 } else {
2087 if (islit)
2088 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2089 else
dfaa8583 2090 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2091 }
2092 }
4c9649a9
JM
2093 break;
2094 case 0x2D:
2095 /* CMPEQ */
01ff9cc8 2096 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
2097 break;
2098 case 0x32:
2099 /* S8ADDQ */
30c7183b
AJ
2100 if (likely(rc != 31)) {
2101 if (ra != 31) {
a7812ae4 2102 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2103 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2104 if (islit)
2105 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2106 else
2107 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2108 tcg_temp_free(tmp);
30c7183b
AJ
2109 } else {
2110 if (islit)
2111 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2112 else
dfaa8583 2113 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2114 }
2115 }
4c9649a9
JM
2116 break;
2117 case 0x3B:
2118 /* S8SUBQ */
30c7183b
AJ
2119 if (likely(rc != 31)) {
2120 if (ra != 31) {
a7812ae4 2121 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2122 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2123 if (islit)
2124 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2125 else
2126 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2127 tcg_temp_free(tmp);
30c7183b
AJ
2128 } else {
2129 if (islit)
2130 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2131 else
dfaa8583 2132 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2133 }
2134 }
4c9649a9
JM
2135 break;
2136 case 0x3D:
2137 /* CMPULE */
01ff9cc8 2138 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2139 break;
2140 case 0x40:
2141 /* ADDL/V */
a7812ae4 2142 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2143 break;
2144 case 0x49:
2145 /* SUBL/V */
a7812ae4 2146 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2147 break;
2148 case 0x4D:
2149 /* CMPLT */
01ff9cc8 2150 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2151 break;
2152 case 0x60:
2153 /* ADDQ/V */
a7812ae4 2154 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2155 break;
2156 case 0x69:
2157 /* SUBQ/V */
a7812ae4 2158 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2159 break;
2160 case 0x6D:
2161 /* CMPLE */
01ff9cc8 2162 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2163 break;
2164 default:
2165 goto invalid_opc;
2166 }
2167 break;
2168 case 0x11:
2169 switch (fn7) {
2170 case 0x00:
2171 /* AND */
30c7183b 2172 if (likely(rc != 31)) {
dfaa8583 2173 if (ra == 31)
30c7183b
AJ
2174 tcg_gen_movi_i64(cpu_ir[rc], 0);
2175 else if (islit)
2176 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2177 else
2178 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2179 }
4c9649a9
JM
2180 break;
2181 case 0x08:
2182 /* BIC */
30c7183b
AJ
2183 if (likely(rc != 31)) {
2184 if (ra != 31) {
2185 if (islit)
2186 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2187 else
2188 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2189 } else
2190 tcg_gen_movi_i64(cpu_ir[rc], 0);
2191 }
4c9649a9
JM
2192 break;
2193 case 0x14:
2194 /* CMOVLBS */
bbe1dab4 2195 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2196 break;
2197 case 0x16:
2198 /* CMOVLBC */
bbe1dab4 2199 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2200 break;
2201 case 0x20:
2202 /* BIS */
30c7183b
AJ
2203 if (likely(rc != 31)) {
2204 if (ra != 31) {
2205 if (islit)
2206 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2207 else
30c7183b 2208 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2209 } else {
30c7183b
AJ
2210 if (islit)
2211 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2212 else
dfaa8583 2213 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2214 }
4c9649a9
JM
2215 }
2216 break;
2217 case 0x24:
2218 /* CMOVEQ */
bbe1dab4 2219 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2220 break;
2221 case 0x26:
2222 /* CMOVNE */
bbe1dab4 2223 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2224 break;
2225 case 0x28:
2226 /* ORNOT */
30c7183b 2227 if (likely(rc != 31)) {
dfaa8583 2228 if (ra != 31) {
30c7183b
AJ
2229 if (islit)
2230 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2231 else
2232 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2233 } else {
2234 if (islit)
2235 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2236 else
2237 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2238 }
2239 }
4c9649a9
JM
2240 break;
2241 case 0x40:
2242 /* XOR */
30c7183b
AJ
2243 if (likely(rc != 31)) {
2244 if (ra != 31) {
2245 if (islit)
2246 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2247 else
dfaa8583 2248 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2249 } else {
2250 if (islit)
2251 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2252 else
dfaa8583 2253 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2254 }
2255 }
4c9649a9
JM
2256 break;
2257 case 0x44:
2258 /* CMOVLT */
bbe1dab4 2259 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2260 break;
2261 case 0x46:
2262 /* CMOVGE */
bbe1dab4 2263 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2264 break;
2265 case 0x48:
2266 /* EQV */
30c7183b
AJ
2267 if (likely(rc != 31)) {
2268 if (ra != 31) {
2269 if (islit)
2270 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2271 else
2272 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2273 } else {
2274 if (islit)
2275 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2276 else
dfaa8583 2277 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2278 }
2279 }
4c9649a9
JM
2280 break;
2281 case 0x61:
2282 /* AMASK */
ae8ecd42 2283 if (likely(rc != 31)) {
a18ad893
RH
2284 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2285
2286 if (islit) {
2287 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2288 } else {
2289 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2290 }
ae8ecd42 2291 }
4c9649a9
JM
2292 break;
2293 case 0x64:
2294 /* CMOVLE */
bbe1dab4 2295 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2296 break;
2297 case 0x66:
2298 /* CMOVGT */
bbe1dab4 2299 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2300 break;
2301 case 0x6C:
2302 /* IMPLVER */
801c4c28
RH
2303 if (rc != 31) {
2304 tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
2305 }
4c9649a9
JM
2306 break;
2307 default:
2308 goto invalid_opc;
2309 }
2310 break;
2311 case 0x12:
2312 switch (fn7) {
2313 case 0x02:
2314 /* MSKBL */
14ab1634 2315 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2316 break;
2317 case 0x06:
2318 /* EXTBL */
377a43b6 2319 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2320 break;
2321 case 0x0B:
2322 /* INSBL */
248c42f3 2323 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2324 break;
2325 case 0x12:
2326 /* MSKWL */
14ab1634 2327 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2328 break;
2329 case 0x16:
2330 /* EXTWL */
377a43b6 2331 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2332 break;
2333 case 0x1B:
2334 /* INSWL */
248c42f3 2335 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2336 break;
2337 case 0x22:
2338 /* MSKLL */
14ab1634 2339 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2340 break;
2341 case 0x26:
2342 /* EXTLL */
377a43b6 2343 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2344 break;
2345 case 0x2B:
2346 /* INSLL */
248c42f3 2347 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2348 break;
2349 case 0x30:
2350 /* ZAP */
a7812ae4 2351 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2352 break;
2353 case 0x31:
2354 /* ZAPNOT */
a7812ae4 2355 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2356 break;
2357 case 0x32:
2358 /* MSKQL */
14ab1634 2359 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2360 break;
2361 case 0x34:
2362 /* SRL */
30c7183b
AJ
2363 if (likely(rc != 31)) {
2364 if (ra != 31) {
2365 if (islit)
2366 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2367 else {
a7812ae4 2368 TCGv shift = tcg_temp_new();
30c7183b
AJ
2369 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2370 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2371 tcg_temp_free(shift);
dfaa8583 2372 }
30c7183b
AJ
2373 } else
2374 tcg_gen_movi_i64(cpu_ir[rc], 0);
2375 }
4c9649a9
JM
2376 break;
2377 case 0x36:
2378 /* EXTQL */
377a43b6 2379 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2380 break;
2381 case 0x39:
2382 /* SLL */
30c7183b
AJ
2383 if (likely(rc != 31)) {
2384 if (ra != 31) {
2385 if (islit)
2386 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2387 else {
a7812ae4 2388 TCGv shift = tcg_temp_new();
30c7183b
AJ
2389 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2390 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2391 tcg_temp_free(shift);
dfaa8583 2392 }
30c7183b
AJ
2393 } else
2394 tcg_gen_movi_i64(cpu_ir[rc], 0);
2395 }
4c9649a9
JM
2396 break;
2397 case 0x3B:
2398 /* INSQL */
248c42f3 2399 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2400 break;
2401 case 0x3C:
2402 /* SRA */
30c7183b
AJ
2403 if (likely(rc != 31)) {
2404 if (ra != 31) {
2405 if (islit)
2406 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2407 else {
a7812ae4 2408 TCGv shift = tcg_temp_new();
30c7183b
AJ
2409 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2410 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2411 tcg_temp_free(shift);
dfaa8583 2412 }
30c7183b
AJ
2413 } else
2414 tcg_gen_movi_i64(cpu_ir[rc], 0);
2415 }
4c9649a9
JM
2416 break;
2417 case 0x52:
2418 /* MSKWH */
ffec44f1 2419 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2420 break;
2421 case 0x57:
2422 /* INSWH */
50eb6e5c 2423 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2424 break;
2425 case 0x5A:
2426 /* EXTWH */
377a43b6 2427 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2428 break;
2429 case 0x62:
2430 /* MSKLH */
ffec44f1 2431 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2432 break;
2433 case 0x67:
2434 /* INSLH */
50eb6e5c 2435 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2436 break;
2437 case 0x6A:
2438 /* EXTLH */
377a43b6 2439 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2440 break;
2441 case 0x72:
2442 /* MSKQH */
ffec44f1 2443 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2444 break;
2445 case 0x77:
2446 /* INSQH */
50eb6e5c 2447 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2448 break;
2449 case 0x7A:
2450 /* EXTQH */
377a43b6 2451 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2452 break;
2453 default:
2454 goto invalid_opc;
2455 }
2456 break;
2457 case 0x13:
2458 switch (fn7) {
2459 case 0x00:
2460 /* MULL */
30c7183b 2461 if (likely(rc != 31)) {
dfaa8583 2462 if (ra == 31)
30c7183b
AJ
2463 tcg_gen_movi_i64(cpu_ir[rc], 0);
2464 else {
2465 if (islit)
2466 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2467 else
2468 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2469 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2470 }
2471 }
4c9649a9
JM
2472 break;
2473 case 0x20:
2474 /* MULQ */
30c7183b 2475 if (likely(rc != 31)) {
dfaa8583 2476 if (ra == 31)
30c7183b
AJ
2477 tcg_gen_movi_i64(cpu_ir[rc], 0);
2478 else if (islit)
2479 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2480 else
2481 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2482 }
4c9649a9
JM
2483 break;
2484 case 0x30:
2485 /* UMULH */
962415fc
RH
2486 {
2487 TCGv low;
2488 if (unlikely(rc == 31)){
2489 break;
2490 }
2491 if (ra == 31) {
2492 tcg_gen_movi_i64(cpu_ir[rc], 0);
2493 break;
2494 }
2495 low = tcg_temp_new();
2496 if (islit) {
2497 tcg_gen_movi_tl(low, lit);
2498 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2499 } else {
2500 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2501 }
2502 tcg_temp_free(low);
2503 }
4c9649a9
JM
2504 break;
2505 case 0x40:
2506 /* MULL/V */
a7812ae4 2507 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2508 break;
2509 case 0x60:
2510 /* MULQ/V */
a7812ae4 2511 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2512 break;
2513 default:
2514 goto invalid_opc;
2515 }
2516 break;
2517 case 0x14:
f24518b5 2518 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2519 case 0x04:
2520 /* ITOFS */
a18ad893 2521 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2522 goto invalid_opc;
a18ad893 2523 }
f18cd223
AJ
2524 if (likely(rc != 31)) {
2525 if (ra != 31) {
a7812ae4 2526 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2527 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2528 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2529 tcg_temp_free_i32(tmp);
f18cd223
AJ
2530 } else
2531 tcg_gen_movi_i64(cpu_fir[rc], 0);
2532 }
4c9649a9
JM
2533 break;
2534 case 0x0A:
2535 /* SQRTF */
a18ad893
RH
2536 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2537 gen_fsqrtf(rb, rc);
2538 break;
2539 }
2540 goto invalid_opc;
4c9649a9
JM
2541 case 0x0B:
2542 /* SQRTS */
a18ad893
RH
2543 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2544 gen_fsqrts(ctx, rb, rc, fn11);
2545 break;
2546 }
2547 goto invalid_opc;
4c9649a9
JM
2548 case 0x14:
2549 /* ITOFF */
a18ad893 2550 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2551 goto invalid_opc;
a18ad893 2552 }
f18cd223
AJ
2553 if (likely(rc != 31)) {
2554 if (ra != 31) {
a7812ae4 2555 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2556 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2557 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2558 tcg_temp_free_i32(tmp);
f18cd223
AJ
2559 } else
2560 tcg_gen_movi_i64(cpu_fir[rc], 0);
2561 }
4c9649a9
JM
2562 break;
2563 case 0x24:
2564 /* ITOFT */
a18ad893 2565 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2566 goto invalid_opc;
a18ad893 2567 }
f18cd223
AJ
2568 if (likely(rc != 31)) {
2569 if (ra != 31)
2570 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2571 else
2572 tcg_gen_movi_i64(cpu_fir[rc], 0);
2573 }
4c9649a9
JM
2574 break;
2575 case 0x2A:
2576 /* SQRTG */
a18ad893
RH
2577 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2578 gen_fsqrtg(rb, rc);
2579 break;
2580 }
2581 goto invalid_opc;
4c9649a9
JM
2582 case 0x02B:
2583 /* SQRTT */
a18ad893
RH
2584 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2585 gen_fsqrtt(ctx, rb, rc, fn11);
2586 break;
2587 }
2588 goto invalid_opc;
4c9649a9
JM
2589 default:
2590 goto invalid_opc;
2591 }
2592 break;
2593 case 0x15:
2594 /* VAX floating point */
2595 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2596 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2597 case 0x00:
2598 /* ADDF */
a7812ae4 2599 gen_faddf(ra, rb, rc);
4c9649a9
JM
2600 break;
2601 case 0x01:
2602 /* SUBF */
a7812ae4 2603 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2604 break;
2605 case 0x02:
2606 /* MULF */
a7812ae4 2607 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2608 break;
2609 case 0x03:
2610 /* DIVF */
a7812ae4 2611 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2612 break;
2613 case 0x1E:
2614 /* CVTDG */
2615#if 0 // TODO
a7812ae4 2616 gen_fcvtdg(rb, rc);
4c9649a9
JM
2617#else
2618 goto invalid_opc;
2619#endif
2620 break;
2621 case 0x20:
2622 /* ADDG */
a7812ae4 2623 gen_faddg(ra, rb, rc);
4c9649a9
JM
2624 break;
2625 case 0x21:
2626 /* SUBG */
a7812ae4 2627 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2628 break;
2629 case 0x22:
2630 /* MULG */
a7812ae4 2631 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2632 break;
2633 case 0x23:
2634 /* DIVG */
a7812ae4 2635 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2636 break;
2637 case 0x25:
2638 /* CMPGEQ */
a7812ae4 2639 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2640 break;
2641 case 0x26:
2642 /* CMPGLT */
a7812ae4 2643 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2644 break;
2645 case 0x27:
2646 /* CMPGLE */
a7812ae4 2647 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2648 break;
2649 case 0x2C:
2650 /* CVTGF */
a7812ae4 2651 gen_fcvtgf(rb, rc);
4c9649a9
JM
2652 break;
2653 case 0x2D:
2654 /* CVTGD */
2655#if 0 // TODO
a7812ae4 2656 gen_fcvtgd(rb, rc);
4c9649a9
JM
2657#else
2658 goto invalid_opc;
2659#endif
2660 break;
2661 case 0x2F:
2662 /* CVTGQ */
a7812ae4 2663 gen_fcvtgq(rb, rc);
4c9649a9
JM
2664 break;
2665 case 0x3C:
2666 /* CVTQF */
a7812ae4 2667 gen_fcvtqf(rb, rc);
4c9649a9
JM
2668 break;
2669 case 0x3E:
2670 /* CVTQG */
a7812ae4 2671 gen_fcvtqg(rb, rc);
4c9649a9
JM
2672 break;
2673 default:
2674 goto invalid_opc;
2675 }
2676 break;
2677 case 0x16:
2678 /* IEEE floating-point */
f24518b5 2679 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2680 case 0x00:
2681 /* ADDS */
f24518b5 2682 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2683 break;
2684 case 0x01:
2685 /* SUBS */
f24518b5 2686 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2687 break;
2688 case 0x02:
2689 /* MULS */
f24518b5 2690 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2691 break;
2692 case 0x03:
2693 /* DIVS */
f24518b5 2694 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2695 break;
2696 case 0x20:
2697 /* ADDT */
f24518b5 2698 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2699 break;
2700 case 0x21:
2701 /* SUBT */
f24518b5 2702 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2703 break;
2704 case 0x22:
2705 /* MULT */
f24518b5 2706 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2707 break;
2708 case 0x23:
2709 /* DIVT */
f24518b5 2710 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2711 break;
2712 case 0x24:
2713 /* CMPTUN */
f24518b5 2714 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2715 break;
2716 case 0x25:
2717 /* CMPTEQ */
f24518b5 2718 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2719 break;
2720 case 0x26:
2721 /* CMPTLT */
f24518b5 2722 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2723 break;
2724 case 0x27:
2725 /* CMPTLE */
f24518b5 2726 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2727 break;
2728 case 0x2C:
a74b4d2c 2729 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2730 /* CVTST */
f24518b5 2731 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2732 } else {
2733 /* CVTTS */
f24518b5 2734 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2735 }
2736 break;
2737 case 0x2F:
2738 /* CVTTQ */
f24518b5 2739 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2740 break;
2741 case 0x3C:
2742 /* CVTQS */
f24518b5 2743 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2744 break;
2745 case 0x3E:
2746 /* CVTQT */
f24518b5 2747 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2748 break;
2749 default:
2750 goto invalid_opc;
2751 }
2752 break;
2753 case 0x17:
2754 switch (fn11) {
2755 case 0x010:
2756 /* CVTLQ */
a7812ae4 2757 gen_fcvtlq(rb, rc);
4c9649a9
JM
2758 break;
2759 case 0x020:
f18cd223 2760 if (likely(rc != 31)) {
a06d48d9 2761 if (ra == rb) {
4c9649a9 2762 /* FMOV */
a06d48d9
RH
2763 if (ra == 31)
2764 tcg_gen_movi_i64(cpu_fir[rc], 0);
2765 else
2766 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2767 } else {
f18cd223 2768 /* CPYS */
a7812ae4 2769 gen_fcpys(ra, rb, rc);
a06d48d9 2770 }
4c9649a9
JM
2771 }
2772 break;
2773 case 0x021:
2774 /* CPYSN */
a7812ae4 2775 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2776 break;
2777 case 0x022:
2778 /* CPYSE */
a7812ae4 2779 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2780 break;
2781 case 0x024:
2782 /* MT_FPCR */
f18cd223 2783 if (likely(ra != 31))
a44a2777 2784 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
f18cd223
AJ
2785 else {
2786 TCGv tmp = tcg_const_i64(0);
a44a2777 2787 gen_helper_store_fpcr(cpu_env, tmp);
f18cd223
AJ
2788 tcg_temp_free(tmp);
2789 }
4c9649a9
JM
2790 break;
2791 case 0x025:
2792 /* MF_FPCR */
f18cd223 2793 if (likely(ra != 31))
a44a2777 2794 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
4c9649a9
JM
2795 break;
2796 case 0x02A:
2797 /* FCMOVEQ */
bbe1dab4 2798 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2799 break;
2800 case 0x02B:
2801 /* FCMOVNE */
bbe1dab4 2802 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2803 break;
2804 case 0x02C:
2805 /* FCMOVLT */
bbe1dab4 2806 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2807 break;
2808 case 0x02D:
2809 /* FCMOVGE */
bbe1dab4 2810 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2811 break;
2812 case 0x02E:
2813 /* FCMOVLE */
bbe1dab4 2814 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2815 break;
2816 case 0x02F:
2817 /* FCMOVGT */
bbe1dab4 2818 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2819 break;
2820 case 0x030:
2821 /* CVTQL */
a7812ae4 2822 gen_fcvtql(rb, rc);
4c9649a9
JM
2823 break;
2824 case 0x130:
2825 /* CVTQL/V */
4c9649a9
JM
2826 case 0x530:
2827 /* CVTQL/SV */
735cf45f
RH
2828 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2829 /v doesn't do. The only thing I can think is that /sv is a
2830 valid instruction merely for completeness in the ISA. */
2831 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2832 break;
2833 default:
2834 goto invalid_opc;
2835 }
2836 break;
2837 case 0x18:
2838 switch ((uint16_t)disp16) {
2839 case 0x0000:
2840 /* TRAPB */
4af70374 2841 /* No-op. */
4c9649a9
JM
2842 break;
2843 case 0x0400:
2844 /* EXCB */
4af70374 2845 /* No-op. */
4c9649a9
JM
2846 break;
2847 case 0x4000:
2848 /* MB */
2849 /* No-op */
2850 break;
2851 case 0x4400:
2852 /* WMB */
2853 /* No-op */
2854 break;
2855 case 0x8000:
2856 /* FETCH */
2857 /* No-op */
2858 break;
2859 case 0xA000:
2860 /* FETCH_M */
2861 /* No-op */
2862 break;
2863 case 0xC000:
2864 /* RPCC */
a9406ea1
RH
2865 if (ra != 31) {
2866 if (use_icount) {
2867 gen_io_start();
69163fbb 2868 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2869 gen_io_end();
2870 ret = EXIT_PC_STALE;
2871 } else {
69163fbb 2872 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
a9406ea1
RH
2873 }
2874 }
4c9649a9
JM
2875 break;
2876 case 0xE000:
2877 /* RC */
ac316ca4 2878 gen_rx(ra, 0);
4c9649a9
JM
2879 break;
2880 case 0xE800:
2881 /* ECB */
4c9649a9
JM
2882 break;
2883 case 0xF000:
2884 /* RS */
ac316ca4 2885 gen_rx(ra, 1);
4c9649a9
JM
2886 break;
2887 case 0xF800:
2888 /* WH64 */
2889 /* No-op */
2890 break;
2891 default:
2892 goto invalid_opc;
2893 }
2894 break;
2895 case 0x19:
2896 /* HW_MFPR (PALcode) */
26b46094 2897#ifndef CONFIG_USER_ONLY
a18ad893 2898 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
c781cf96 2899 return gen_mfpr(ra, insn & 0xffff);
26b46094
RH
2900 }
2901#endif
4c9649a9 2902 goto invalid_opc;
4c9649a9 2903 case 0x1A:
49563a72
RH
2904 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2905 prediction stack action, which of course we don't implement. */
2906 if (rb != 31) {
3761035f 2907 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2908 } else {
3761035f 2909 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2910 }
2911 if (ra != 31) {
1304ca87 2912 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2913 }
4af70374 2914 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2915 break;
2916 case 0x1B:
2917 /* HW_LD (PALcode) */
a18ad893
RH
2918#ifndef CONFIG_USER_ONLY
2919 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2920 TCGv addr;
2921
2922 if (ra == 31) {
2923 break;
2924 }
2925
2926 addr = tcg_temp_new();
8bb6e981
AJ
2927 if (rb != 31)
2928 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2929 else
2930 tcg_gen_movi_i64(addr, disp12);
2931 switch ((insn >> 12) & 0xF) {
2932 case 0x0:
b5d51029 2933 /* Longword physical access (hw_ldl/p) */
2374e73e 2934 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2935 break;
2936 case 0x1:
b5d51029 2937 /* Quadword physical access (hw_ldq/p) */
2374e73e 2938 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2939 break;
2940 case 0x2:
b5d51029 2941 /* Longword physical access with lock (hw_ldl_l/p) */
c3082755 2942 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2943 break;
2944 case 0x3:
b5d51029 2945 /* Quadword physical access with lock (hw_ldq_l/p) */
c3082755 2946 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
8bb6e981
AJ
2947 break;
2948 case 0x4:
b5d51029 2949 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2950 goto invalid_opc;
8bb6e981 2951 case 0x5:
b5d51029 2952 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2953 goto invalid_opc;
8bb6e981
AJ
2954 break;
2955 case 0x6:
2956 /* Incpu_ir[ra]id */
b5d51029 2957 goto invalid_opc;
8bb6e981
AJ
2958 case 0x7:
2959 /* Incpu_ir[ra]id */
b5d51029 2960 goto invalid_opc;
8bb6e981 2961 case 0x8:
b5d51029 2962 /* Longword virtual access (hw_ldl) */
2374e73e 2963 goto invalid_opc;
8bb6e981 2964 case 0x9:
b5d51029 2965 /* Quadword virtual access (hw_ldq) */
2374e73e 2966 goto invalid_opc;
8bb6e981 2967 case 0xA:
b5d51029 2968 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2969 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2970 break;
2971 case 0xB:
b5d51029 2972 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2973 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2974 break;
2975 case 0xC:
b5d51029 2976 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2977 goto invalid_opc;
8bb6e981 2978 case 0xD:
b5d51029 2979 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2980 goto invalid_opc;
8bb6e981
AJ
2981 case 0xE:
2982 /* Longword virtual access with alternate access mode and
2374e73e
RH
2983 protection checks (hw_ldl/wa) */
2984 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2985 break;
2986 case 0xF:
2987 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2988 protection checks (hw_ldq/wa) */
2989 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2990 break;
2991 }
2992 tcg_temp_free(addr);
a18ad893 2993 break;
4c9649a9 2994 }
4c9649a9 2995#endif
a18ad893 2996 goto invalid_opc;
4c9649a9
JM
2997 case 0x1C:
2998 switch (fn7) {
2999 case 0x00:
3000 /* SEXTB */
a18ad893 3001 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 3002 goto invalid_opc;
a18ad893 3003 }
ae8ecd42
AJ
3004 if (likely(rc != 31)) {
3005 if (islit)
3006 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 3007 else
dfaa8583 3008 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 3009 }
4c9649a9
JM
3010 break;
3011 case 0x01:
3012 /* SEXTW */
a18ad893
RH
3013 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
3014 if (likely(rc != 31)) {
3015 if (islit) {
3016 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
3017 } else {
3018 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
3019 }
3020 }
3021 break;
ae8ecd42 3022 }
a18ad893 3023 goto invalid_opc;
4c9649a9
JM
3024 case 0x30:
3025 /* CTPOP */
a18ad893
RH
3026 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3027 if (likely(rc != 31)) {
3028 if (islit) {
3029 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
3030 } else {
3031 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
3032 }
3033 }
3034 break;
ae8ecd42 3035 }
a18ad893 3036 goto invalid_opc;
4c9649a9
JM
3037 case 0x31:
3038 /* PERR */
a18ad893
RH
3039 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3040 gen_perr(ra, rb, rc, islit, lit);
3041 break;
3042 }
3043 goto invalid_opc;
4c9649a9
JM
3044 case 0x32:
3045 /* CTLZ */
a18ad893
RH
3046 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3047 if (likely(rc != 31)) {
3048 if (islit) {
3049 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3050 } else {
3051 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
3052 }
3053 }
3054 break;
ae8ecd42 3055 }
a18ad893 3056 goto invalid_opc;
4c9649a9
JM
3057 case 0x33:
3058 /* CTTZ */
a18ad893
RH
3059 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3060 if (likely(rc != 31)) {
3061 if (islit) {
3062 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3063 } else {
3064 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3065 }
3066 }
3067 break;
ae8ecd42 3068 }
a18ad893 3069 goto invalid_opc;
4c9649a9
JM
3070 case 0x34:
3071 /* UNPKBW */
a18ad893
RH
3072 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3073 if (real_islit || ra != 31) {
3074 goto invalid_opc;
3075 }
3076 gen_unpkbw(rb, rc);
3077 break;
3078 }
3079 goto invalid_opc;
4c9649a9 3080 case 0x35:
13e4df99 3081 /* UNPKBL */
a18ad893
RH
3082 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3083 if (real_islit || ra != 31) {
3084 goto invalid_opc;
3085 }
3086 gen_unpkbl(rb, rc);
3087 break;
3088 }
3089 goto invalid_opc;
4c9649a9
JM
3090 case 0x36:
3091 /* PKWB */
a18ad893
RH
3092 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3093 if (real_islit || ra != 31) {
3094 goto invalid_opc;
3095 }
3096 gen_pkwb(rb, rc);
3097 break;
3098 }
3099 goto invalid_opc;
4c9649a9
JM
3100 case 0x37:
3101 /* PKLB */
a18ad893
RH
3102 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3103 if (real_islit || ra != 31) {
3104 goto invalid_opc;
3105 }
3106 gen_pklb(rb, rc);
3107 break;
3108 }
3109 goto invalid_opc;
4c9649a9
JM
3110 case 0x38:
3111 /* MINSB8 */
a18ad893
RH
3112 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3113 gen_minsb8(ra, rb, rc, islit, lit);
3114 break;
3115 }
3116 goto invalid_opc;
4c9649a9
JM
3117 case 0x39:
3118 /* MINSW4 */
a18ad893
RH
3119 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3120 gen_minsw4(ra, rb, rc, islit, lit);
3121 break;
3122 }
3123 goto invalid_opc;
4c9649a9
JM
3124 case 0x3A:
3125 /* MINUB8 */
a18ad893
RH
3126 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3127 gen_minub8(ra, rb, rc, islit, lit);
3128 break;
3129 }
3130 goto invalid_opc;
4c9649a9
JM
3131 case 0x3B:
3132 /* MINUW4 */
a18ad893
RH
3133 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3134 gen_minuw4(ra, rb, rc, islit, lit);
3135 break;
3136 }
3137 goto invalid_opc;
4c9649a9
JM
3138 case 0x3C:
3139 /* MAXUB8 */
a18ad893
RH
3140 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3141 gen_maxub8(ra, rb, rc, islit, lit);
3142 break;
3143 }
3144 goto invalid_opc;
4c9649a9
JM
3145 case 0x3D:
3146 /* MAXUW4 */
a18ad893
RH
3147 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3148 gen_maxuw4(ra, rb, rc, islit, lit);
3149 break;
3150 }
3151 goto invalid_opc;
4c9649a9
JM
3152 case 0x3E:
3153 /* MAXSB8 */
a18ad893
RH
3154 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3155 gen_maxsb8(ra, rb, rc, islit, lit);
3156 break;
3157 }
3158 goto invalid_opc;
4c9649a9
JM
3159 case 0x3F:
3160 /* MAXSW4 */
a18ad893
RH
3161 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3162 gen_maxsw4(ra, rb, rc, islit, lit);
3163 break;
3164 }
3165 goto invalid_opc;
4c9649a9
JM
3166 case 0x70:
3167 /* FTOIT */
a18ad893 3168 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3169 goto invalid_opc;
a18ad893 3170 }
f18cd223
AJ
3171 if (likely(rc != 31)) {
3172 if (ra != 31)
3173 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3174 else
3175 tcg_gen_movi_i64(cpu_ir[rc], 0);
3176 }
4c9649a9
JM
3177 break;
3178 case 0x78:
3179 /* FTOIS */
a18ad893 3180 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3181 goto invalid_opc;
a18ad893 3182 }
f18cd223 3183 if (rc != 31) {
a7812ae4 3184 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3185 if (ra != 31)
a7812ae4 3186 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3187 else {
3188 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3189 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3190 tcg_temp_free(tmp2);
3191 }
3192 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3193 tcg_temp_free_i32(tmp1);
f18cd223 3194 }
4c9649a9
JM
3195 break;
3196 default:
3197 goto invalid_opc;
3198 }
3199 break;
3200 case 0x1D:
3201 /* HW_MTPR (PALcode) */
26b46094 3202#ifndef CONFIG_USER_ONLY
a18ad893 3203 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
bc24270e 3204 return gen_mtpr(ctx, rb, insn & 0xffff);
26b46094
RH
3205 }
3206#endif
4c9649a9 3207 goto invalid_opc;
4c9649a9 3208 case 0x1E:
508b43ea 3209 /* HW_RET (PALcode) */
a18ad893
RH
3210#ifndef CONFIG_USER_ONLY
3211 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3212 if (rb == 31) {
3213 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3214 address from EXC_ADDR. This turns out to be useful for our
3215 emulation PALcode, so continue to accept it. */
3216 TCGv tmp = tcg_temp_new();
4d5712f1 3217 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
69163fbb 3218 gen_helper_hw_ret(cpu_env, tmp);
a18ad893
RH
3219 tcg_temp_free(tmp);
3220 } else {
69163fbb 3221 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
a18ad893
RH
3222 }
3223 ret = EXIT_PC_UPDATED;
3224 break;
4c9649a9 3225 }
4c9649a9 3226#endif
a18ad893 3227 goto invalid_opc;
4c9649a9
JM
3228 case 0x1F:
3229 /* HW_ST (PALcode) */
a18ad893
RH
3230#ifndef CONFIG_USER_ONLY
3231 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3232 TCGv addr, val;
a7812ae4 3233 addr = tcg_temp_new();
8bb6e981
AJ
3234 if (rb != 31)
3235 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3236 else
3237 tcg_gen_movi_i64(addr, disp12);
3238 if (ra != 31)
3239 val = cpu_ir[ra];
3240 else {
a7812ae4 3241 val = tcg_temp_new();
8bb6e981
AJ
3242 tcg_gen_movi_i64(val, 0);
3243 }
3244 switch ((insn >> 12) & 0xF) {
3245 case 0x0:
3246 /* Longword physical access */
2374e73e 3247 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3248 break;
3249 case 0x1:
3250 /* Quadword physical access */
2374e73e 3251 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3252 break;
3253 case 0x2:
3254 /* Longword physical access with lock */
c3082755 3255 gen_helper_stl_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3256 break;
3257 case 0x3:
3258 /* Quadword physical access with lock */
c3082755 3259 gen_helper_stq_c_phys(val, cpu_env, addr, val);
8bb6e981
AJ
3260 break;
3261 case 0x4:
3262 /* Longword virtual access */
2374e73e 3263 goto invalid_opc;
8bb6e981
AJ
3264 case 0x5:
3265 /* Quadword virtual access */
2374e73e 3266 goto invalid_opc;
8bb6e981
AJ
3267 case 0x6:
3268 /* Invalid */
3269 goto invalid_opc;
3270 case 0x7:
3271 /* Invalid */
3272 goto invalid_opc;
3273 case 0x8:
3274 /* Invalid */
3275 goto invalid_opc;
3276 case 0x9:
3277 /* Invalid */
3278 goto invalid_opc;
3279 case 0xA:
3280 /* Invalid */
3281 goto invalid_opc;
3282 case 0xB:
3283 /* Invalid */
3284 goto invalid_opc;
3285 case 0xC:
3286 /* Longword virtual access with alternate access mode */
2374e73e 3287 goto invalid_opc;
8bb6e981
AJ
3288 case 0xD:
3289 /* Quadword virtual access with alternate access mode */
2374e73e 3290 goto invalid_opc;
8bb6e981
AJ
3291 case 0xE:
3292 /* Invalid */
3293 goto invalid_opc;
3294 case 0xF:
3295 /* Invalid */
3296 goto invalid_opc;
3297 }
45d46ce8 3298 if (ra == 31)
8bb6e981
AJ
3299 tcg_temp_free(val);
3300 tcg_temp_free(addr);
a18ad893 3301 break;
4c9649a9 3302 }
4c9649a9 3303#endif
a18ad893 3304 goto invalid_opc;
4c9649a9
JM
3305 case 0x20:
3306 /* LDF */
f18cd223 3307 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3308 break;
3309 case 0x21:
3310 /* LDG */
f18cd223 3311 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3312 break;
3313 case 0x22:
3314 /* LDS */
f18cd223 3315 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3316 break;
3317 case 0x23:
3318 /* LDT */
f18cd223 3319 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3320 break;
3321 case 0x24:
3322 /* STF */
6910b8f6 3323 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3324 break;
3325 case 0x25:
3326 /* STG */
6910b8f6 3327 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3328 break;
3329 case 0x26:
3330 /* STS */
6910b8f6 3331 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3332 break;
3333 case 0x27:
3334 /* STT */
6910b8f6 3335 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3336 break;
3337 case 0x28:
3338 /* LDL */
f18cd223 3339 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3340 break;
3341 case 0x29:
3342 /* LDQ */
f18cd223 3343 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3344 break;
3345 case 0x2A:
3346 /* LDL_L */
f4ed8679 3347 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3348 break;
3349 case 0x2B:
3350 /* LDQ_L */
f4ed8679 3351 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3352 break;
3353 case 0x2C:
3354 /* STL */
6910b8f6 3355 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3356 break;
3357 case 0x2D:
3358 /* STQ */
6910b8f6 3359 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3360 break;
3361 case 0x2E:
3362 /* STL_C */
6910b8f6 3363 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3364 break;
3365 case 0x2F:
3366 /* STQ_C */
6910b8f6 3367 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3368 break;
3369 case 0x30:
3370 /* BR */
4af70374 3371 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3372 break;
a7812ae4 3373 case 0x31: /* FBEQ */
4af70374 3374 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3375 break;
a7812ae4 3376 case 0x32: /* FBLT */
4af70374 3377 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3378 break;
a7812ae4 3379 case 0x33: /* FBLE */
4af70374 3380 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3381 break;
3382 case 0x34:
3383 /* BSR */
4af70374 3384 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3385 break;
a7812ae4 3386 case 0x35: /* FBNE */
4af70374 3387 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3388 break;
a7812ae4 3389 case 0x36: /* FBGE */
4af70374 3390 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3391 break;
a7812ae4 3392 case 0x37: /* FBGT */
4af70374 3393 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3394 break;
3395 case 0x38:
3396 /* BLBC */
4af70374 3397 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3398 break;
3399 case 0x39:
3400 /* BEQ */
4af70374 3401 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3402 break;
3403 case 0x3A:
3404 /* BLT */
4af70374 3405 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3406 break;
3407 case 0x3B:
3408 /* BLE */
4af70374 3409 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3410 break;
3411 case 0x3C:
3412 /* BLBS */
4af70374 3413 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3414 break;
3415 case 0x3D:
3416 /* BNE */
4af70374 3417 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3418 break;
3419 case 0x3E:
3420 /* BGE */
4af70374 3421 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3422 break;
3423 case 0x3F:
3424 /* BGT */
4af70374 3425 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3426 break;
3427 invalid_opc:
8aa3fa20 3428 ret = gen_invalid(ctx);
4c9649a9
JM
3429 break;
3430 }
3431
3432 return ret;
3433}
3434
/* Translate a block of guest code starting at tb->pc into TCG ops.
 *
 * cpu:       the Alpha CPU being translated for.
 * tb:        the TranslationBlock to fill in (size/icount on the
 *            non-search path).
 * search_pc: when true, also record per-op pc/icount bookkeeping in
 *            tcg_ctx.gen_opc_* so a host PC can later be mapped back
 *            to a guest PC (see restore_state_to_opc).
 *
 * The loop translates one 4-byte insn at a time via translate_one()
 * until it reports a non-NO_EXIT status or an external limit (page
 * boundary, opcode buffer, icount budget, single-stepping) is hit.
 */
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamiclly figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        /* No explicit icount budget: allow the maximum.  */
        max_insns = CF_COUNT_MASK;
    }

    /* Superpages are linearly mapped, so a TB may safely cross normal
       page boundaries within one; otherwise stop at the page edge.  */
    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start();
    do {
        /* Emit a debug exception if a breakpoint is set on this pc.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record guest pc / insn count for each generated op slot,
               zero-filling any slots skipped since the last insn.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* The last insn of an I/O-terminated TB must run with I/O
           instrumentation enabled.  */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue appropriate to how translation ended.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* Exit already generated by translate_one.  */
        break;
    case EXIT_PC_STALE:
        /* cpu_pc does not reflect ctx.pc yet; synchronize it.  */
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill bookkeeping for the trailing op slots.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
3570
4d5712f1 3571void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3572{
86a35f7c 3573 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
4c9649a9
JM
3574}
3575
4d5712f1 3576void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
4c9649a9 3577{
86a35f7c 3578 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
4c9649a9
JM
3579}
3580
/* Restore the guest PC after an exception inside a TB: pc_pos indexes
   the tcg_ctx.gen_opc_pc[] table filled in during a search_pc
   retranslation (gen_intermediate_code_pc).  The Alpha has no other
   per-insn state to restore here.  */
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}