]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
migration: Remove get_status() accessor
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
20#include <stdint.h>
21#include <stdlib.h>
22#include <stdio.h>
23
24#include "cpu.h"
4c9649a9 25#include "disas.h"
ae8ecd42 26#include "host-utils.h"
57fec1fe 27#include "tcg-op.h"
ca10f867 28#include "qemu-common.h"
4c9649a9 29
a7812ae4
PB
30#include "helper.h"
31#define GEN_HELPER 1
32#include "helper.h"
33
19188121 34#undef ALPHA_DEBUG_DISAS
f24518b5 35#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
36
37#ifdef ALPHA_DEBUG_DISAS
806991da 38# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
39#else
40# define LOG_DISAS(...) do { } while (0)
41#endif
42
4c9649a9
JM
typedef struct DisasContext DisasContext;
struct DisasContext {
    /* The TB under translation, and the CPU state used for
       translation-time decisions (e.g. single-step suppression).  */
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    /* Virtual PC of the instruction currently being translated.  */
    uint64_t pc;
    /* MMU index handed to the qemu_ld/st TCG ops.  */
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};
55
4af70374
RH
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    /* Translation continues within the same TB.  */
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
78
/* global register indexes */
static TCGv_ptr cpu_env;
/* Integer and FP register files.  Only 31 entries: register $31 is
   handled specially by the translator (reads as zero) rather than
   being backed by a TCG global.  */
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
/* State for load-locked / store-conditional emulation.  */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
/* Sized for "ir0".."ir9" (4 bytes each with NUL), "ir10".."ir30"
   (5 bytes), plus "fir0".."fir9" (5 bytes) and "fir10".."fir30"
   (6 bytes); filled in by alpha_translate_init.  */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef
PB
95
96#include "gen-icount.h"
97
/* Register all TCG globals backing the Alpha CPU state.  Idempotent:
   only the first call performs the registration.  */
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* One TCG global per integer and FP register; the name strings are
       packed back-to-back into cpu_reg_names (see its size comment).  */
    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
150
/* Emit a call to the exception helper.  Does not update the PC;
   callers that need the PC set use gen_excp below.  */
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

/* Raise EXCEPTION at the current instruction.  The helper call does
   not return, so the TB ends with EXIT_NORETURN.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

/* Raise an illegal-opcode (OPCDEC) exception.  */
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
173
/* Load a 32-bit value and convert it from the in-memory layout to the
   FP register layout via the memory_to_f helper.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

/* Load a 64-bit value and convert it via the memory_to_g helper.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

/* Load a 32-bit value and convert it via the memory_to_s helper.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

/* Load-locked (32-bit): record the address and the loaded value so a
   later store-conditional can detect intervening modification.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

/* Load-locked (64-bit); see gen_qemu_ldl_l.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
217
/* Emit a memory load into register RA (FP file if FP is set) from
   address RB + DISP16, using the supplied load generator.  CLEAR
   masks the low three address bits (LDx_U forms).  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* Base register is $31 (zero): the address is just DISP16.  */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
251
/* Convert an FP register value via the f_to_memory helper and store
   the resulting 32-bit value.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

/* Convert via g_to_memory and store the 64-bit result.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

/* Convert via s_to_memory and store the 32-bit result.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
281
/* Emit a memory store of register RA (FP file if FP is set) to address
   RB + DISP16.  RA == $31 stores zero.  CLEAR masks the low three
   address bits (STx_U forms).  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        /* $31 reads as zero; materialize a zero constant to store.  */
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        /* Only the constant zero was a temporary; register TCGvs are
           globals and must not be freed.  */
        tcg_temp_free(va);
    }
}
315
/* Emit an STL_C/STQ_C store-conditional of register RA to RB + DISP16.
   QUAD selects 64-bit vs 32-bit.  On return, RA holds 1 on success and
   0 on failure, and the lock is cleared.  */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    /* Local temp: it must survive the branches emitted below.  */
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if the address differs from the one recorded at ld_l.  */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        /* Fail if the memory value changed since the locked load.  */
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* Clear the lock in all cases.  */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
382
4af70374 383static int use_goto_tb(DisasContext *ctx, uint64_t dest)
4c9649a9 384{
4af70374
RH
385 /* Check for the dest on the same page as the start of the TB. We
386 also want to suppress goto_tb in the case of single-steping and IO. */
387 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
388 && !ctx->env->singlestep_enabled
389 && !(ctx->tb->cflags & CF_LAST_IO));
390}
dbb30fe6 391
4af70374
RH
392static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
393{
394 uint64_t dest = ctx->pc + (disp << 2);
395
396 if (ra != 31) {
397 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
398 }
399
400 /* Notice branch-to-next; used to initialize RA with the PC. */
401 if (disp == 0) {
402 return 0;
403 } else if (use_goto_tb(ctx, dest)) {
404 tcg_gen_goto_tb(0);
405 tcg_gen_movi_i64(cpu_pc, dest);
4b4a72e5 406 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
4af70374
RH
407 return EXIT_GOTO_TB;
408 } else {
409 tcg_gen_movi_i64(cpu_pc, dest);
410 return EXIT_PC_UPDATED;
411 }
dbb30fe6
RH
412}
413
/* Emit a conditional branch taken when COND holds on CMP (compared
   against zero), targeting PC + 4*DISP.  */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    /* NOTE(review): disp << 2 left-shifts a possibly negative value,
       which is undefined behavior in C; consider disp * 4.  */
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

/* Emit an integer conditional branch on register RA.  MASK selects
   testing only bit 0 (BLBC/BLBS forms) instead of the whole value.  */
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
477
/* Fold -0.0 for comparison with COND.  */

/* Transform SRC into DEST so that an integer comparison of DEST
   against zero gives the right answer for COND even when SRC holds
   the IEEE -0.0 bit pattern (sign bit only).  */
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

/* Emit a floating-point conditional branch on FP register RA.  */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
525
/* Emit an integer conditional move: RC = (islit ? LIT : RB) when COND
   holds on RA (bit 0 only if MASK).  Implemented by branching over the
   move on the inverted condition.  */
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

/* Emit a floating-point conditional move: FRC = FRB when COND holds
   on FRA (with -0.0 folded; see gen_fold_mzero).  */
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
585
/* Bits of the fn11 instruction field encoding FP qualifiers.  */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

/* Emit code to set the softfloat rounding mode demanded by FN11,
   skipping the store if the mode is already in effect for this TB
   (tracked in ctx->tb_rm).  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic mode: read the current FPCR setting at runtime.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

/* Emit code to set the softfloat flush-to-zero flag demanded by FN11,
   skipping the store if already in effect (tracked in ctx->tb_ftz).  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
664
/* Return a fresh temp holding FP register REG, run through the input
   helper appropriate for the QUAL_S and comparison cases; REG == 31
   yields zero.  Caller frees the temp.  */
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

/* Emit code to clear the accumulated softfloat exception flags.  */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

/* Emit code to raise any accumulated FP exceptions for destination RC,
   masking out the flag bits in IGNORE first.  */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

/* As above, but ignore inexact unless the /I qualifier is present.  */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
f24518b5 734
/* Emit CVTLQ: rearrange the 32-bit value stored in the FP register
   longword layout into a sign-extended 64-bit integer.  */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

/* Emit CVTQL: the inverse rearrangement of gen_fcvtlq.  */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

/* CVTQL/V: as gen_fcvtql, but first raise an integer-overflow
   arithmetic exception if the source does not fit in 32 bits.  */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
791
/* Define a two-operand VAX FP op wrapper gen_fNAME(rb, rc) that calls
   the corresponding helper, substituting zero when RB is $31.  */
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

/* Emit a two-operand IEEE FP op: apply the rounding/FTZ qualifiers,
   clear flags, call HELPER, then raise accumulated exceptions.  */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

/* Define gen_fNAME(ctx, rb, rc, fn11) forwarding to gen_ieee_arith2.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
847
/* Emit CVTTQ (T-float to quadword integer), special-casing the common
   qualifier combinations with dedicated helpers.  */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        /* Suppress overflow/inexact unless their qualifiers are set.  */
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
887
/* Emit an integer-to-FP conversion (CVTQS/CVTQT) via HELPER, with
   rounding qualifier applied.  */
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

/* Define gen_fNAME(ctx, rb, rc, fn11) forwarding to gen_ieee_intcvt.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
931
/* Common implementation of the CPYS family: combine the MASK bits of
   register RA (complemented if INV_A) with the non-MASK bits of RB.
   ZA/ZB track operands statically known to be zero ($31).  */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask.  */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    /* Combine, skipping operands known to be zero.  */
    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

/* CPYS: copy RA's sign bit onto RB.  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN: copy the complement of RA's sign bit onto RB.  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE: copy RA's sign and exponent field onto RB.  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1006
/* Define a three-operand VAX FP op wrapper gen_fNAME(ra, rb, rc) that
   calls the helper, substituting zero for $31 operands.  */
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

/* Emit a three-operand IEEE FP op: apply rounding/FTZ qualifiers,
   clear flags, call HELPER, then raise accumulated exceptions.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

/* Define gen_fNAME(ctx, ra, rb, rc, fn11) forwarding to gen_ieee_arith3.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1088
/* Emit an IEEE FP comparison: like gen_ieee_arith3, but inputs go
   through the comparison form of the input helper and no rounding
   qualifier applies.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

/* Define gen_fNAME(ctx, ra, rb, rc, fn11) forwarding to gen_ieee_compare.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
a7812ae4 1122
/* Expand an 8-bit ZAPNOT literal into a 64-bit byte mask: bit i of
   LIT set means byte i of the result is 0xff.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    /* Walk from the most significant byte down, shifting the mask up
       and filling in 0xff for each selected byte.  */
    for (byte = 7; byte >= 0; byte--) {
        mask <<= 8;
        if (lit & (1 << byte)) {
            mask |= 0xffull;
        }
    }
    return mask;
}
1134
87d98f95
RH
1135/* Implement zapnot with an immediate operand, which expands to some
1136 form of immediate AND. This is a basic building block in the
1137 definition of many of the other byte manipulation instructions. */
248c42f3 1138static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1139{
87d98f95
RH
1140 switch (lit) {
1141 case 0x00:
248c42f3 1142 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1143 break;
1144 case 0x01:
248c42f3 1145 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1146 break;
1147 case 0x03:
248c42f3 1148 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1149 break;
1150 case 0x0f:
248c42f3 1151 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1152 break;
1153 case 0xff:
248c42f3 1154 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1155 break;
1156 default:
248c42f3 1157 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1158 break;
1159 }
1160}
1161
1162static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1163{
1164 if (unlikely(rc == 31))
1165 return;
1166 else if (unlikely(ra == 31))
1167 tcg_gen_movi_i64(cpu_ir[rc], 0);
1168 else if (islit)
248c42f3 1169 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1170 else
1171 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1172}
1173
1174static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1175{
1176 if (unlikely(rc == 31))
1177 return;
1178 else if (unlikely(ra == 31))
1179 tcg_gen_movi_i64(cpu_ir[rc], 0);
1180 else if (islit)
248c42f3 1181 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1182 else
1183 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1184}
1185
1186
/* EXTWH, EXTLH, EXTQH */
/* Extract-high: shift Ra left so the high part of the datum lands at
   bit 0, then zap to the instruction's byte width.  The shift count is
   64 - 8*(B mod 8), reduced mod 64 so that B mod 8 == 0 yields a shift
   of zero rather than an out-of-range 64.  */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            /* Constant shift amount, folded at translation time.  */
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            /* Compute (64 - 8*(B & 7)) & 63 as (-(8*(B & 7))) & 63;
               negation then mask gives the same residue mod 64.  */
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        /* Keep only the bytes of the instruction's data width.  */
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1211
/* EXTBL, EXTWL, EXTLL, EXTQL */
/* Extract-low: shift Ra right by 8*(B mod 8) bits, then zap to the
   instruction's byte width.  */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            /* Constant byte offset, folded at translation time.  */
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            /* Runtime shift count: 8 * (B & 7), always < 64.  */
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        /* Keep only the bytes of the instruction's data width.  */
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1233
/* INSWH, INSLH, INSQH */
/* Insert-high: position the zapped datum so its high part occupies the
   low bytes of the result, i.e. a right shift by 64 - 8*(B mod 8).  */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        /* With a zero byte offset the high part is empty.  */
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1275
/* INSBL, INSWL, INSLL, INSQL */
/* Insert-low: zap the datum to its byte width, then shift it left by
   8*(B mod 8) bits into position.  */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Constant byte offset, folded at translation time.  */
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            /* Runtime shift count: 8 * (B & 7), always < 64.  */
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1305
/* MSKWH, MSKLH, MSKQH */
/* Mask-high: clear from Ra the bytes that the high part of a datum at
   byte offset B would occupy, i.e. Ra & ~(mask >> (64 - 8*(B mod 8))).  */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Fold the whole mask computation at translation time:
           shift the 16-bit-wide mask image left by the byte offset
           and keep bits <15:8>.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1343
/* MSKBL, MSKWL, MSKLL, MSKQL */
/* Mask-low: clear from Ra the bytes that a datum of the instruction's
   width at byte offset B would occupy, i.e. Ra & ~(mask << 8*(B mod 8)).  */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Literal offset: fold the shifted byte mask at translation time.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* Expand the byte mask to a 64-bit mask, shift it into place
           by 8*(B & 7), and clear those bytes from Ra.  */
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1369
/* Code to call arith3 helpers */
/* Expand a three-operand integer wrapper that always goes through the
   TCG helper: R31 source reads become constant zero, literal operands
   become constant temporaries, and the result write is skipped when
   Rc == R31.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        /* Ra == R31 reads as zero.  */                               \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
/* Expand a two-operand MVI wrapper (pack/unpack byte operations):
   Rb == R31 reads as zero, Rc == R31 discards the result, otherwise
   the TCG helper does the work.  */
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
b3249f63 1428
9e05960f
RH
1429static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1430 int islit, uint8_t lit)
01ff9cc8 1431{
9e05960f 1432 TCGv va, vb;
01ff9cc8 1433
9e05960f 1434 if (unlikely(rc == 31)) {
13e4df99 1435 return;
9e05960f 1436 }
01ff9cc8 1437
9e05960f
RH
1438 if (ra == 31) {
1439 va = tcg_const_i64(0);
1440 } else {
1441 va = cpu_ir[ra];
1442 }
1443 if (islit) {
1444 vb = tcg_const_i64(lit);
1445 } else {
1446 vb = cpu_ir[rb];
1447 }
01ff9cc8 1448
9e05960f 1449 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1450
9e05960f
RH
1451 if (ra == 31) {
1452 tcg_temp_free(va);
1453 }
1454 if (islit) {
1455 tcg_temp_free(vb);
1456 }
01ff9cc8
AJ
1457}
1458
/* Emit code for RC/RS: read the per-cpu interrupt flag into Ra (unless
   Ra == R31), then overwrite the flag with SET (0 for RC, 1 for RS —
   presumably; confirm against the caller in translate_one).  */
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        /* intr_flag is a byte field; zero-extend it into the 64-bit reg.  */
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}
1471
/* Translate a CALL_PAL instruction.  Trivial OSF/1 PALcode entry points
   are open-coded here as register moves; everything else falls back to
   raising EXCP_CALL_PAL (or an invalid-opcode exception) so the real
   PALcode emulation can handle it.  Returns the TB exit status.  */
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            /* Unhandled unprivileged entry: take the exception.  */
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUState, cpu_index));
            break;

        default:
            /* Unhandled privileged entry: take the exception.  */
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    /* Out-of-range palcode, or privileged call from user mode.  */
    return gen_invalid(ctx);
}
1566
26b46094
RH
1567#ifndef CONFIG_USER_ONLY
1568
1569#define PR_BYTE 0x100000
1570#define PR_LONG 0x200000
1571
1572static int cpu_pr_data(int pr)
1573{
1574 switch (pr) {
1575 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1576 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1577 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1578 case 3: return offsetof(CPUAlphaState, trap_arg0);
1579 case 4: return offsetof(CPUAlphaState, trap_arg1);
1580 case 5: return offsetof(CPUAlphaState, trap_arg2);
1581 case 6: return offsetof(CPUAlphaState, exc_addr);
1582 case 7: return offsetof(CPUAlphaState, palbr);
1583 case 8: return offsetof(CPUAlphaState, ptbr);
1584 case 9: return offsetof(CPUAlphaState, vptptr);
1585 case 10: return offsetof(CPUAlphaState, unique);
1586 case 11: return offsetof(CPUAlphaState, sysval);
1587 case 12: return offsetof(CPUAlphaState, usp);
1588
1589 case 32 ... 39:
1590 return offsetof(CPUAlphaState, shadow[pr - 32]);
1591 case 40 ... 63:
1592 return offsetof(CPUAlphaState, scratch[pr - 40]);
c781cf96
RH
1593
1594 case 251:
1595 return offsetof(CPUAlphaState, alarm_expire);
26b46094
RH
1596 }
1597 return 0;
1598}
1599
/* MFPR: read processor register REGNO into Ra.  Register 250 (wall-clock
   time) goes through a helper and, under icount, is bracketed by
   gen_io_start/gen_io_end and forces a TB exit (EXIT_PC_STALE) so the
   I/O accounting stays correct.  All other registers are plain loads.  */
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            gen_helper_get_time(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
1636
bc24270e 1637static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
26b46094
RH
1638{
1639 TCGv tmp;
bc24270e 1640 int data;
26b46094
RH
1641
1642 if (rb == 31) {
1643 tmp = tcg_const_i64(0);
1644 } else {
1645 tmp = cpu_ir[rb];
1646 }
1647
bc24270e
RH
1648 switch (regno) {
1649 case 255:
3b4fefd6
RH
1650 /* TBIA */
1651 gen_helper_tbia();
bc24270e
RH
1652 break;
1653
1654 case 254:
3b4fefd6
RH
1655 /* TBIS */
1656 gen_helper_tbis(tmp);
bc24270e
RH
1657 break;
1658
1659 case 253:
1660 /* WAIT */
1661 tmp = tcg_const_i64(1);
1662 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUState, halted));
1663 return gen_excp(ctx, EXCP_HLT, 0);
1664
034ebc27
RH
1665 case 252:
1666 /* HALT */
1667 gen_helper_halt(tmp);
1668 return EXIT_PC_STALE;
1669
c781cf96
RH
1670 case 251:
1671 /* ALARM */
1672 gen_helper_set_alarm(tmp);
1673 break;
1674
bc24270e 1675 default:
3b4fefd6
RH
1676 /* The basic registers are data only, and unknown registers
1677 are read-zero, write-ignore. */
bc24270e 1678 data = cpu_pr_data(regno);
3b4fefd6
RH
1679 if (data != 0) {
1680 if (data & PR_BYTE) {
1681 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1682 } else if (data & PR_LONG) {
1683 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1684 } else {
1685 tcg_gen_st_i64(tmp, cpu_env, data);
1686 }
26b46094 1687 }
bc24270e 1688 break;
26b46094
RH
1689 }
1690
1691 if (rb == 31) {
1692 tcg_temp_free(tmp);
1693 }
bc24270e
RH
1694
1695 return NO_EXIT;
26b46094
RH
1696}
1697#endif /* !USER_ONLY*/
1698
4af70374 1699static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1700{
1701 uint32_t palcode;
efa64351
MT
1702 int32_t disp21, disp16;
1703#ifndef CONFIG_USER_ONLY
1704 int32_t disp12;
1705#endif
f88fe4e3 1706 uint16_t fn11;
b6fb147c 1707 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1708 uint8_t lit;
4af70374 1709 ExitStatus ret;
4c9649a9
JM
1710
1711 /* Decode all instruction fields */
1712 opc = insn >> 26;
1713 ra = (insn >> 21) & 0x1F;
1714 rb = (insn >> 16) & 0x1F;
1715 rc = insn & 0x1F;
13e4df99 1716 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1717 if (rb == 31 && !islit) {
1718 islit = 1;
1719 lit = 0;
1720 } else
1721 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1722 palcode = insn & 0x03FFFFFF;
1723 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1724 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1725#ifndef CONFIG_USER_ONLY
4c9649a9 1726 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1727#endif
4c9649a9
JM
1728 fn11 = (insn >> 5) & 0x000007FF;
1729 fpfn = fn11 & 0x3F;
1730 fn7 = (insn >> 5) & 0x0000007F;
806991da 1731 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1732 opc, ra, rb, rc, disp16);
806991da 1733
4af70374 1734 ret = NO_EXIT;
4c9649a9
JM
1735 switch (opc) {
1736 case 0x00:
1737 /* CALL_PAL */
2ace7e55
RH
1738 ret = gen_call_pal(ctx, palcode);
1739 break;
4c9649a9
JM
1740 case 0x01:
1741 /* OPC01 */
1742 goto invalid_opc;
1743 case 0x02:
1744 /* OPC02 */
1745 goto invalid_opc;
1746 case 0x03:
1747 /* OPC03 */
1748 goto invalid_opc;
1749 case 0x04:
1750 /* OPC04 */
1751 goto invalid_opc;
1752 case 0x05:
1753 /* OPC05 */
1754 goto invalid_opc;
1755 case 0x06:
1756 /* OPC06 */
1757 goto invalid_opc;
1758 case 0x07:
1759 /* OPC07 */
1760 goto invalid_opc;
1761 case 0x08:
1762 /* LDA */
1ef4ef4e 1763 if (likely(ra != 31)) {
496cb5b9 1764 if (rb != 31)
3761035f
AJ
1765 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1766 else
1767 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1768 }
4c9649a9
JM
1769 break;
1770 case 0x09:
1771 /* LDAH */
1ef4ef4e 1772 if (likely(ra != 31)) {
496cb5b9 1773 if (rb != 31)
3761035f
AJ
1774 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1775 else
1776 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1777 }
4c9649a9
JM
1778 break;
1779 case 0x0A:
1780 /* LDBU */
a18ad893
RH
1781 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1782 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1783 break;
1784 }
1785 goto invalid_opc;
4c9649a9
JM
1786 case 0x0B:
1787 /* LDQ_U */
f18cd223 1788 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1789 break;
1790 case 0x0C:
1791 /* LDWU */
a18ad893
RH
1792 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1793 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1794 break;
1795 }
1796 goto invalid_opc;
4c9649a9
JM
1797 case 0x0D:
1798 /* STW */
6910b8f6 1799 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1800 break;
1801 case 0x0E:
1802 /* STB */
6910b8f6 1803 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1804 break;
1805 case 0x0F:
1806 /* STQ_U */
6910b8f6 1807 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1808 break;
1809 case 0x10:
1810 switch (fn7) {
1811 case 0x00:
1812 /* ADDL */
30c7183b
AJ
1813 if (likely(rc != 31)) {
1814 if (ra != 31) {
1815 if (islit) {
1816 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1817 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1818 } else {
30c7183b
AJ
1819 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1820 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1821 }
30c7183b
AJ
1822 } else {
1823 if (islit)
dfaa8583 1824 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1825 else
dfaa8583 1826 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1827 }
1828 }
4c9649a9
JM
1829 break;
1830 case 0x02:
1831 /* S4ADDL */
30c7183b
AJ
1832 if (likely(rc != 31)) {
1833 if (ra != 31) {
a7812ae4 1834 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1835 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1836 if (islit)
1837 tcg_gen_addi_i64(tmp, tmp, lit);
1838 else
1839 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1840 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1841 tcg_temp_free(tmp);
30c7183b
AJ
1842 } else {
1843 if (islit)
1844 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1845 else
dfaa8583 1846 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1847 }
1848 }
4c9649a9
JM
1849 break;
1850 case 0x09:
1851 /* SUBL */
30c7183b
AJ
1852 if (likely(rc != 31)) {
1853 if (ra != 31) {
dfaa8583 1854 if (islit)
30c7183b 1855 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1856 else
30c7183b 1857 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1858 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1859 } else {
1860 if (islit)
1861 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1862 else {
30c7183b
AJ
1863 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1864 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1865 }
1866 }
4c9649a9
JM
1867 break;
1868 case 0x0B:
1869 /* S4SUBL */
30c7183b
AJ
1870 if (likely(rc != 31)) {
1871 if (ra != 31) {
a7812ae4 1872 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1873 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1874 if (islit)
1875 tcg_gen_subi_i64(tmp, tmp, lit);
1876 else
1877 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1878 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1879 tcg_temp_free(tmp);
30c7183b
AJ
1880 } else {
1881 if (islit)
1882 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1883 else {
30c7183b
AJ
1884 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1885 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1886 }
30c7183b
AJ
1887 }
1888 }
4c9649a9
JM
1889 break;
1890 case 0x0F:
1891 /* CMPBGE */
a7812ae4 1892 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1893 break;
1894 case 0x12:
1895 /* S8ADDL */
30c7183b
AJ
1896 if (likely(rc != 31)) {
1897 if (ra != 31) {
a7812ae4 1898 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1899 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1900 if (islit)
1901 tcg_gen_addi_i64(tmp, tmp, lit);
1902 else
1903 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1904 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1905 tcg_temp_free(tmp);
30c7183b
AJ
1906 } else {
1907 if (islit)
1908 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1909 else
dfaa8583 1910 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1911 }
1912 }
4c9649a9
JM
1913 break;
1914 case 0x1B:
1915 /* S8SUBL */
30c7183b
AJ
1916 if (likely(rc != 31)) {
1917 if (ra != 31) {
a7812ae4 1918 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1919 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1920 if (islit)
1921 tcg_gen_subi_i64(tmp, tmp, lit);
1922 else
1923 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1924 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1925 tcg_temp_free(tmp);
30c7183b
AJ
1926 } else {
1927 if (islit)
1928 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1929 else
30c7183b
AJ
1930 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1931 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1932 }
30c7183b
AJ
1933 }
1934 }
4c9649a9
JM
1935 break;
1936 case 0x1D:
1937 /* CMPULT */
01ff9cc8 1938 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1939 break;
1940 case 0x20:
1941 /* ADDQ */
30c7183b
AJ
1942 if (likely(rc != 31)) {
1943 if (ra != 31) {
1944 if (islit)
1945 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1946 else
dfaa8583 1947 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1948 } else {
1949 if (islit)
1950 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1951 else
dfaa8583 1952 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1953 }
1954 }
4c9649a9
JM
1955 break;
1956 case 0x22:
1957 /* S4ADDQ */
30c7183b
AJ
1958 if (likely(rc != 31)) {
1959 if (ra != 31) {
a7812ae4 1960 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1961 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1962 if (islit)
1963 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1964 else
1965 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1966 tcg_temp_free(tmp);
30c7183b
AJ
1967 } else {
1968 if (islit)
1969 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1970 else
dfaa8583 1971 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1972 }
1973 }
4c9649a9
JM
1974 break;
1975 case 0x29:
1976 /* SUBQ */
30c7183b
AJ
1977 if (likely(rc != 31)) {
1978 if (ra != 31) {
1979 if (islit)
1980 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1981 else
dfaa8583 1982 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1983 } else {
1984 if (islit)
1985 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1986 else
dfaa8583 1987 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1988 }
1989 }
4c9649a9
JM
1990 break;
1991 case 0x2B:
1992 /* S4SUBQ */
30c7183b
AJ
1993 if (likely(rc != 31)) {
1994 if (ra != 31) {
a7812ae4 1995 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1996 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1997 if (islit)
1998 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1999 else
2000 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2001 tcg_temp_free(tmp);
30c7183b
AJ
2002 } else {
2003 if (islit)
2004 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2005 else
dfaa8583 2006 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2007 }
2008 }
4c9649a9
JM
2009 break;
2010 case 0x2D:
2011 /* CMPEQ */
01ff9cc8 2012 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
2013 break;
2014 case 0x32:
2015 /* S8ADDQ */
30c7183b
AJ
2016 if (likely(rc != 31)) {
2017 if (ra != 31) {
a7812ae4 2018 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2019 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2020 if (islit)
2021 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2022 else
2023 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2024 tcg_temp_free(tmp);
30c7183b
AJ
2025 } else {
2026 if (islit)
2027 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2028 else
dfaa8583 2029 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2030 }
2031 }
4c9649a9
JM
2032 break;
2033 case 0x3B:
2034 /* S8SUBQ */
30c7183b
AJ
2035 if (likely(rc != 31)) {
2036 if (ra != 31) {
a7812ae4 2037 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2038 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2039 if (islit)
2040 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2041 else
2042 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2043 tcg_temp_free(tmp);
30c7183b
AJ
2044 } else {
2045 if (islit)
2046 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2047 else
dfaa8583 2048 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2049 }
2050 }
4c9649a9
JM
2051 break;
2052 case 0x3D:
2053 /* CMPULE */
01ff9cc8 2054 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2055 break;
2056 case 0x40:
2057 /* ADDL/V */
a7812ae4 2058 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2059 break;
2060 case 0x49:
2061 /* SUBL/V */
a7812ae4 2062 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2063 break;
2064 case 0x4D:
2065 /* CMPLT */
01ff9cc8 2066 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2067 break;
2068 case 0x60:
2069 /* ADDQ/V */
a7812ae4 2070 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2071 break;
2072 case 0x69:
2073 /* SUBQ/V */
a7812ae4 2074 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2075 break;
2076 case 0x6D:
2077 /* CMPLE */
01ff9cc8 2078 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2079 break;
2080 default:
2081 goto invalid_opc;
2082 }
2083 break;
2084 case 0x11:
2085 switch (fn7) {
2086 case 0x00:
2087 /* AND */
30c7183b 2088 if (likely(rc != 31)) {
dfaa8583 2089 if (ra == 31)
30c7183b
AJ
2090 tcg_gen_movi_i64(cpu_ir[rc], 0);
2091 else if (islit)
2092 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2093 else
2094 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2095 }
4c9649a9
JM
2096 break;
2097 case 0x08:
2098 /* BIC */
30c7183b
AJ
2099 if (likely(rc != 31)) {
2100 if (ra != 31) {
2101 if (islit)
2102 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2103 else
2104 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2105 } else
2106 tcg_gen_movi_i64(cpu_ir[rc], 0);
2107 }
4c9649a9
JM
2108 break;
2109 case 0x14:
2110 /* CMOVLBS */
bbe1dab4 2111 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2112 break;
2113 case 0x16:
2114 /* CMOVLBC */
bbe1dab4 2115 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2116 break;
2117 case 0x20:
2118 /* BIS */
30c7183b
AJ
2119 if (likely(rc != 31)) {
2120 if (ra != 31) {
2121 if (islit)
2122 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2123 else
30c7183b 2124 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2125 } else {
30c7183b
AJ
2126 if (islit)
2127 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2128 else
dfaa8583 2129 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2130 }
4c9649a9
JM
2131 }
2132 break;
2133 case 0x24:
2134 /* CMOVEQ */
bbe1dab4 2135 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2136 break;
2137 case 0x26:
2138 /* CMOVNE */
bbe1dab4 2139 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2140 break;
2141 case 0x28:
2142 /* ORNOT */
30c7183b 2143 if (likely(rc != 31)) {
dfaa8583 2144 if (ra != 31) {
30c7183b
AJ
2145 if (islit)
2146 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2147 else
2148 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2149 } else {
2150 if (islit)
2151 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2152 else
2153 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2154 }
2155 }
4c9649a9
JM
2156 break;
2157 case 0x40:
2158 /* XOR */
30c7183b
AJ
2159 if (likely(rc != 31)) {
2160 if (ra != 31) {
2161 if (islit)
2162 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2163 else
dfaa8583 2164 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2165 } else {
2166 if (islit)
2167 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2168 else
dfaa8583 2169 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2170 }
2171 }
4c9649a9
JM
2172 break;
2173 case 0x44:
2174 /* CMOVLT */
bbe1dab4 2175 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2176 break;
2177 case 0x46:
2178 /* CMOVGE */
bbe1dab4 2179 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2180 break;
2181 case 0x48:
2182 /* EQV */
30c7183b
AJ
2183 if (likely(rc != 31)) {
2184 if (ra != 31) {
2185 if (islit)
2186 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2187 else
2188 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2189 } else {
2190 if (islit)
2191 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2192 else
dfaa8583 2193 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2194 }
2195 }
4c9649a9
JM
2196 break;
2197 case 0x61:
2198 /* AMASK */
ae8ecd42 2199 if (likely(rc != 31)) {
a18ad893
RH
2200 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2201
2202 if (islit) {
2203 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2204 } else {
2205 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2206 }
ae8ecd42 2207 }
4c9649a9
JM
2208 break;
2209 case 0x64:
2210 /* CMOVLE */
bbe1dab4 2211 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2212 break;
2213 case 0x66:
2214 /* CMOVGT */
bbe1dab4 2215 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2216 break;
2217 case 0x6C:
2218 /* IMPLVER */
3761035f 2219 if (rc != 31)
8579095b 2220 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
2221 break;
2222 default:
2223 goto invalid_opc;
2224 }
2225 break;
2226 case 0x12:
2227 switch (fn7) {
2228 case 0x02:
2229 /* MSKBL */
14ab1634 2230 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2231 break;
2232 case 0x06:
2233 /* EXTBL */
377a43b6 2234 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2235 break;
2236 case 0x0B:
2237 /* INSBL */
248c42f3 2238 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2239 break;
2240 case 0x12:
2241 /* MSKWL */
14ab1634 2242 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2243 break;
2244 case 0x16:
2245 /* EXTWL */
377a43b6 2246 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2247 break;
2248 case 0x1B:
2249 /* INSWL */
248c42f3 2250 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2251 break;
2252 case 0x22:
2253 /* MSKLL */
14ab1634 2254 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2255 break;
2256 case 0x26:
2257 /* EXTLL */
377a43b6 2258 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2259 break;
2260 case 0x2B:
2261 /* INSLL */
248c42f3 2262 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2263 break;
2264 case 0x30:
2265 /* ZAP */
a7812ae4 2266 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2267 break;
2268 case 0x31:
2269 /* ZAPNOT */
a7812ae4 2270 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2271 break;
2272 case 0x32:
2273 /* MSKQL */
14ab1634 2274 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2275 break;
2276 case 0x34:
2277 /* SRL */
30c7183b
AJ
2278 if (likely(rc != 31)) {
2279 if (ra != 31) {
2280 if (islit)
2281 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2282 else {
a7812ae4 2283 TCGv shift = tcg_temp_new();
30c7183b
AJ
2284 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2285 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2286 tcg_temp_free(shift);
dfaa8583 2287 }
30c7183b
AJ
2288 } else
2289 tcg_gen_movi_i64(cpu_ir[rc], 0);
2290 }
4c9649a9
JM
2291 break;
2292 case 0x36:
2293 /* EXTQL */
377a43b6 2294 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2295 break;
2296 case 0x39:
2297 /* SLL */
30c7183b
AJ
2298 if (likely(rc != 31)) {
2299 if (ra != 31) {
2300 if (islit)
2301 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2302 else {
a7812ae4 2303 TCGv shift = tcg_temp_new();
30c7183b
AJ
2304 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2305 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2306 tcg_temp_free(shift);
dfaa8583 2307 }
30c7183b
AJ
2308 } else
2309 tcg_gen_movi_i64(cpu_ir[rc], 0);
2310 }
4c9649a9
JM
2311 break;
2312 case 0x3B:
2313 /* INSQL */
248c42f3 2314 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2315 break;
2316 case 0x3C:
2317 /* SRA */
30c7183b
AJ
2318 if (likely(rc != 31)) {
2319 if (ra != 31) {
2320 if (islit)
2321 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2322 else {
a7812ae4 2323 TCGv shift = tcg_temp_new();
30c7183b
AJ
2324 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2325 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2326 tcg_temp_free(shift);
dfaa8583 2327 }
30c7183b
AJ
2328 } else
2329 tcg_gen_movi_i64(cpu_ir[rc], 0);
2330 }
4c9649a9
JM
2331 break;
2332 case 0x52:
2333 /* MSKWH */
ffec44f1 2334 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2335 break;
2336 case 0x57:
2337 /* INSWH */
50eb6e5c 2338 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2339 break;
2340 case 0x5A:
2341 /* EXTWH */
377a43b6 2342 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2343 break;
2344 case 0x62:
2345 /* MSKLH */
ffec44f1 2346 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2347 break;
2348 case 0x67:
2349 /* INSLH */
50eb6e5c 2350 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2351 break;
2352 case 0x6A:
2353 /* EXTLH */
377a43b6 2354 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2355 break;
2356 case 0x72:
2357 /* MSKQH */
ffec44f1 2358 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2359 break;
2360 case 0x77:
2361 /* INSQH */
50eb6e5c 2362 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2363 break;
2364 case 0x7A:
2365 /* EXTQH */
377a43b6 2366 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2367 break;
2368 default:
2369 goto invalid_opc;
2370 }
2371 break;
2372 case 0x13:
2373 switch (fn7) {
2374 case 0x00:
2375 /* MULL */
30c7183b 2376 if (likely(rc != 31)) {
dfaa8583 2377 if (ra == 31)
30c7183b
AJ
2378 tcg_gen_movi_i64(cpu_ir[rc], 0);
2379 else {
2380 if (islit)
2381 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2382 else
2383 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2384 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2385 }
2386 }
4c9649a9
JM
2387 break;
2388 case 0x20:
2389 /* MULQ */
30c7183b 2390 if (likely(rc != 31)) {
dfaa8583 2391 if (ra == 31)
30c7183b
AJ
2392 tcg_gen_movi_i64(cpu_ir[rc], 0);
2393 else if (islit)
2394 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2395 else
2396 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2397 }
4c9649a9
JM
2398 break;
2399 case 0x30:
2400 /* UMULH */
a7812ae4 2401 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2402 break;
2403 case 0x40:
2404 /* MULL/V */
a7812ae4 2405 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2406 break;
2407 case 0x60:
2408 /* MULQ/V */
a7812ae4 2409 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2410 break;
2411 default:
2412 goto invalid_opc;
2413 }
2414 break;
2415 case 0x14:
f24518b5 2416 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2417 case 0x04:
2418 /* ITOFS */
a18ad893 2419 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2420 goto invalid_opc;
a18ad893 2421 }
f18cd223
AJ
2422 if (likely(rc != 31)) {
2423 if (ra != 31) {
a7812ae4 2424 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2425 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2426 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2427 tcg_temp_free_i32(tmp);
f18cd223
AJ
2428 } else
2429 tcg_gen_movi_i64(cpu_fir[rc], 0);
2430 }
4c9649a9
JM
2431 break;
2432 case 0x0A:
2433 /* SQRTF */
a18ad893
RH
2434 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2435 gen_fsqrtf(rb, rc);
2436 break;
2437 }
2438 goto invalid_opc;
4c9649a9
JM
2439 case 0x0B:
2440 /* SQRTS */
a18ad893
RH
2441 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2442 gen_fsqrts(ctx, rb, rc, fn11);
2443 break;
2444 }
2445 goto invalid_opc;
4c9649a9
JM
2446 case 0x14:
2447 /* ITOFF */
a18ad893 2448 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2449 goto invalid_opc;
a18ad893 2450 }
f18cd223
AJ
2451 if (likely(rc != 31)) {
2452 if (ra != 31) {
a7812ae4 2453 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2454 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2455 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2456 tcg_temp_free_i32(tmp);
f18cd223
AJ
2457 } else
2458 tcg_gen_movi_i64(cpu_fir[rc], 0);
2459 }
4c9649a9
JM
2460 break;
2461 case 0x24:
2462 /* ITOFT */
a18ad893 2463 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2464 goto invalid_opc;
a18ad893 2465 }
f18cd223
AJ
2466 if (likely(rc != 31)) {
2467 if (ra != 31)
2468 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2469 else
2470 tcg_gen_movi_i64(cpu_fir[rc], 0);
2471 }
4c9649a9
JM
2472 break;
2473 case 0x2A:
2474 /* SQRTG */
a18ad893
RH
2475 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2476 gen_fsqrtg(rb, rc);
2477 break;
2478 }
2479 goto invalid_opc;
4c9649a9
JM
2480 case 0x02B:
2481 /* SQRTT */
a18ad893
RH
2482 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2483 gen_fsqrtt(ctx, rb, rc, fn11);
2484 break;
2485 }
2486 goto invalid_opc;
4c9649a9
JM
2487 default:
2488 goto invalid_opc;
2489 }
2490 break;
2491 case 0x15:
2492 /* VAX floating point */
2493 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2494 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2495 case 0x00:
2496 /* ADDF */
a7812ae4 2497 gen_faddf(ra, rb, rc);
4c9649a9
JM
2498 break;
2499 case 0x01:
2500 /* SUBF */
a7812ae4 2501 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2502 break;
2503 case 0x02:
2504 /* MULF */
a7812ae4 2505 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2506 break;
2507 case 0x03:
2508 /* DIVF */
a7812ae4 2509 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2510 break;
2511 case 0x1E:
2512 /* CVTDG */
2513#if 0 // TODO
a7812ae4 2514 gen_fcvtdg(rb, rc);
4c9649a9
JM
2515#else
2516 goto invalid_opc;
2517#endif
2518 break;
2519 case 0x20:
2520 /* ADDG */
a7812ae4 2521 gen_faddg(ra, rb, rc);
4c9649a9
JM
2522 break;
2523 case 0x21:
2524 /* SUBG */
a7812ae4 2525 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2526 break;
2527 case 0x22:
2528 /* MULG */
a7812ae4 2529 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2530 break;
2531 case 0x23:
2532 /* DIVG */
a7812ae4 2533 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2534 break;
2535 case 0x25:
2536 /* CMPGEQ */
a7812ae4 2537 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2538 break;
2539 case 0x26:
2540 /* CMPGLT */
a7812ae4 2541 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2542 break;
2543 case 0x27:
2544 /* CMPGLE */
a7812ae4 2545 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2546 break;
2547 case 0x2C:
2548 /* CVTGF */
a7812ae4 2549 gen_fcvtgf(rb, rc);
4c9649a9
JM
2550 break;
2551 case 0x2D:
2552 /* CVTGD */
2553#if 0 // TODO
a7812ae4 2554 gen_fcvtgd(rb, rc);
4c9649a9
JM
2555#else
2556 goto invalid_opc;
2557#endif
2558 break;
2559 case 0x2F:
2560 /* CVTGQ */
a7812ae4 2561 gen_fcvtgq(rb, rc);
4c9649a9
JM
2562 break;
2563 case 0x3C:
2564 /* CVTQF */
a7812ae4 2565 gen_fcvtqf(rb, rc);
4c9649a9
JM
2566 break;
2567 case 0x3E:
2568 /* CVTQG */
a7812ae4 2569 gen_fcvtqg(rb, rc);
4c9649a9
JM
2570 break;
2571 default:
2572 goto invalid_opc;
2573 }
2574 break;
2575 case 0x16:
2576 /* IEEE floating-point */
f24518b5 2577 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2578 case 0x00:
2579 /* ADDS */
f24518b5 2580 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2581 break;
2582 case 0x01:
2583 /* SUBS */
f24518b5 2584 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2585 break;
2586 case 0x02:
2587 /* MULS */
f24518b5 2588 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2589 break;
2590 case 0x03:
2591 /* DIVS */
f24518b5 2592 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2593 break;
2594 case 0x20:
2595 /* ADDT */
f24518b5 2596 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2597 break;
2598 case 0x21:
2599 /* SUBT */
f24518b5 2600 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2601 break;
2602 case 0x22:
2603 /* MULT */
f24518b5 2604 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2605 break;
2606 case 0x23:
2607 /* DIVT */
f24518b5 2608 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2609 break;
2610 case 0x24:
2611 /* CMPTUN */
f24518b5 2612 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2613 break;
2614 case 0x25:
2615 /* CMPTEQ */
f24518b5 2616 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2617 break;
2618 case 0x26:
2619 /* CMPTLT */
f24518b5 2620 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2621 break;
2622 case 0x27:
2623 /* CMPTLE */
f24518b5 2624 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2625 break;
2626 case 0x2C:
a74b4d2c 2627 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2628 /* CVTST */
f24518b5 2629 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2630 } else {
2631 /* CVTTS */
f24518b5 2632 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2633 }
2634 break;
2635 case 0x2F:
2636 /* CVTTQ */
f24518b5 2637 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2638 break;
2639 case 0x3C:
2640 /* CVTQS */
f24518b5 2641 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2642 break;
2643 case 0x3E:
2644 /* CVTQT */
f24518b5 2645 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2646 break;
2647 default:
2648 goto invalid_opc;
2649 }
2650 break;
2651 case 0x17:
2652 switch (fn11) {
2653 case 0x010:
2654 /* CVTLQ */
a7812ae4 2655 gen_fcvtlq(rb, rc);
4c9649a9
JM
2656 break;
2657 case 0x020:
f18cd223 2658 if (likely(rc != 31)) {
a06d48d9 2659 if (ra == rb) {
4c9649a9 2660 /* FMOV */
a06d48d9
RH
2661 if (ra == 31)
2662 tcg_gen_movi_i64(cpu_fir[rc], 0);
2663 else
2664 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2665 } else {
f18cd223 2666 /* CPYS */
a7812ae4 2667 gen_fcpys(ra, rb, rc);
a06d48d9 2668 }
4c9649a9
JM
2669 }
2670 break;
2671 case 0x021:
2672 /* CPYSN */
a7812ae4 2673 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2674 break;
2675 case 0x022:
2676 /* CPYSE */
a7812ae4 2677 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2678 break;
2679 case 0x024:
2680 /* MT_FPCR */
f18cd223 2681 if (likely(ra != 31))
a7812ae4 2682 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2683 else {
2684 TCGv tmp = tcg_const_i64(0);
a7812ae4 2685 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2686 tcg_temp_free(tmp);
2687 }
4c9649a9
JM
2688 break;
2689 case 0x025:
2690 /* MF_FPCR */
f18cd223 2691 if (likely(ra != 31))
a7812ae4 2692 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2693 break;
2694 case 0x02A:
2695 /* FCMOVEQ */
bbe1dab4 2696 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2697 break;
2698 case 0x02B:
2699 /* FCMOVNE */
bbe1dab4 2700 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2701 break;
2702 case 0x02C:
2703 /* FCMOVLT */
bbe1dab4 2704 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2705 break;
2706 case 0x02D:
2707 /* FCMOVGE */
bbe1dab4 2708 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2709 break;
2710 case 0x02E:
2711 /* FCMOVLE */
bbe1dab4 2712 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2713 break;
2714 case 0x02F:
2715 /* FCMOVGT */
bbe1dab4 2716 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2717 break;
2718 case 0x030:
2719 /* CVTQL */
a7812ae4 2720 gen_fcvtql(rb, rc);
4c9649a9
JM
2721 break;
2722 case 0x130:
2723 /* CVTQL/V */
4c9649a9
JM
2724 case 0x530:
2725 /* CVTQL/SV */
735cf45f
RH
2726 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2727 /v doesn't do. The only thing I can think is that /sv is a
2728 valid instruction merely for completeness in the ISA. */
2729 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2730 break;
2731 default:
2732 goto invalid_opc;
2733 }
2734 break;
2735 case 0x18:
2736 switch ((uint16_t)disp16) {
2737 case 0x0000:
2738 /* TRAPB */
4af70374 2739 /* No-op. */
4c9649a9
JM
2740 break;
2741 case 0x0400:
2742 /* EXCB */
4af70374 2743 /* No-op. */
4c9649a9
JM
2744 break;
2745 case 0x4000:
2746 /* MB */
2747 /* No-op */
2748 break;
2749 case 0x4400:
2750 /* WMB */
2751 /* No-op */
2752 break;
2753 case 0x8000:
2754 /* FETCH */
2755 /* No-op */
2756 break;
2757 case 0xA000:
2758 /* FETCH_M */
2759 /* No-op */
2760 break;
2761 case 0xC000:
2762 /* RPCC */
a9406ea1
RH
2763 if (ra != 31) {
2764 if (use_icount) {
2765 gen_io_start();
2766 gen_helper_load_pcc(cpu_ir[ra]);
2767 gen_io_end();
2768 ret = EXIT_PC_STALE;
2769 } else {
2770 gen_helper_load_pcc(cpu_ir[ra]);
2771 }
2772 }
4c9649a9
JM
2773 break;
2774 case 0xE000:
2775 /* RC */
ac316ca4 2776 gen_rx(ra, 0);
4c9649a9
JM
2777 break;
2778 case 0xE800:
2779 /* ECB */
4c9649a9
JM
2780 break;
2781 case 0xF000:
2782 /* RS */
ac316ca4 2783 gen_rx(ra, 1);
4c9649a9
JM
2784 break;
2785 case 0xF800:
2786 /* WH64 */
2787 /* No-op */
2788 break;
2789 default:
2790 goto invalid_opc;
2791 }
2792 break;
2793 case 0x19:
2794 /* HW_MFPR (PALcode) */
26b46094 2795#ifndef CONFIG_USER_ONLY
a18ad893 2796 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
c781cf96 2797 return gen_mfpr(ra, insn & 0xffff);
26b46094
RH
2798 }
2799#endif
4c9649a9 2800 goto invalid_opc;
4c9649a9 2801 case 0x1A:
49563a72
RH
2802 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2803 prediction stack action, which of course we don't implement. */
2804 if (rb != 31) {
3761035f 2805 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2806 } else {
3761035f 2807 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2808 }
2809 if (ra != 31) {
1304ca87 2810 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2811 }
4af70374 2812 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2813 break;
2814 case 0x1B:
2815 /* HW_LD (PALcode) */
a18ad893
RH
2816#ifndef CONFIG_USER_ONLY
2817 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2818 TCGv addr;
2819
2820 if (ra == 31) {
2821 break;
2822 }
2823
2824 addr = tcg_temp_new();
8bb6e981
AJ
2825 if (rb != 31)
2826 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2827 else
2828 tcg_gen_movi_i64(addr, disp12);
2829 switch ((insn >> 12) & 0xF) {
2830 case 0x0:
b5d51029 2831 /* Longword physical access (hw_ldl/p) */
2374e73e 2832 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2833 break;
2834 case 0x1:
b5d51029 2835 /* Quadword physical access (hw_ldq/p) */
2374e73e 2836 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2837 break;
2838 case 0x2:
b5d51029 2839 /* Longword physical access with lock (hw_ldl_l/p) */
2374e73e 2840 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2841 break;
2842 case 0x3:
b5d51029 2843 /* Quadword physical access with lock (hw_ldq_l/p) */
2374e73e 2844 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2845 break;
2846 case 0x4:
b5d51029 2847 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2848 goto invalid_opc;
8bb6e981 2849 case 0x5:
b5d51029 2850 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2851 goto invalid_opc;
8bb6e981
AJ
2852 break;
2853 case 0x6:
2854 /* Incpu_ir[ra]id */
b5d51029 2855 goto invalid_opc;
8bb6e981
AJ
2856 case 0x7:
2857 /* Incpu_ir[ra]id */
b5d51029 2858 goto invalid_opc;
8bb6e981 2859 case 0x8:
b5d51029 2860 /* Longword virtual access (hw_ldl) */
2374e73e 2861 goto invalid_opc;
8bb6e981 2862 case 0x9:
b5d51029 2863 /* Quadword virtual access (hw_ldq) */
2374e73e 2864 goto invalid_opc;
8bb6e981 2865 case 0xA:
b5d51029 2866 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2867 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2868 break;
2869 case 0xB:
b5d51029 2870 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2871 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2872 break;
2873 case 0xC:
b5d51029 2874 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2875 goto invalid_opc;
8bb6e981 2876 case 0xD:
b5d51029 2877 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2878 goto invalid_opc;
8bb6e981
AJ
2879 case 0xE:
2880 /* Longword virtual access with alternate access mode and
2374e73e
RH
2881 protection checks (hw_ldl/wa) */
2882 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2883 break;
2884 case 0xF:
2885 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2886 protection checks (hw_ldq/wa) */
2887 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2888 break;
2889 }
2890 tcg_temp_free(addr);
a18ad893 2891 break;
4c9649a9 2892 }
4c9649a9 2893#endif
a18ad893 2894 goto invalid_opc;
4c9649a9
JM
2895 case 0x1C:
2896 switch (fn7) {
2897 case 0x00:
2898 /* SEXTB */
a18ad893 2899 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 2900 goto invalid_opc;
a18ad893 2901 }
ae8ecd42
AJ
2902 if (likely(rc != 31)) {
2903 if (islit)
2904 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2905 else
dfaa8583 2906 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2907 }
4c9649a9
JM
2908 break;
2909 case 0x01:
2910 /* SEXTW */
a18ad893
RH
2911 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2912 if (likely(rc != 31)) {
2913 if (islit) {
2914 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2915 } else {
2916 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2917 }
2918 }
2919 break;
ae8ecd42 2920 }
a18ad893 2921 goto invalid_opc;
4c9649a9
JM
2922 case 0x30:
2923 /* CTPOP */
a18ad893
RH
2924 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2925 if (likely(rc != 31)) {
2926 if (islit) {
2927 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2928 } else {
2929 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2930 }
2931 }
2932 break;
ae8ecd42 2933 }
a18ad893 2934 goto invalid_opc;
4c9649a9
JM
2935 case 0x31:
2936 /* PERR */
a18ad893
RH
2937 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2938 gen_perr(ra, rb, rc, islit, lit);
2939 break;
2940 }
2941 goto invalid_opc;
4c9649a9
JM
2942 case 0x32:
2943 /* CTLZ */
a18ad893
RH
2944 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2945 if (likely(rc != 31)) {
2946 if (islit) {
2947 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2948 } else {
2949 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2950 }
2951 }
2952 break;
ae8ecd42 2953 }
a18ad893 2954 goto invalid_opc;
4c9649a9
JM
2955 case 0x33:
2956 /* CTTZ */
a18ad893
RH
2957 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2958 if (likely(rc != 31)) {
2959 if (islit) {
2960 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2961 } else {
2962 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2963 }
2964 }
2965 break;
ae8ecd42 2966 }
a18ad893 2967 goto invalid_opc;
4c9649a9
JM
2968 case 0x34:
2969 /* UNPKBW */
a18ad893
RH
2970 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2971 if (real_islit || ra != 31) {
2972 goto invalid_opc;
2973 }
2974 gen_unpkbw(rb, rc);
2975 break;
2976 }
2977 goto invalid_opc;
4c9649a9 2978 case 0x35:
13e4df99 2979 /* UNPKBL */
a18ad893
RH
2980 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2981 if (real_islit || ra != 31) {
2982 goto invalid_opc;
2983 }
2984 gen_unpkbl(rb, rc);
2985 break;
2986 }
2987 goto invalid_opc;
4c9649a9
JM
2988 case 0x36:
2989 /* PKWB */
a18ad893
RH
2990 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2991 if (real_islit || ra != 31) {
2992 goto invalid_opc;
2993 }
2994 gen_pkwb(rb, rc);
2995 break;
2996 }
2997 goto invalid_opc;
4c9649a9
JM
2998 case 0x37:
2999 /* PKLB */
a18ad893
RH
3000 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3001 if (real_islit || ra != 31) {
3002 goto invalid_opc;
3003 }
3004 gen_pklb(rb, rc);
3005 break;
3006 }
3007 goto invalid_opc;
4c9649a9
JM
3008 case 0x38:
3009 /* MINSB8 */
a18ad893
RH
3010 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3011 gen_minsb8(ra, rb, rc, islit, lit);
3012 break;
3013 }
3014 goto invalid_opc;
4c9649a9
JM
3015 case 0x39:
3016 /* MINSW4 */
a18ad893
RH
3017 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3018 gen_minsw4(ra, rb, rc, islit, lit);
3019 break;
3020 }
3021 goto invalid_opc;
4c9649a9
JM
3022 case 0x3A:
3023 /* MINUB8 */
a18ad893
RH
3024 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3025 gen_minub8(ra, rb, rc, islit, lit);
3026 break;
3027 }
3028 goto invalid_opc;
4c9649a9
JM
3029 case 0x3B:
3030 /* MINUW4 */
a18ad893
RH
3031 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3032 gen_minuw4(ra, rb, rc, islit, lit);
3033 break;
3034 }
3035 goto invalid_opc;
4c9649a9
JM
3036 case 0x3C:
3037 /* MAXUB8 */
a18ad893
RH
3038 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3039 gen_maxub8(ra, rb, rc, islit, lit);
3040 break;
3041 }
3042 goto invalid_opc;
4c9649a9
JM
3043 case 0x3D:
3044 /* MAXUW4 */
a18ad893
RH
3045 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3046 gen_maxuw4(ra, rb, rc, islit, lit);
3047 break;
3048 }
3049 goto invalid_opc;
4c9649a9
JM
3050 case 0x3E:
3051 /* MAXSB8 */
a18ad893
RH
3052 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3053 gen_maxsb8(ra, rb, rc, islit, lit);
3054 break;
3055 }
3056 goto invalid_opc;
4c9649a9
JM
3057 case 0x3F:
3058 /* MAXSW4 */
a18ad893
RH
3059 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3060 gen_maxsw4(ra, rb, rc, islit, lit);
3061 break;
3062 }
3063 goto invalid_opc;
4c9649a9
JM
3064 case 0x70:
3065 /* FTOIT */
a18ad893 3066 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3067 goto invalid_opc;
a18ad893 3068 }
f18cd223
AJ
3069 if (likely(rc != 31)) {
3070 if (ra != 31)
3071 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3072 else
3073 tcg_gen_movi_i64(cpu_ir[rc], 0);
3074 }
4c9649a9
JM
3075 break;
3076 case 0x78:
3077 /* FTOIS */
a18ad893 3078 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3079 goto invalid_opc;
a18ad893 3080 }
f18cd223 3081 if (rc != 31) {
a7812ae4 3082 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3083 if (ra != 31)
a7812ae4 3084 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3085 else {
3086 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3087 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3088 tcg_temp_free(tmp2);
3089 }
3090 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3091 tcg_temp_free_i32(tmp1);
f18cd223 3092 }
4c9649a9
JM
3093 break;
3094 default:
3095 goto invalid_opc;
3096 }
3097 break;
3098 case 0x1D:
3099 /* HW_MTPR (PALcode) */
26b46094 3100#ifndef CONFIG_USER_ONLY
a18ad893 3101 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
bc24270e 3102 return gen_mtpr(ctx, rb, insn & 0xffff);
26b46094
RH
3103 }
3104#endif
4c9649a9 3105 goto invalid_opc;
4c9649a9 3106 case 0x1E:
508b43ea 3107 /* HW_RET (PALcode) */
a18ad893
RH
3108#ifndef CONFIG_USER_ONLY
3109 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3110 if (rb == 31) {
3111 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3112 address from EXC_ADDR. This turns out to be useful for our
3113 emulation PALcode, so continue to accept it. */
3114 TCGv tmp = tcg_temp_new();
3115 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
3116 gen_helper_hw_ret(tmp);
3117 tcg_temp_free(tmp);
3118 } else {
3119 gen_helper_hw_ret(cpu_ir[rb]);
3120 }
3121 ret = EXIT_PC_UPDATED;
3122 break;
4c9649a9 3123 }
4c9649a9 3124#endif
a18ad893 3125 goto invalid_opc;
4c9649a9
JM
3126 case 0x1F:
3127 /* HW_ST (PALcode) */
a18ad893
RH
3128#ifndef CONFIG_USER_ONLY
3129 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3130 TCGv addr, val;
a7812ae4 3131 addr = tcg_temp_new();
8bb6e981
AJ
3132 if (rb != 31)
3133 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3134 else
3135 tcg_gen_movi_i64(addr, disp12);
3136 if (ra != 31)
3137 val = cpu_ir[ra];
3138 else {
a7812ae4 3139 val = tcg_temp_new();
8bb6e981
AJ
3140 tcg_gen_movi_i64(val, 0);
3141 }
3142 switch ((insn >> 12) & 0xF) {
3143 case 0x0:
3144 /* Longword physical access */
2374e73e 3145 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3146 break;
3147 case 0x1:
3148 /* Quadword physical access */
2374e73e 3149 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3150 break;
3151 case 0x2:
3152 /* Longword physical access with lock */
2374e73e 3153 gen_helper_stl_c_phys(val, addr, val);
8bb6e981
AJ
3154 break;
3155 case 0x3:
3156 /* Quadword physical access with lock */
2374e73e 3157 gen_helper_stq_c_phys(val, addr, val);
8bb6e981
AJ
3158 break;
3159 case 0x4:
3160 /* Longword virtual access */
2374e73e 3161 goto invalid_opc;
8bb6e981
AJ
3162 case 0x5:
3163 /* Quadword virtual access */
2374e73e 3164 goto invalid_opc;
8bb6e981
AJ
3165 case 0x6:
3166 /* Invalid */
3167 goto invalid_opc;
3168 case 0x7:
3169 /* Invalid */
3170 goto invalid_opc;
3171 case 0x8:
3172 /* Invalid */
3173 goto invalid_opc;
3174 case 0x9:
3175 /* Invalid */
3176 goto invalid_opc;
3177 case 0xA:
3178 /* Invalid */
3179 goto invalid_opc;
3180 case 0xB:
3181 /* Invalid */
3182 goto invalid_opc;
3183 case 0xC:
3184 /* Longword virtual access with alternate access mode */
2374e73e 3185 goto invalid_opc;
8bb6e981
AJ
3186 case 0xD:
3187 /* Quadword virtual access with alternate access mode */
2374e73e 3188 goto invalid_opc;
8bb6e981
AJ
3189 case 0xE:
3190 /* Invalid */
3191 goto invalid_opc;
3192 case 0xF:
3193 /* Invalid */
3194 goto invalid_opc;
3195 }
45d46ce8 3196 if (ra == 31)
8bb6e981
AJ
3197 tcg_temp_free(val);
3198 tcg_temp_free(addr);
a18ad893 3199 break;
4c9649a9 3200 }
4c9649a9 3201#endif
a18ad893 3202 goto invalid_opc;
4c9649a9
JM
3203 case 0x20:
3204 /* LDF */
f18cd223 3205 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3206 break;
3207 case 0x21:
3208 /* LDG */
f18cd223 3209 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3210 break;
3211 case 0x22:
3212 /* LDS */
f18cd223 3213 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3214 break;
3215 case 0x23:
3216 /* LDT */
f18cd223 3217 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3218 break;
3219 case 0x24:
3220 /* STF */
6910b8f6 3221 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3222 break;
3223 case 0x25:
3224 /* STG */
6910b8f6 3225 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3226 break;
3227 case 0x26:
3228 /* STS */
6910b8f6 3229 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3230 break;
3231 case 0x27:
3232 /* STT */
6910b8f6 3233 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3234 break;
3235 case 0x28:
3236 /* LDL */
f18cd223 3237 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3238 break;
3239 case 0x29:
3240 /* LDQ */
f18cd223 3241 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3242 break;
3243 case 0x2A:
3244 /* LDL_L */
f4ed8679 3245 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3246 break;
3247 case 0x2B:
3248 /* LDQ_L */
f4ed8679 3249 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3250 break;
3251 case 0x2C:
3252 /* STL */
6910b8f6 3253 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3254 break;
3255 case 0x2D:
3256 /* STQ */
6910b8f6 3257 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3258 break;
3259 case 0x2E:
3260 /* STL_C */
6910b8f6 3261 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3262 break;
3263 case 0x2F:
3264 /* STQ_C */
6910b8f6 3265 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3266 break;
3267 case 0x30:
3268 /* BR */
4af70374 3269 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3270 break;
a7812ae4 3271 case 0x31: /* FBEQ */
4af70374 3272 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3273 break;
a7812ae4 3274 case 0x32: /* FBLT */
4af70374 3275 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3276 break;
a7812ae4 3277 case 0x33: /* FBLE */
4af70374 3278 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3279 break;
3280 case 0x34:
3281 /* BSR */
4af70374 3282 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3283 break;
a7812ae4 3284 case 0x35: /* FBNE */
4af70374 3285 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3286 break;
a7812ae4 3287 case 0x36: /* FBGE */
4af70374 3288 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3289 break;
a7812ae4 3290 case 0x37: /* FBGT */
4af70374 3291 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3292 break;
3293 case 0x38:
3294 /* BLBC */
4af70374 3295 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3296 break;
3297 case 0x39:
3298 /* BEQ */
4af70374 3299 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3300 break;
3301 case 0x3A:
3302 /* BLT */
4af70374 3303 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3304 break;
3305 case 0x3B:
3306 /* BLE */
4af70374 3307 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3308 break;
3309 case 0x3C:
3310 /* BLBS */
4af70374 3311 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3312 break;
3313 case 0x3D:
3314 /* BNE */
4af70374 3315 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3316 break;
3317 case 0x3E:
3318 /* BGE */
4af70374 3319 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3320 break;
3321 case 0x3F:
3322 /* BGT */
4af70374 3323 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3324 break;
3325 invalid_opc:
8aa3fa20 3326 ret = gen_invalid(ctx);
4c9649a9
JM
3327 break;
3328 }
3329
3330 return ret;
3331}
3332
636aa200
BS
/* Core translation loop: turn a run of Alpha instructions starting at
   tb->pc into TCG ops for translation block TB.  When SEARCH_PC is
   nonzero, also record per-op guest PC / icount metadata (gen_opc_pc,
   gen_opc_instr_start, gen_opc_icount) so a host PC can later be mapped
   back to a guest PC (see restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;          /* lj: last opc index written in search_pc mode */
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamiclly figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        /* Emit a debug exception if a breakpoint is set on this PC.
           Note the loop keeps going; the generated EXCP_DEBUG stops
           execution at run time, not translation here.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            /* Pad any opc slots emitted since the last instruction
               boundary, then record this instruction's metadata.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        /* If the last insn of an icount-limited TB may do I/O, bracket
           it with gen_io_start/gen_io_end.  */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;             /* all Alpha insns are 4 bytes */
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB epilogue according to how translation ended.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* translate_one already emitted the exit.  */
        break;
    case EXIT_PC_STALE:
        /* cpu_pc still holds the old value; store the new one.  */
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill metadata for trailing ops past the last insn.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
3457
2cfc5f17 3458void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3459{
2cfc5f17 3460 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3461}
3462
2cfc5f17 3463void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3464{
2cfc5f17 3465 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3466}
3467
a964acc6
RH
/* One entry per supported -cpu model name: the implementation version
   reported by IMPLVER and the architecture-extension bits reported by
   AMASK.  */
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

/* Known CPU models.  Generation aliases (ev4..ev68) and chip part
   numbers (21064..21264a) map to the same implver/amask pairs.  */
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
			       | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
			       | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
				| AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
3491
/* Allocate and initialize an Alpha CPU state for the model named by
   CPU_MODEL.  An unrecognized name silently falls back to an ev67-like
   configuration.  Returns the new CPUAlphaState; ownership passes to
   the caller (freed only at process exit in practice).  */
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = g_malloc0(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
	     | AMASK_TRAP | AMASK_PREFETCH);

    /* Linear search of the model table; first exact name match wins.  */
    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    /* User-mode emulation starts in user PS, with all FP exceptions
       disabled in the FPCR (the usual Unix process default).  */
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif
    env->lock_addr = -1;  /* no LL/SC reservation outstanding */
    env->fen = 1;         /* floating-point unit enabled */

    qemu_init_vcpu(env);
    return env;
}
aaed909a 3529
e87b7cb0 3530void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
d2856f1a
AJ
3531{
3532 env->pc = gen_opc_pc[pc_pos];
3533}