]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
alpha: remove unused variable
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
20#include <stdint.h>
21#include <stdlib.h>
22#include <stdio.h>
23
24#include "cpu.h"
25#include "exec-all.h"
26#include "disas.h"
ae8ecd42 27#include "host-utils.h"
57fec1fe 28#include "tcg-op.h"
ca10f867 29#include "qemu-common.h"
4c9649a9 30
a7812ae4
PB
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
19188121 35#undef ALPHA_DEBUG_DISAS
f24518b5 36#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
37
38#ifdef ALPHA_DEBUG_DISAS
806991da 39# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
40#else
41# define LOG_DISAS(...) do { } while (0)
42#endif
43
4c9649a9
JM
/* Per-translation-block state carried through the instruction decoders.  */
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;  /* TB being translated */
    CPUAlphaState *env;           /* CPU state; used e.g. for singlestep checks */
    uint64_t pc;                  /* guest PC of the next instruction */
    int mem_idx;                  /* MMU index used for qemu_ld/st ops */

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};
56
4af70374
RH
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    /* Translation continues within the current TB.  */
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
79
/* global register indexes: TCG globals backing the guest-visible state.  */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];       /* integer registers; $31 reads as zero and has no global */
static TCGv cpu_fir[31];      /* FP registers; f31 likewise omitted */
static TCGv cpu_pc;
static TCGv cpu_lock_addr;    /* LL/SC lock state */
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;       /* per-process unique value (PALcode) */
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names: packed "ir0".."ir30" and "fir0".."fir30" strings.
   Sized as 10 short + 21 long names for each bank.  */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
98
/* One-time initialization of the TCG globals above.  Idempotent via the
   done_init latch.  */
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Pack the register name strings into cpu_reg_names; the increments
       (4/5 and 5/6 bytes) account for one- vs two-digit register numbers
       plus the NUL terminator.  */
    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
151
/* Emit a call to the exception helper with the given exception number and
   error code.  Does not update cpu_pc; see gen_excp for that.  */
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
8aa3fa20 162
bf1b03fe
RH
/* Store the current PC and raise an exception.  The helper does not
   return, hence EXIT_NORETURN.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
169
/* Raise an illegal-opcode (OPCDEC) exception.  */
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
174
/* Load a 32-bit VAX F-float from memory and expand it to register format
   via the memory_to_f helper.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
185
/* Load a 64-bit VAX G-float from memory, converting the memory word
   ordering to register format via the memory_to_g helper.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
193
/* Load a 32-bit IEEE S-float from memory and expand it to register
   format via the memory_to_s helper.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
204
/* LDL_L: load-locked longword.  Records the address and loaded value for
   the later store-conditional check.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
211
/* LDQ_L: load-locked quadword.  Records the address and loaded value for
   the later store-conditional check.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
218
636aa200
BS
/* Common expander for load instructions.  tcg_gen_qemu_load is the actual
   load generator; fp selects the FP register bank; clear masks the low
   three address bits (LDx_U forms).  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* rb == $31 reads as zero, so the address is just the displacement.  */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
252
/* Store a VAX F-float: compress register format to the 32-bit memory
   format via the f_to_memory helper, then store.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
263
/* Store a VAX G-float: convert register format to memory word ordering
   via the g_to_memory helper, then store 64 bits.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
271
/* Store an IEEE S-float: compress register format to the 32-bit memory
   format via the s_to_memory helper, then store.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
282
636aa200
BS
/* Common expander for store instructions; mirrors gen_load_mem.  A source
   register of $31/f31 stores zero via a constant temp.  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        /* Register 31 reads as zero; materialize it in a temp.  */
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
316
/* STL_C / STQ_C: store-conditional.  In user mode this traps out to the
   cpu_loop for its CAS emulation; in system mode it is expanded inline as
   a non-atomic load-compare-store.  On success ra is set to 1, on failure
   to 0, and the lock is always invalidated afterwards.  */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    /* Local temp: it must survive the branches emitted below.  */
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if the address does not match the locked address.  */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        /* Fail if the memory value changed since the locked load.  */
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* Invalidate the lock in either case.  */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
383
4af70374 384static int use_goto_tb(DisasContext *ctx, uint64_t dest)
4c9649a9 385{
4af70374
RH
386 /* Check for the dest on the same page as the start of the TB. We
387 also want to suppress goto_tb in the case of single-steping and IO. */
388 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
389 && !ctx->env->singlestep_enabled
390 && !(ctx->tb->cflags & CF_LAST_IO));
391}
dbb30fe6 392
4af70374
RH
/* Unconditional direct branch (BR/BSR): write the return address into ra
   and transfer to pc + disp*4, using goto_tb when possible.  */
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
414
4af70374
RH
/* Emit a conditional branch comparing CMP against zero with COND.
   Taken target is pc + disp*4; fall-through is the next instruction.
   NOTE(review): CMP is not freed here nor by the visible callers —
   confirm whether the temp is intentionally left to die with the TB.  */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Not-taken path: chain to the next instruction.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        /* Taken path: chain to the branch target.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
459
/* Integer conditional branch.  With MASK set only bit 0 of ra is tested
   (BLBC/BLBS); otherwise the full register is compared against zero.  */
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* $31 reads as zero.  */
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
478
/* Fold -0.0 for comparison with COND.  Produces in DEST a value whose
   integer comparison against zero with COND matches the FP comparison
   semantics of SRC, treating -0.0 as equal to +0.0.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;  /* IEEE bit pattern of -0.0 */

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
510
4af70374
RH
/* Floating-point conditional branch on fir[ra], folding -0.0 so that the
   integer comparison in gen_bcond_internal gives IEEE semantics.  */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
526
/* Integer conditional move: if ir[ra] (or its low bit, when MASK) meets
   COND against zero, move lit or ir[rb] into ir[rc].  Implemented with an
   inverted-condition branch over the move.  */
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            /* Test only the low bit (CMOVLBC/CMOVLBS forms).  */
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
559
/* Floating-point conditional move: if fir[ra] meets COND (with -0.0
   folded), move fir[rb] into fir[rc].  */
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
586
f24518b5
RH
/* FP instruction qualifier bits extracted from the fn11 function field.
   Note QUAL_U and QUAL_V share an encoding: the same bit means underflow
   enable for FP outputs and overflow enable for integer outputs.  */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
597
/* Set the softfloat rounding mode required by the fn11 qualifier, caching
   the last mode set in ctx->tb_rm to avoid redundant stores within a TB.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: take the mode from the FPCR at runtime.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
636
/* Set the softfloat flush-to-zero flag per the QUAL_U qualifier, caching
   the last setting in ctx->tb_ftz to avoid redundant stores within a TB.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
665
/* Fetch fir[reg] through the appropriate IEEE input-checking helper,
   returning a fresh temp the caller must free.  reg 31 yields zero.  */
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        /* Software-completion qualifier: use the /S input helper.  */
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
680
/* Clear the accumulated softfloat exception flags before an FP op.  */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
692
/* Read the softfloat exception flags, mask off IGNORE, and raise any
   remaining exceptions for destination register RC via the helper.  */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
730
/* Raise FP exceptions, ignoring inexact unless the /I qualifier is set.  */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
f24518b5 735
593f17e5
RH
/* CVTLQ: convert the longword held in FP register layout back to a
   sign-extended quadword, reassembling the split bit fields.  */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
757
735cf45f
RH
/* CVTQL: convert a quadword to the longword-in-FP-register layout,
   splitting the low 32 bits across the register's bit fields.  */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
777
/* CVTQL/V: as gen_fcvtql, but first raise an integer-overflow arithmetic
   trap if the source does not fit in 32 signed bits.  */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        /* Overflow iff the value differs from its 32-bit sign extension.  */
        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
792
f24518b5
RH
/* Expand a two-operand VAX-format FP op: gen_f<name>(rb, rc) calls the
   corresponding helper with fir[rb] (zero for f31) into fir[rc]; a write
   to f31 is discarded.  */
#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
f24518b5
RH
815
/* Common expander for two-operand IEEE FP ops: set rounding/FTZ state per
   fn11, clear the exception flags, run the helper, then raise traps.  */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
837
/* Instantiate gen_f<name> wrappers around gen_ieee_arith2.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
848
/* CVTTQ: T-float to quadword conversion, dispatching on the fn11
   qualifiers to the specialized cropped/overflow helper variants.  */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        /* Suppress overflow/inexact traps unless enabled by qualifier.  */
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
888
f24518b5
RH
/* Common expander for integer-to-FP conversions (CVTQS/CVTQT).  Skips the
   exception bookkeeping unless /I is requested, since inexact is the only
   possible exception.  */
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
923
/* Instantiate gen_f<name> wrappers around gen_ieee_intcvt.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
932
dc96be4b
RH
/* Common expander for CPYS/CPYSN/CPYSE: combine the MASK-selected bits of
   fir[ra] (optionally inverted) with the complementary bits of fir[rb].
   za/zb track operands statically known to be zero so the final OR can be
   simplified.  */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask; reuse the constant.  */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
992
/* CPYS: copy sign bit of fir[ra] with the rest of fir[rb].  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN: as CPYS, but with the sign bit inverted.  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE: copy sign and exponent (top 12 bits) of fir[ra].  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1007
f24518b5
RH
/* Expand a three-operand VAX-format FP op: gen_f<name>(ra, rb, rc) calls
   the helper with fir[ra], fir[rb] (zero constants for f31) into fir[rc];
   a write to f31 is discarded.  */
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
f24518b5
RH
1049
/* Common expander for three-operand IEEE FP ops: set rounding/FTZ state
   per fn11, clear exception flags, run the helper, then raise traps.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1074
/* Instantiate gen_f<name> wrappers around gen_ieee_arith3.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1089
/* Common expander for IEEE FP compares.  Like gen_ieee_arith3 but uses
   the compare-flavored input helpers and sets no rounding/FTZ state.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1112
/* Instantiate gen_f<name> wrappers around gen_ieee_compare.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
a7812ae4 1123
248c42f3
RH
/* Expand the 8-bit ZAPNOT byte-select literal into a 64-bit mask: bit i
   of LIT set means byte i of the result is 0xff, otherwise zero.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    /* Walk from the most significant byte down, shifting the mask up and
       appending 0xff for each selected byte.  */
    for (byte = 7; byte >= 0; byte--) {
        mask <<= 8;
        if ((lit >> byte) & 1) {
            mask |= 0xffull;
        }
    }
    return mask;
}
1135
87d98f95
RH
1136/* Implement zapnot with an immediate operand, which expands to some
1137 form of immediate AND. This is a basic building block in the
1138 definition of many of the other byte manipulation instructions. */
248c42f3 1139static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1140{
87d98f95
RH
1141 switch (lit) {
1142 case 0x00:
248c42f3 1143 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1144 break;
1145 case 0x01:
248c42f3 1146 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1147 break;
1148 case 0x03:
248c42f3 1149 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1150 break;
1151 case 0x0f:
248c42f3 1152 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1153 break;
1154 case 0xff:
248c42f3 1155 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1156 break;
1157 default:
248c42f3 1158 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1159 break;
1160 }
1161}
1162
1163static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1164{
1165 if (unlikely(rc == 31))
1166 return;
1167 else if (unlikely(ra == 31))
1168 tcg_gen_movi_i64(cpu_ir[rc], 0);
1169 else if (islit)
248c42f3 1170 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1171 else
1172 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1173}
1174
1175static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1176{
1177 if (unlikely(rc == 31))
1178 return;
1179 else if (unlikely(ra == 31))
1180 tcg_gen_movi_i64(cpu_ir[rc], 0);
1181 else if (islit)
248c42f3 1182 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1183 else
1184 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1185}
1186
1187
248c42f3 1188/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1189static void gen_ext_h(int ra, int rb, int rc, int islit,
1190 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1191{
1192 if (unlikely(rc == 31))
1193 return;
377a43b6
RH
1194 else if (unlikely(ra == 31))
1195 tcg_gen_movi_i64(cpu_ir[rc], 0);
1196 else {
dfaa8583 1197 if (islit) {
377a43b6
RH
1198 lit = (64 - (lit & 7) * 8) & 0x3f;
1199 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1200 } else {
377a43b6 1201 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1202 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1203 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1204 tcg_gen_neg_i64(tmp1, tmp1);
1205 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1206 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1207 tcg_temp_free(tmp1);
dfaa8583 1208 }
248c42f3 1209 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1210 }
b3249f63
AJ
1211}
1212
248c42f3 1213/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1214static void gen_ext_l(int ra, int rb, int rc, int islit,
1215 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1216{
1217 if (unlikely(rc == 31))
1218 return;
377a43b6
RH
1219 else if (unlikely(ra == 31))
1220 tcg_gen_movi_i64(cpu_ir[rc], 0);
1221 else {
dfaa8583 1222 if (islit) {
377a43b6 1223 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1224 } else {
a7812ae4 1225 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1226 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1227 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1228 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1229 tcg_temp_free(tmp);
fe2b269a 1230 }
248c42f3
RH
1231 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1232 }
1233}
1234
50eb6e5c
RH
/* INSWH, INSLH, INSQH */
/* Insert the masked bytes of Ra into the HIGH part of the destination,
   shifted right by the complement of the byte offset in Rb/lit.
   R31 as source, or a literal byte offset of 0, yields zero.  */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1276
248c42f3 1277/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1278static void gen_ins_l(int ra, int rb, int rc, int islit,
1279 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1280{
1281 if (unlikely(rc == 31))
1282 return;
1283 else if (unlikely(ra == 31))
1284 tcg_gen_movi_i64(cpu_ir[rc], 0);
1285 else {
1286 TCGv tmp = tcg_temp_new();
1287
1288 /* The instruction description has us left-shift the byte mask
1289 the same number of byte slots as the data and apply the zap
1290 at the end. This is equivalent to simply performing the zap
1291 first and shifting afterward. */
1292 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1293
1294 if (islit) {
1295 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1296 } else {
1297 TCGv shift = tcg_temp_new();
1298 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1299 tcg_gen_shli_i64(shift, shift, 3);
1300 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1301 tcg_temp_free(shift);
1302 }
1303 tcg_temp_free(tmp);
377a43b6 1304 }
b3249f63
AJ
1305}
1306
ffec44f1
RH
/* MSKWH, MSKLH, MSKQH */
/* Clear the bytes of Ra covered by the HIGH part of the byte mask
   positioned at the byte offset in Rb/lit.  */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Constant offset: the shifted-out high mask bits <15:8> fold
           into a single immediate zapnot.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1344
14ab1634 1345/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1346static void gen_msk_l(int ra, int rb, int rc, int islit,
1347 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1348{
1349 if (unlikely(rc == 31))
1350 return;
1351 else if (unlikely(ra == 31))
1352 tcg_gen_movi_i64(cpu_ir[rc], 0);
1353 else if (islit) {
1354 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1355 } else {
1356 TCGv shift = tcg_temp_new();
1357 TCGv mask = tcg_temp_new();
1358
1359 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1360 tcg_gen_shli_i64(shift, shift, 3);
1361 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1362 tcg_gen_shl_i64(mask, mask, shift);
1363
1364 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1365
1366 tcg_temp_free(mask);
1367 tcg_temp_free(shift);
1368 }
1369}
1370
/* Code to call arith3 helpers */
/* Expand to gen_<name>(ra, rb, rc, islit, lit): wrappers around the
   out-of-line gen_helper_<name> implementations.  Writes to R31 are
   dropped; R31 as the first operand reads as zero; a literal second
   operand is materialized in a temporary constant.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
/* Overflow-checking add/subtract variants.  */
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
/* Multiplies.  */
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
/* MVI byte/word min/max and pixel error.  */
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
1414
/* Expand to gen_<name>(rb, rc): wrappers for the two-operand MVI
   pack/unpack helpers.  Writes to R31 are dropped; R31 as the source
   yields zero.  */
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
b3249f63 1429
9e05960f
RH
1430static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1431 int islit, uint8_t lit)
01ff9cc8 1432{
9e05960f 1433 TCGv va, vb;
01ff9cc8 1434
9e05960f 1435 if (unlikely(rc == 31)) {
13e4df99 1436 return;
9e05960f 1437 }
01ff9cc8 1438
9e05960f
RH
1439 if (ra == 31) {
1440 va = tcg_const_i64(0);
1441 } else {
1442 va = cpu_ir[ra];
1443 }
1444 if (islit) {
1445 vb = tcg_const_i64(lit);
1446 } else {
1447 vb = cpu_ir[rb];
1448 }
01ff9cc8 1449
9e05960f 1450 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1451
9e05960f
RH
1452 if (ra == 31) {
1453 tcg_temp_free(va);
1454 }
1455 if (islit) {
1456 tcg_temp_free(vb);
1457 }
01ff9cc8
AJ
1458}
1459
ac316ca4
RH
/* RS/RC: read the current interrupt flag into Ra (skipped when Ra is
   R31), then overwrite the flag with SET — presumably 1 for RS and 0
   for RC; confirm at the call site.  */
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    /* Store the new flag value as a byte.  */
    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}
1472
2ace7e55
RH
/* Emit code for a CALL_PAL instruction.  Returns NO_EXIT when the
   call is handled inline; otherwise the exit status from raising
   EXCP_CALL_PAL (for calls deferred to the PALcode emulation) or an
   invalid-opcode exception.  */
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            /* Anything else goes through the PALcode exception path.  */
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    /* Out-of-range palcode, or a privileged call from user mode.  */
    return gen_invalid(ctx);
}
1567
26b46094
RH
#ifndef CONFIG_USER_ONLY

/* Width tags OR'ed into the env offset returned by cpu_pr_data so the
   accessors know whether the backing field is a byte or a longword
   (untagged offsets are full quadwords).  */
#define PR_BYTE 0x100000
#define PR_LONG 0x200000

/* Map a processor-register number to the offsetof() of its backing
   field in CPUAlphaState, tagged with PR_BYTE/PR_LONG for sub-quad
   fields.  Returns 0 for unknown registers, which the callers treat
   as read-zero, write-ignore.  */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);
    }
    return 0;
}
1597
1598static void gen_mfpr(int ra, int regno)
1599{
1600 int data = cpu_pr_data(regno);
1601
1602 /* In our emulated PALcode, these processor registers have no
1603 side effects from reading. */
1604 if (ra == 31) {
1605 return;
1606 }
1607
1608 /* The basic registers are data only, and unknown registers
1609 are read-zero, write-ignore. */
1610 if (data == 0) {
1611 tcg_gen_movi_i64(cpu_ir[ra], 0);
1612 } else if (data & PR_BYTE) {
1613 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1614 } else if (data & PR_LONG) {
1615 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1616 } else {
1617 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1618 }
1619}
1620
1621static void gen_mtpr(int rb, int regno)
1622{
1623 TCGv tmp;
26b46094
RH
1624
1625 if (rb == 31) {
1626 tmp = tcg_const_i64(0);
1627 } else {
1628 tmp = cpu_ir[rb];
1629 }
1630
3b4fefd6
RH
1631 /* These two register numbers perform a TLB cache flush. Thankfully we
1632 can only do this inside PALmode, which means that the current basic
1633 block cannot be affected by the change in mappings. */
1634 if (regno == 255) {
1635 /* TBIA */
1636 gen_helper_tbia();
1637 } else if (regno == 254) {
1638 /* TBIS */
1639 gen_helper_tbis(tmp);
1640 } else {
1641 /* The basic registers are data only, and unknown registers
1642 are read-zero, write-ignore. */
1643 int data = cpu_pr_data(regno);
1644 if (data != 0) {
1645 if (data & PR_BYTE) {
1646 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1647 } else if (data & PR_LONG) {
1648 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1649 } else {
1650 tcg_gen_st_i64(tmp, cpu_env, data);
1651 }
26b46094
RH
1652 }
1653 }
1654
1655 if (rb == 31) {
1656 tcg_temp_free(tmp);
1657 }
1658}
1659#endif /* !USER_ONLY*/
1660
4af70374 1661static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1662{
1663 uint32_t palcode;
efa64351
MT
1664 int32_t disp21, disp16;
1665#ifndef CONFIG_USER_ONLY
1666 int32_t disp12;
1667#endif
f88fe4e3 1668 uint16_t fn11;
b6fb147c 1669 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
adf3c8b6 1670 uint8_t lit;
4af70374 1671 ExitStatus ret;
4c9649a9
JM
1672
1673 /* Decode all instruction fields */
1674 opc = insn >> 26;
1675 ra = (insn >> 21) & 0x1F;
1676 rb = (insn >> 16) & 0x1F;
1677 rc = insn & 0x1F;
13e4df99 1678 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1679 if (rb == 31 && !islit) {
1680 islit = 1;
1681 lit = 0;
1682 } else
1683 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1684 palcode = insn & 0x03FFFFFF;
1685 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1686 disp16 = (int16_t)(insn & 0x0000FFFF);
efa64351 1687#ifndef CONFIG_USER_ONLY
4c9649a9 1688 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
efa64351 1689#endif
4c9649a9
JM
1690 fn11 = (insn >> 5) & 0x000007FF;
1691 fpfn = fn11 & 0x3F;
1692 fn7 = (insn >> 5) & 0x0000007F;
806991da 1693 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1694 opc, ra, rb, rc, disp16);
806991da 1695
4af70374 1696 ret = NO_EXIT;
4c9649a9
JM
1697 switch (opc) {
1698 case 0x00:
1699 /* CALL_PAL */
2ace7e55
RH
1700 ret = gen_call_pal(ctx, palcode);
1701 break;
4c9649a9
JM
1702 case 0x01:
1703 /* OPC01 */
1704 goto invalid_opc;
1705 case 0x02:
1706 /* OPC02 */
1707 goto invalid_opc;
1708 case 0x03:
1709 /* OPC03 */
1710 goto invalid_opc;
1711 case 0x04:
1712 /* OPC04 */
1713 goto invalid_opc;
1714 case 0x05:
1715 /* OPC05 */
1716 goto invalid_opc;
1717 case 0x06:
1718 /* OPC06 */
1719 goto invalid_opc;
1720 case 0x07:
1721 /* OPC07 */
1722 goto invalid_opc;
1723 case 0x08:
1724 /* LDA */
1ef4ef4e 1725 if (likely(ra != 31)) {
496cb5b9 1726 if (rb != 31)
3761035f
AJ
1727 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1728 else
1729 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1730 }
4c9649a9
JM
1731 break;
1732 case 0x09:
1733 /* LDAH */
1ef4ef4e 1734 if (likely(ra != 31)) {
496cb5b9 1735 if (rb != 31)
3761035f
AJ
1736 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1737 else
1738 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1739 }
4c9649a9
JM
1740 break;
1741 case 0x0A:
1742 /* LDBU */
a18ad893
RH
1743 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1744 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1745 break;
1746 }
1747 goto invalid_opc;
4c9649a9
JM
1748 case 0x0B:
1749 /* LDQ_U */
f18cd223 1750 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1751 break;
1752 case 0x0C:
1753 /* LDWU */
a18ad893
RH
1754 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1755 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1756 break;
1757 }
1758 goto invalid_opc;
4c9649a9
JM
1759 case 0x0D:
1760 /* STW */
6910b8f6 1761 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1762 break;
1763 case 0x0E:
1764 /* STB */
6910b8f6 1765 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1766 break;
1767 case 0x0F:
1768 /* STQ_U */
6910b8f6 1769 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1770 break;
1771 case 0x10:
1772 switch (fn7) {
1773 case 0x00:
1774 /* ADDL */
30c7183b
AJ
1775 if (likely(rc != 31)) {
1776 if (ra != 31) {
1777 if (islit) {
1778 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1779 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1780 } else {
30c7183b
AJ
1781 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1782 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1783 }
30c7183b
AJ
1784 } else {
1785 if (islit)
dfaa8583 1786 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1787 else
dfaa8583 1788 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1789 }
1790 }
4c9649a9
JM
1791 break;
1792 case 0x02:
1793 /* S4ADDL */
30c7183b
AJ
1794 if (likely(rc != 31)) {
1795 if (ra != 31) {
a7812ae4 1796 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1797 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1798 if (islit)
1799 tcg_gen_addi_i64(tmp, tmp, lit);
1800 else
1801 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1802 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1803 tcg_temp_free(tmp);
30c7183b
AJ
1804 } else {
1805 if (islit)
1806 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1807 else
dfaa8583 1808 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1809 }
1810 }
4c9649a9
JM
1811 break;
1812 case 0x09:
1813 /* SUBL */
30c7183b
AJ
1814 if (likely(rc != 31)) {
1815 if (ra != 31) {
dfaa8583 1816 if (islit)
30c7183b 1817 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1818 else
30c7183b 1819 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1820 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1821 } else {
1822 if (islit)
1823 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1824 else {
30c7183b
AJ
1825 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1826 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1827 }
1828 }
4c9649a9
JM
1829 break;
1830 case 0x0B:
1831 /* S4SUBL */
30c7183b
AJ
1832 if (likely(rc != 31)) {
1833 if (ra != 31) {
a7812ae4 1834 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1835 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1836 if (islit)
1837 tcg_gen_subi_i64(tmp, tmp, lit);
1838 else
1839 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1840 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1841 tcg_temp_free(tmp);
30c7183b
AJ
1842 } else {
1843 if (islit)
1844 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1845 else {
30c7183b
AJ
1846 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1847 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1848 }
30c7183b
AJ
1849 }
1850 }
4c9649a9
JM
1851 break;
1852 case 0x0F:
1853 /* CMPBGE */
a7812ae4 1854 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1855 break;
1856 case 0x12:
1857 /* S8ADDL */
30c7183b
AJ
1858 if (likely(rc != 31)) {
1859 if (ra != 31) {
a7812ae4 1860 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1861 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1862 if (islit)
1863 tcg_gen_addi_i64(tmp, tmp, lit);
1864 else
1865 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1866 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1867 tcg_temp_free(tmp);
30c7183b
AJ
1868 } else {
1869 if (islit)
1870 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1871 else
dfaa8583 1872 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1873 }
1874 }
4c9649a9
JM
1875 break;
1876 case 0x1B:
1877 /* S8SUBL */
30c7183b
AJ
1878 if (likely(rc != 31)) {
1879 if (ra != 31) {
a7812ae4 1880 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1881 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1882 if (islit)
1883 tcg_gen_subi_i64(tmp, tmp, lit);
1884 else
1885 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1886 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1887 tcg_temp_free(tmp);
30c7183b
AJ
1888 } else {
1889 if (islit)
1890 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1891 else
30c7183b
AJ
1892 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1893 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1894 }
30c7183b
AJ
1895 }
1896 }
4c9649a9
JM
1897 break;
1898 case 0x1D:
1899 /* CMPULT */
01ff9cc8 1900 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1901 break;
1902 case 0x20:
1903 /* ADDQ */
30c7183b
AJ
1904 if (likely(rc != 31)) {
1905 if (ra != 31) {
1906 if (islit)
1907 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1908 else
dfaa8583 1909 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1910 } else {
1911 if (islit)
1912 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1913 else
dfaa8583 1914 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1915 }
1916 }
4c9649a9
JM
1917 break;
1918 case 0x22:
1919 /* S4ADDQ */
30c7183b
AJ
1920 if (likely(rc != 31)) {
1921 if (ra != 31) {
a7812ae4 1922 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1923 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1924 if (islit)
1925 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1926 else
1927 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1928 tcg_temp_free(tmp);
30c7183b
AJ
1929 } else {
1930 if (islit)
1931 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1932 else
dfaa8583 1933 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1934 }
1935 }
4c9649a9
JM
1936 break;
1937 case 0x29:
1938 /* SUBQ */
30c7183b
AJ
1939 if (likely(rc != 31)) {
1940 if (ra != 31) {
1941 if (islit)
1942 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1943 else
dfaa8583 1944 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1945 } else {
1946 if (islit)
1947 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1948 else
dfaa8583 1949 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1950 }
1951 }
4c9649a9
JM
1952 break;
1953 case 0x2B:
1954 /* S4SUBQ */
30c7183b
AJ
1955 if (likely(rc != 31)) {
1956 if (ra != 31) {
a7812ae4 1957 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1958 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1959 if (islit)
1960 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1961 else
1962 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1963 tcg_temp_free(tmp);
30c7183b
AJ
1964 } else {
1965 if (islit)
1966 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1967 else
dfaa8583 1968 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1969 }
1970 }
4c9649a9
JM
1971 break;
1972 case 0x2D:
1973 /* CMPEQ */
01ff9cc8 1974 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1975 break;
1976 case 0x32:
1977 /* S8ADDQ */
30c7183b
AJ
1978 if (likely(rc != 31)) {
1979 if (ra != 31) {
a7812ae4 1980 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1981 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1982 if (islit)
1983 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1984 else
1985 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1986 tcg_temp_free(tmp);
30c7183b
AJ
1987 } else {
1988 if (islit)
1989 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1990 else
dfaa8583 1991 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1992 }
1993 }
4c9649a9
JM
1994 break;
1995 case 0x3B:
1996 /* S8SUBQ */
30c7183b
AJ
1997 if (likely(rc != 31)) {
1998 if (ra != 31) {
a7812ae4 1999 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
2000 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2001 if (islit)
2002 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2003 else
2004 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2005 tcg_temp_free(tmp);
30c7183b
AJ
2006 } else {
2007 if (islit)
2008 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 2009 else
dfaa8583 2010 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2011 }
2012 }
4c9649a9
JM
2013 break;
2014 case 0x3D:
2015 /* CMPULE */
01ff9cc8 2016 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
2017 break;
2018 case 0x40:
2019 /* ADDL/V */
a7812ae4 2020 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
2021 break;
2022 case 0x49:
2023 /* SUBL/V */
a7812ae4 2024 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
2025 break;
2026 case 0x4D:
2027 /* CMPLT */
01ff9cc8 2028 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
2029 break;
2030 case 0x60:
2031 /* ADDQ/V */
a7812ae4 2032 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2033 break;
2034 case 0x69:
2035 /* SUBQ/V */
a7812ae4 2036 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2037 break;
2038 case 0x6D:
2039 /* CMPLE */
01ff9cc8 2040 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
2041 break;
2042 default:
2043 goto invalid_opc;
2044 }
2045 break;
2046 case 0x11:
2047 switch (fn7) {
2048 case 0x00:
2049 /* AND */
30c7183b 2050 if (likely(rc != 31)) {
dfaa8583 2051 if (ra == 31)
30c7183b
AJ
2052 tcg_gen_movi_i64(cpu_ir[rc], 0);
2053 else if (islit)
2054 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2055 else
2056 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2057 }
4c9649a9
JM
2058 break;
2059 case 0x08:
2060 /* BIC */
30c7183b
AJ
2061 if (likely(rc != 31)) {
2062 if (ra != 31) {
2063 if (islit)
2064 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2065 else
2066 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2067 } else
2068 tcg_gen_movi_i64(cpu_ir[rc], 0);
2069 }
4c9649a9
JM
2070 break;
2071 case 0x14:
2072 /* CMOVLBS */
bbe1dab4 2073 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2074 break;
2075 case 0x16:
2076 /* CMOVLBC */
bbe1dab4 2077 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
2078 break;
2079 case 0x20:
2080 /* BIS */
30c7183b
AJ
2081 if (likely(rc != 31)) {
2082 if (ra != 31) {
2083 if (islit)
2084 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 2085 else
30c7183b 2086 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 2087 } else {
30c7183b
AJ
2088 if (islit)
2089 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2090 else
dfaa8583 2091 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 2092 }
4c9649a9
JM
2093 }
2094 break;
2095 case 0x24:
2096 /* CMOVEQ */
bbe1dab4 2097 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2098 break;
2099 case 0x26:
2100 /* CMOVNE */
bbe1dab4 2101 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2102 break;
2103 case 0x28:
2104 /* ORNOT */
30c7183b 2105 if (likely(rc != 31)) {
dfaa8583 2106 if (ra != 31) {
30c7183b
AJ
2107 if (islit)
2108 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2109 else
2110 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2111 } else {
2112 if (islit)
2113 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2114 else
2115 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2116 }
2117 }
4c9649a9
JM
2118 break;
2119 case 0x40:
2120 /* XOR */
30c7183b
AJ
2121 if (likely(rc != 31)) {
2122 if (ra != 31) {
2123 if (islit)
2124 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 2125 else
dfaa8583 2126 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2127 } else {
2128 if (islit)
2129 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 2130 else
dfaa8583 2131 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2132 }
2133 }
4c9649a9
JM
2134 break;
2135 case 0x44:
2136 /* CMOVLT */
bbe1dab4 2137 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2138 break;
2139 case 0x46:
2140 /* CMOVGE */
bbe1dab4 2141 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2142 break;
2143 case 0x48:
2144 /* EQV */
30c7183b
AJ
2145 if (likely(rc != 31)) {
2146 if (ra != 31) {
2147 if (islit)
2148 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
2149 else
2150 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
2151 } else {
2152 if (islit)
2153 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 2154 else
dfaa8583 2155 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
2156 }
2157 }
4c9649a9
JM
2158 break;
2159 case 0x61:
2160 /* AMASK */
ae8ecd42 2161 if (likely(rc != 31)) {
a18ad893
RH
2162 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2163
2164 if (islit) {
2165 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2166 } else {
2167 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
1a1f7dbc 2168 }
ae8ecd42 2169 }
4c9649a9
JM
2170 break;
2171 case 0x64:
2172 /* CMOVLE */
bbe1dab4 2173 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2174 break;
2175 case 0x66:
2176 /* CMOVGT */
bbe1dab4 2177 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2178 break;
2179 case 0x6C:
2180 /* IMPLVER */
3761035f 2181 if (rc != 31)
8579095b 2182 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
2183 break;
2184 default:
2185 goto invalid_opc;
2186 }
2187 break;
2188 case 0x12:
2189 switch (fn7) {
2190 case 0x02:
2191 /* MSKBL */
14ab1634 2192 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2193 break;
2194 case 0x06:
2195 /* EXTBL */
377a43b6 2196 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2197 break;
2198 case 0x0B:
2199 /* INSBL */
248c42f3 2200 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2201 break;
2202 case 0x12:
2203 /* MSKWL */
14ab1634 2204 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2205 break;
2206 case 0x16:
2207 /* EXTWL */
377a43b6 2208 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2209 break;
2210 case 0x1B:
2211 /* INSWL */
248c42f3 2212 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2213 break;
2214 case 0x22:
2215 /* MSKLL */
14ab1634 2216 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2217 break;
2218 case 0x26:
2219 /* EXTLL */
377a43b6 2220 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2221 break;
2222 case 0x2B:
2223 /* INSLL */
248c42f3 2224 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2225 break;
2226 case 0x30:
2227 /* ZAP */
a7812ae4 2228 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2229 break;
2230 case 0x31:
2231 /* ZAPNOT */
a7812ae4 2232 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2233 break;
2234 case 0x32:
2235 /* MSKQL */
14ab1634 2236 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2237 break;
2238 case 0x34:
2239 /* SRL */
30c7183b
AJ
2240 if (likely(rc != 31)) {
2241 if (ra != 31) {
2242 if (islit)
2243 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2244 else {
a7812ae4 2245 TCGv shift = tcg_temp_new();
30c7183b
AJ
2246 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2247 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2248 tcg_temp_free(shift);
dfaa8583 2249 }
30c7183b
AJ
2250 } else
2251 tcg_gen_movi_i64(cpu_ir[rc], 0);
2252 }
4c9649a9
JM
2253 break;
2254 case 0x36:
2255 /* EXTQL */
377a43b6 2256 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2257 break;
2258 case 0x39:
2259 /* SLL */
30c7183b
AJ
2260 if (likely(rc != 31)) {
2261 if (ra != 31) {
2262 if (islit)
2263 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2264 else {
a7812ae4 2265 TCGv shift = tcg_temp_new();
30c7183b
AJ
2266 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2267 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2268 tcg_temp_free(shift);
dfaa8583 2269 }
30c7183b
AJ
2270 } else
2271 tcg_gen_movi_i64(cpu_ir[rc], 0);
2272 }
4c9649a9
JM
2273 break;
2274 case 0x3B:
2275 /* INSQL */
248c42f3 2276 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2277 break;
2278 case 0x3C:
2279 /* SRA */
30c7183b
AJ
2280 if (likely(rc != 31)) {
2281 if (ra != 31) {
2282 if (islit)
2283 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2284 else {
a7812ae4 2285 TCGv shift = tcg_temp_new();
30c7183b
AJ
2286 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2287 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2288 tcg_temp_free(shift);
dfaa8583 2289 }
30c7183b
AJ
2290 } else
2291 tcg_gen_movi_i64(cpu_ir[rc], 0);
2292 }
4c9649a9
JM
2293 break;
2294 case 0x52:
2295 /* MSKWH */
ffec44f1 2296 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2297 break;
2298 case 0x57:
2299 /* INSWH */
50eb6e5c 2300 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2301 break;
2302 case 0x5A:
2303 /* EXTWH */
377a43b6 2304 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2305 break;
2306 case 0x62:
2307 /* MSKLH */
ffec44f1 2308 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2309 break;
2310 case 0x67:
2311 /* INSLH */
50eb6e5c 2312 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2313 break;
2314 case 0x6A:
2315 /* EXTLH */
377a43b6 2316 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2317 break;
2318 case 0x72:
2319 /* MSKQH */
ffec44f1 2320 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2321 break;
2322 case 0x77:
2323 /* INSQH */
50eb6e5c 2324 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2325 break;
2326 case 0x7A:
2327 /* EXTQH */
377a43b6 2328 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2329 break;
2330 default:
2331 goto invalid_opc;
2332 }
2333 break;
2334 case 0x13:
2335 switch (fn7) {
2336 case 0x00:
2337 /* MULL */
30c7183b 2338 if (likely(rc != 31)) {
dfaa8583 2339 if (ra == 31)
30c7183b
AJ
2340 tcg_gen_movi_i64(cpu_ir[rc], 0);
2341 else {
2342 if (islit)
2343 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2344 else
2345 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2346 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2347 }
2348 }
4c9649a9
JM
2349 break;
2350 case 0x20:
2351 /* MULQ */
30c7183b 2352 if (likely(rc != 31)) {
dfaa8583 2353 if (ra == 31)
30c7183b
AJ
2354 tcg_gen_movi_i64(cpu_ir[rc], 0);
2355 else if (islit)
2356 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2357 else
2358 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2359 }
4c9649a9
JM
2360 break;
2361 case 0x30:
2362 /* UMULH */
a7812ae4 2363 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2364 break;
2365 case 0x40:
2366 /* MULL/V */
a7812ae4 2367 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2368 break;
2369 case 0x60:
2370 /* MULQ/V */
a7812ae4 2371 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2372 break;
2373 default:
2374 goto invalid_opc;
2375 }
2376 break;
2377 case 0x14:
f24518b5 2378 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2379 case 0x04:
2380 /* ITOFS */
a18ad893 2381 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2382 goto invalid_opc;
a18ad893 2383 }
f18cd223
AJ
2384 if (likely(rc != 31)) {
2385 if (ra != 31) {
a7812ae4 2386 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2387 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2388 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2389 tcg_temp_free_i32(tmp);
f18cd223
AJ
2390 } else
2391 tcg_gen_movi_i64(cpu_fir[rc], 0);
2392 }
4c9649a9
JM
2393 break;
2394 case 0x0A:
2395 /* SQRTF */
a18ad893
RH
2396 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2397 gen_fsqrtf(rb, rc);
2398 break;
2399 }
2400 goto invalid_opc;
4c9649a9
JM
2401 case 0x0B:
2402 /* SQRTS */
a18ad893
RH
2403 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2404 gen_fsqrts(ctx, rb, rc, fn11);
2405 break;
2406 }
2407 goto invalid_opc;
4c9649a9
JM
2408 case 0x14:
2409 /* ITOFF */
a18ad893 2410 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2411 goto invalid_opc;
a18ad893 2412 }
f18cd223
AJ
2413 if (likely(rc != 31)) {
2414 if (ra != 31) {
a7812ae4 2415 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2416 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2417 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2418 tcg_temp_free_i32(tmp);
f18cd223
AJ
2419 } else
2420 tcg_gen_movi_i64(cpu_fir[rc], 0);
2421 }
4c9649a9
JM
2422 break;
2423 case 0x24:
2424 /* ITOFT */
a18ad893 2425 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 2426 goto invalid_opc;
a18ad893 2427 }
f18cd223
AJ
2428 if (likely(rc != 31)) {
2429 if (ra != 31)
2430 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2431 else
2432 tcg_gen_movi_i64(cpu_fir[rc], 0);
2433 }
4c9649a9
JM
2434 break;
2435 case 0x2A:
2436 /* SQRTG */
a18ad893
RH
2437 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2438 gen_fsqrtg(rb, rc);
2439 break;
2440 }
2441 goto invalid_opc;
4c9649a9
JM
2442 case 0x02B:
2443 /* SQRTT */
a18ad893
RH
2444 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2445 gen_fsqrtt(ctx, rb, rc, fn11);
2446 break;
2447 }
2448 goto invalid_opc;
4c9649a9
JM
2449 default:
2450 goto invalid_opc;
2451 }
2452 break;
2453 case 0x15:
2454 /* VAX floating point */
2455 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2456 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2457 case 0x00:
2458 /* ADDF */
a7812ae4 2459 gen_faddf(ra, rb, rc);
4c9649a9
JM
2460 break;
2461 case 0x01:
2462 /* SUBF */
a7812ae4 2463 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2464 break;
2465 case 0x02:
2466 /* MULF */
a7812ae4 2467 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2468 break;
2469 case 0x03:
2470 /* DIVF */
a7812ae4 2471 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2472 break;
2473 case 0x1E:
2474 /* CVTDG */
2475#if 0 // TODO
a7812ae4 2476 gen_fcvtdg(rb, rc);
4c9649a9
JM
2477#else
2478 goto invalid_opc;
2479#endif
2480 break;
2481 case 0x20:
2482 /* ADDG */
a7812ae4 2483 gen_faddg(ra, rb, rc);
4c9649a9
JM
2484 break;
2485 case 0x21:
2486 /* SUBG */
a7812ae4 2487 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2488 break;
2489 case 0x22:
2490 /* MULG */
a7812ae4 2491 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2492 break;
2493 case 0x23:
2494 /* DIVG */
a7812ae4 2495 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2496 break;
2497 case 0x25:
2498 /* CMPGEQ */
a7812ae4 2499 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2500 break;
2501 case 0x26:
2502 /* CMPGLT */
a7812ae4 2503 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2504 break;
2505 case 0x27:
2506 /* CMPGLE */
a7812ae4 2507 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2508 break;
2509 case 0x2C:
2510 /* CVTGF */
a7812ae4 2511 gen_fcvtgf(rb, rc);
4c9649a9
JM
2512 break;
2513 case 0x2D:
2514 /* CVTGD */
2515#if 0 // TODO
a7812ae4 2516 gen_fcvtgd(rb, rc);
4c9649a9
JM
2517#else
2518 goto invalid_opc;
2519#endif
2520 break;
2521 case 0x2F:
2522 /* CVTGQ */
a7812ae4 2523 gen_fcvtgq(rb, rc);
4c9649a9
JM
2524 break;
2525 case 0x3C:
2526 /* CVTQF */
a7812ae4 2527 gen_fcvtqf(rb, rc);
4c9649a9
JM
2528 break;
2529 case 0x3E:
2530 /* CVTQG */
a7812ae4 2531 gen_fcvtqg(rb, rc);
4c9649a9
JM
2532 break;
2533 default:
2534 goto invalid_opc;
2535 }
2536 break;
2537 case 0x16:
2538 /* IEEE floating-point */
f24518b5 2539 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2540 case 0x00:
2541 /* ADDS */
f24518b5 2542 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2543 break;
2544 case 0x01:
2545 /* SUBS */
f24518b5 2546 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2547 break;
2548 case 0x02:
2549 /* MULS */
f24518b5 2550 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2551 break;
2552 case 0x03:
2553 /* DIVS */
f24518b5 2554 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2555 break;
2556 case 0x20:
2557 /* ADDT */
f24518b5 2558 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2559 break;
2560 case 0x21:
2561 /* SUBT */
f24518b5 2562 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2563 break;
2564 case 0x22:
2565 /* MULT */
f24518b5 2566 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2567 break;
2568 case 0x23:
2569 /* DIVT */
f24518b5 2570 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2571 break;
2572 case 0x24:
2573 /* CMPTUN */
f24518b5 2574 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2575 break;
2576 case 0x25:
2577 /* CMPTEQ */
f24518b5 2578 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2579 break;
2580 case 0x26:
2581 /* CMPTLT */
f24518b5 2582 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2583 break;
2584 case 0x27:
2585 /* CMPTLE */
f24518b5 2586 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2587 break;
2588 case 0x2C:
a74b4d2c 2589 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2590 /* CVTST */
f24518b5 2591 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2592 } else {
2593 /* CVTTS */
f24518b5 2594 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2595 }
2596 break;
2597 case 0x2F:
2598 /* CVTTQ */
f24518b5 2599 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2600 break;
2601 case 0x3C:
2602 /* CVTQS */
f24518b5 2603 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2604 break;
2605 case 0x3E:
2606 /* CVTQT */
f24518b5 2607 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2608 break;
2609 default:
2610 goto invalid_opc;
2611 }
2612 break;
2613 case 0x17:
2614 switch (fn11) {
2615 case 0x010:
2616 /* CVTLQ */
a7812ae4 2617 gen_fcvtlq(rb, rc);
4c9649a9
JM
2618 break;
2619 case 0x020:
f18cd223 2620 if (likely(rc != 31)) {
a06d48d9 2621 if (ra == rb) {
4c9649a9 2622 /* FMOV */
a06d48d9
RH
2623 if (ra == 31)
2624 tcg_gen_movi_i64(cpu_fir[rc], 0);
2625 else
2626 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2627 } else {
f18cd223 2628 /* CPYS */
a7812ae4 2629 gen_fcpys(ra, rb, rc);
a06d48d9 2630 }
4c9649a9
JM
2631 }
2632 break;
2633 case 0x021:
2634 /* CPYSN */
a7812ae4 2635 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2636 break;
2637 case 0x022:
2638 /* CPYSE */
a7812ae4 2639 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2640 break;
2641 case 0x024:
2642 /* MT_FPCR */
f18cd223 2643 if (likely(ra != 31))
a7812ae4 2644 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2645 else {
2646 TCGv tmp = tcg_const_i64(0);
a7812ae4 2647 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2648 tcg_temp_free(tmp);
2649 }
4c9649a9
JM
2650 break;
2651 case 0x025:
2652 /* MF_FPCR */
f18cd223 2653 if (likely(ra != 31))
a7812ae4 2654 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2655 break;
2656 case 0x02A:
2657 /* FCMOVEQ */
bbe1dab4 2658 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2659 break;
2660 case 0x02B:
2661 /* FCMOVNE */
bbe1dab4 2662 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2663 break;
2664 case 0x02C:
2665 /* FCMOVLT */
bbe1dab4 2666 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2667 break;
2668 case 0x02D:
2669 /* FCMOVGE */
bbe1dab4 2670 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2671 break;
2672 case 0x02E:
2673 /* FCMOVLE */
bbe1dab4 2674 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2675 break;
2676 case 0x02F:
2677 /* FCMOVGT */
bbe1dab4 2678 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2679 break;
2680 case 0x030:
2681 /* CVTQL */
a7812ae4 2682 gen_fcvtql(rb, rc);
4c9649a9
JM
2683 break;
2684 case 0x130:
2685 /* CVTQL/V */
4c9649a9
JM
2686 case 0x530:
2687 /* CVTQL/SV */
735cf45f
RH
2688 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2689 /v doesn't do. The only thing I can think is that /sv is a
2690 valid instruction merely for completeness in the ISA. */
2691 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2692 break;
2693 default:
2694 goto invalid_opc;
2695 }
2696 break;
2697 case 0x18:
2698 switch ((uint16_t)disp16) {
2699 case 0x0000:
2700 /* TRAPB */
4af70374 2701 /* No-op. */
4c9649a9
JM
2702 break;
2703 case 0x0400:
2704 /* EXCB */
4af70374 2705 /* No-op. */
4c9649a9
JM
2706 break;
2707 case 0x4000:
2708 /* MB */
2709 /* No-op */
2710 break;
2711 case 0x4400:
2712 /* WMB */
2713 /* No-op */
2714 break;
2715 case 0x8000:
2716 /* FETCH */
2717 /* No-op */
2718 break;
2719 case 0xA000:
2720 /* FETCH_M */
2721 /* No-op */
2722 break;
2723 case 0xC000:
2724 /* RPCC */
3761035f 2725 if (ra != 31)
a7812ae4 2726 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2727 break;
2728 case 0xE000:
2729 /* RC */
ac316ca4 2730 gen_rx(ra, 0);
4c9649a9
JM
2731 break;
2732 case 0xE800:
2733 /* ECB */
4c9649a9
JM
2734 break;
2735 case 0xF000:
2736 /* RS */
ac316ca4 2737 gen_rx(ra, 1);
4c9649a9
JM
2738 break;
2739 case 0xF800:
2740 /* WH64 */
2741 /* No-op */
2742 break;
2743 default:
2744 goto invalid_opc;
2745 }
2746 break;
2747 case 0x19:
2748 /* HW_MFPR (PALcode) */
26b46094 2749#ifndef CONFIG_USER_ONLY
a18ad893 2750 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
26b46094
RH
2751 gen_mfpr(ra, insn & 0xffff);
2752 break;
2753 }
2754#endif
4c9649a9 2755 goto invalid_opc;
4c9649a9 2756 case 0x1A:
49563a72
RH
2757 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2758 prediction stack action, which of course we don't implement. */
2759 if (rb != 31) {
3761035f 2760 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2761 } else {
3761035f 2762 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2763 }
2764 if (ra != 31) {
1304ca87 2765 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2766 }
4af70374 2767 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2768 break;
2769 case 0x1B:
2770 /* HW_LD (PALcode) */
a18ad893
RH
2771#ifndef CONFIG_USER_ONLY
2772 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2773 TCGv addr;
2774
2775 if (ra == 31) {
2776 break;
2777 }
2778
2779 addr = tcg_temp_new();
8bb6e981
AJ
2780 if (rb != 31)
2781 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2782 else
2783 tcg_gen_movi_i64(addr, disp12);
2784 switch ((insn >> 12) & 0xF) {
2785 case 0x0:
b5d51029 2786 /* Longword physical access (hw_ldl/p) */
2374e73e 2787 gen_helper_ldl_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2788 break;
2789 case 0x1:
b5d51029 2790 /* Quadword physical access (hw_ldq/p) */
2374e73e 2791 gen_helper_ldq_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2792 break;
2793 case 0x2:
b5d51029 2794 /* Longword physical access with lock (hw_ldl_l/p) */
2374e73e 2795 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2796 break;
2797 case 0x3:
b5d51029 2798 /* Quadword physical access with lock (hw_ldq_l/p) */
2374e73e 2799 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
8bb6e981
AJ
2800 break;
2801 case 0x4:
b5d51029 2802 /* Longword virtual PTE fetch (hw_ldl/v) */
2374e73e 2803 goto invalid_opc;
8bb6e981 2804 case 0x5:
b5d51029 2805 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374e73e 2806 goto invalid_opc;
8bb6e981
AJ
2807 break;
2808 case 0x6:
2809 /* Incpu_ir[ra]id */
b5d51029 2810 goto invalid_opc;
8bb6e981
AJ
2811 case 0x7:
2812 /* Incpu_ir[ra]id */
b5d51029 2813 goto invalid_opc;
8bb6e981 2814 case 0x8:
b5d51029 2815 /* Longword virtual access (hw_ldl) */
2374e73e 2816 goto invalid_opc;
8bb6e981 2817 case 0x9:
b5d51029 2818 /* Quadword virtual access (hw_ldq) */
2374e73e 2819 goto invalid_opc;
8bb6e981 2820 case 0xA:
b5d51029 2821 /* Longword virtual access with protection check (hw_ldl/w) */
8417845e 2822 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2823 break;
2824 case 0xB:
b5d51029 2825 /* Quadword virtual access with protection check (hw_ldq/w) */
8417845e 2826 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
8bb6e981
AJ
2827 break;
2828 case 0xC:
b5d51029 2829 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2374e73e 2830 goto invalid_opc;
8bb6e981 2831 case 0xD:
b5d51029 2832 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2374e73e 2833 goto invalid_opc;
8bb6e981
AJ
2834 case 0xE:
2835 /* Longword virtual access with alternate access mode and
2374e73e
RH
2836 protection checks (hw_ldl/wa) */
2837 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2838 break;
2839 case 0xF:
2840 /* Quadword virtual access with alternate access mode and
2374e73e
RH
2841 protection checks (hw_ldq/wa) */
2842 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
8bb6e981
AJ
2843 break;
2844 }
2845 tcg_temp_free(addr);
a18ad893 2846 break;
4c9649a9 2847 }
4c9649a9 2848#endif
a18ad893 2849 goto invalid_opc;
4c9649a9
JM
2850 case 0x1C:
2851 switch (fn7) {
2852 case 0x00:
2853 /* SEXTB */
a18ad893 2854 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
4c9649a9 2855 goto invalid_opc;
a18ad893 2856 }
ae8ecd42
AJ
2857 if (likely(rc != 31)) {
2858 if (islit)
2859 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2860 else
dfaa8583 2861 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2862 }
4c9649a9
JM
2863 break;
2864 case 0x01:
2865 /* SEXTW */
a18ad893
RH
2866 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2867 if (likely(rc != 31)) {
2868 if (islit) {
2869 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2870 } else {
2871 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2872 }
2873 }
2874 break;
ae8ecd42 2875 }
a18ad893 2876 goto invalid_opc;
4c9649a9
JM
2877 case 0x30:
2878 /* CTPOP */
a18ad893
RH
2879 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2880 if (likely(rc != 31)) {
2881 if (islit) {
2882 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2883 } else {
2884 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2885 }
2886 }
2887 break;
ae8ecd42 2888 }
a18ad893 2889 goto invalid_opc;
4c9649a9
JM
2890 case 0x31:
2891 /* PERR */
a18ad893
RH
2892 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2893 gen_perr(ra, rb, rc, islit, lit);
2894 break;
2895 }
2896 goto invalid_opc;
4c9649a9
JM
2897 case 0x32:
2898 /* CTLZ */
a18ad893
RH
2899 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2900 if (likely(rc != 31)) {
2901 if (islit) {
2902 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2903 } else {
2904 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2905 }
2906 }
2907 break;
ae8ecd42 2908 }
a18ad893 2909 goto invalid_opc;
4c9649a9
JM
2910 case 0x33:
2911 /* CTTZ */
a18ad893
RH
2912 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2913 if (likely(rc != 31)) {
2914 if (islit) {
2915 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2916 } else {
2917 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2918 }
2919 }
2920 break;
ae8ecd42 2921 }
a18ad893 2922 goto invalid_opc;
4c9649a9
JM
2923 case 0x34:
2924 /* UNPKBW */
a18ad893
RH
2925 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2926 if (real_islit || ra != 31) {
2927 goto invalid_opc;
2928 }
2929 gen_unpkbw(rb, rc);
2930 break;
2931 }
2932 goto invalid_opc;
4c9649a9 2933 case 0x35:
13e4df99 2934 /* UNPKBL */
a18ad893
RH
2935 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2936 if (real_islit || ra != 31) {
2937 goto invalid_opc;
2938 }
2939 gen_unpkbl(rb, rc);
2940 break;
2941 }
2942 goto invalid_opc;
4c9649a9
JM
2943 case 0x36:
2944 /* PKWB */
a18ad893
RH
2945 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2946 if (real_islit || ra != 31) {
2947 goto invalid_opc;
2948 }
2949 gen_pkwb(rb, rc);
2950 break;
2951 }
2952 goto invalid_opc;
4c9649a9
JM
2953 case 0x37:
2954 /* PKLB */
a18ad893
RH
2955 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2956 if (real_islit || ra != 31) {
2957 goto invalid_opc;
2958 }
2959 gen_pklb(rb, rc);
2960 break;
2961 }
2962 goto invalid_opc;
4c9649a9
JM
2963 case 0x38:
2964 /* MINSB8 */
a18ad893
RH
2965 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2966 gen_minsb8(ra, rb, rc, islit, lit);
2967 break;
2968 }
2969 goto invalid_opc;
4c9649a9
JM
2970 case 0x39:
2971 /* MINSW4 */
a18ad893
RH
2972 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2973 gen_minsw4(ra, rb, rc, islit, lit);
2974 break;
2975 }
2976 goto invalid_opc;
4c9649a9
JM
2977 case 0x3A:
2978 /* MINUB8 */
a18ad893
RH
2979 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2980 gen_minub8(ra, rb, rc, islit, lit);
2981 break;
2982 }
2983 goto invalid_opc;
4c9649a9
JM
2984 case 0x3B:
2985 /* MINUW4 */
a18ad893
RH
2986 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2987 gen_minuw4(ra, rb, rc, islit, lit);
2988 break;
2989 }
2990 goto invalid_opc;
4c9649a9
JM
2991 case 0x3C:
2992 /* MAXUB8 */
a18ad893
RH
2993 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2994 gen_maxub8(ra, rb, rc, islit, lit);
2995 break;
2996 }
2997 goto invalid_opc;
4c9649a9
JM
2998 case 0x3D:
2999 /* MAXUW4 */
a18ad893
RH
3000 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3001 gen_maxuw4(ra, rb, rc, islit, lit);
3002 break;
3003 }
3004 goto invalid_opc;
4c9649a9
JM
3005 case 0x3E:
3006 /* MAXSB8 */
a18ad893
RH
3007 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3008 gen_maxsb8(ra, rb, rc, islit, lit);
3009 break;
3010 }
3011 goto invalid_opc;
4c9649a9
JM
3012 case 0x3F:
3013 /* MAXSW4 */
a18ad893
RH
3014 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3015 gen_maxsw4(ra, rb, rc, islit, lit);
3016 break;
3017 }
3018 goto invalid_opc;
4c9649a9
JM
3019 case 0x70:
3020 /* FTOIT */
a18ad893 3021 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3022 goto invalid_opc;
a18ad893 3023 }
f18cd223
AJ
3024 if (likely(rc != 31)) {
3025 if (ra != 31)
3026 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3027 else
3028 tcg_gen_movi_i64(cpu_ir[rc], 0);
3029 }
4c9649a9
JM
3030 break;
3031 case 0x78:
3032 /* FTOIS */
a18ad893 3033 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
4c9649a9 3034 goto invalid_opc;
a18ad893 3035 }
f18cd223 3036 if (rc != 31) {
a7812ae4 3037 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 3038 if (ra != 31)
a7812ae4 3039 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
3040 else {
3041 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 3042 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
3043 tcg_temp_free(tmp2);
3044 }
3045 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 3046 tcg_temp_free_i32(tmp1);
f18cd223 3047 }
4c9649a9
JM
3048 break;
3049 default:
3050 goto invalid_opc;
3051 }
3052 break;
3053 case 0x1D:
3054 /* HW_MTPR (PALcode) */
26b46094 3055#ifndef CONFIG_USER_ONLY
a18ad893
RH
3056 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3057 gen_mtpr(rb, insn & 0xffff);
26b46094
RH
3058 break;
3059 }
3060#endif
4c9649a9 3061 goto invalid_opc;
4c9649a9 3062 case 0x1E:
508b43ea 3063 /* HW_RET (PALcode) */
a18ad893
RH
3064#ifndef CONFIG_USER_ONLY
3065 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3066 if (rb == 31) {
3067 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3068 address from EXC_ADDR. This turns out to be useful for our
3069 emulation PALcode, so continue to accept it. */
3070 TCGv tmp = tcg_temp_new();
3071 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
3072 gen_helper_hw_ret(tmp);
3073 tcg_temp_free(tmp);
3074 } else {
3075 gen_helper_hw_ret(cpu_ir[rb]);
3076 }
3077 ret = EXIT_PC_UPDATED;
3078 break;
4c9649a9 3079 }
4c9649a9 3080#endif
a18ad893 3081 goto invalid_opc;
4c9649a9
JM
3082 case 0x1F:
3083 /* HW_ST (PALcode) */
a18ad893
RH
3084#ifndef CONFIG_USER_ONLY
3085 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
8bb6e981 3086 TCGv addr, val;
a7812ae4 3087 addr = tcg_temp_new();
8bb6e981
AJ
3088 if (rb != 31)
3089 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3090 else
3091 tcg_gen_movi_i64(addr, disp12);
3092 if (ra != 31)
3093 val = cpu_ir[ra];
3094 else {
a7812ae4 3095 val = tcg_temp_new();
8bb6e981
AJ
3096 tcg_gen_movi_i64(val, 0);
3097 }
3098 switch ((insn >> 12) & 0xF) {
3099 case 0x0:
3100 /* Longword physical access */
2374e73e 3101 gen_helper_stl_phys(addr, val);
8bb6e981
AJ
3102 break;
3103 case 0x1:
3104 /* Quadword physical access */
2374e73e 3105 gen_helper_stq_phys(addr, val);
8bb6e981
AJ
3106 break;
3107 case 0x2:
3108 /* Longword physical access with lock */
2374e73e 3109 gen_helper_stl_c_phys(val, addr, val);
8bb6e981
AJ
3110 break;
3111 case 0x3:
3112 /* Quadword physical access with lock */
2374e73e 3113 gen_helper_stq_c_phys(val, addr, val);
8bb6e981
AJ
3114 break;
3115 case 0x4:
3116 /* Longword virtual access */
2374e73e 3117 goto invalid_opc;
8bb6e981
AJ
3118 case 0x5:
3119 /* Quadword virtual access */
2374e73e 3120 goto invalid_opc;
8bb6e981
AJ
3121 case 0x6:
3122 /* Invalid */
3123 goto invalid_opc;
3124 case 0x7:
3125 /* Invalid */
3126 goto invalid_opc;
3127 case 0x8:
3128 /* Invalid */
3129 goto invalid_opc;
3130 case 0x9:
3131 /* Invalid */
3132 goto invalid_opc;
3133 case 0xA:
3134 /* Invalid */
3135 goto invalid_opc;
3136 case 0xB:
3137 /* Invalid */
3138 goto invalid_opc;
3139 case 0xC:
3140 /* Longword virtual access with alternate access mode */
2374e73e 3141 goto invalid_opc;
8bb6e981
AJ
3142 case 0xD:
3143 /* Quadword virtual access with alternate access mode */
2374e73e 3144 goto invalid_opc;
8bb6e981
AJ
3145 case 0xE:
3146 /* Invalid */
3147 goto invalid_opc;
3148 case 0xF:
3149 /* Invalid */
3150 goto invalid_opc;
3151 }
45d46ce8 3152 if (ra == 31)
8bb6e981
AJ
3153 tcg_temp_free(val);
3154 tcg_temp_free(addr);
a18ad893 3155 break;
4c9649a9 3156 }
4c9649a9 3157#endif
a18ad893 3158 goto invalid_opc;
4c9649a9
JM
3159 case 0x20:
3160 /* LDF */
f18cd223 3161 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3162 break;
3163 case 0x21:
3164 /* LDG */
f18cd223 3165 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3166 break;
3167 case 0x22:
3168 /* LDS */
f18cd223 3169 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3170 break;
3171 case 0x23:
3172 /* LDT */
f18cd223 3173 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3174 break;
3175 case 0x24:
3176 /* STF */
6910b8f6 3177 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3178 break;
3179 case 0x25:
3180 /* STG */
6910b8f6 3181 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3182 break;
3183 case 0x26:
3184 /* STS */
6910b8f6 3185 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3186 break;
3187 case 0x27:
3188 /* STT */
6910b8f6 3189 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3190 break;
3191 case 0x28:
3192 /* LDL */
f18cd223 3193 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3194 break;
3195 case 0x29:
3196 /* LDQ */
f18cd223 3197 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3198 break;
3199 case 0x2A:
3200 /* LDL_L */
f4ed8679 3201 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3202 break;
3203 case 0x2B:
3204 /* LDQ_L */
f4ed8679 3205 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3206 break;
3207 case 0x2C:
3208 /* STL */
6910b8f6 3209 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3210 break;
3211 case 0x2D:
3212 /* STQ */
6910b8f6 3213 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3214 break;
3215 case 0x2E:
3216 /* STL_C */
6910b8f6 3217 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3218 break;
3219 case 0x2F:
3220 /* STQ_C */
6910b8f6 3221 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3222 break;
3223 case 0x30:
3224 /* BR */
4af70374 3225 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3226 break;
a7812ae4 3227 case 0x31: /* FBEQ */
4af70374 3228 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3229 break;
a7812ae4 3230 case 0x32: /* FBLT */
4af70374 3231 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3232 break;
a7812ae4 3233 case 0x33: /* FBLE */
4af70374 3234 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3235 break;
3236 case 0x34:
3237 /* BSR */
4af70374 3238 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3239 break;
a7812ae4 3240 case 0x35: /* FBNE */
4af70374 3241 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3242 break;
a7812ae4 3243 case 0x36: /* FBGE */
4af70374 3244 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3245 break;
a7812ae4 3246 case 0x37: /* FBGT */
4af70374 3247 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3248 break;
3249 case 0x38:
3250 /* BLBC */
4af70374 3251 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3252 break;
3253 case 0x39:
3254 /* BEQ */
4af70374 3255 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3256 break;
3257 case 0x3A:
3258 /* BLT */
4af70374 3259 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3260 break;
3261 case 0x3B:
3262 /* BLE */
4af70374 3263 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3264 break;
3265 case 0x3C:
3266 /* BLBS */
4af70374 3267 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3268 break;
3269 case 0x3D:
3270 /* BNE */
4af70374 3271 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3272 break;
3273 case 0x3E:
3274 /* BGE */
4af70374 3275 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3276 break;
3277 case 0x3F:
3278 /* BGT */
4af70374 3279 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3280 break;
3281 invalid_opc:
8aa3fa20 3282 ret = gen_invalid(ctx);
4c9649a9
JM
3283 break;
3284 }
3285
3286 return ret;
3287}
3288
636aa200
BS
3289static inline void gen_intermediate_code_internal(CPUState *env,
3290 TranslationBlock *tb,
3291 int search_pc)
4c9649a9 3292{
4c9649a9
JM
3293 DisasContext ctx, *ctxp = &ctx;
3294 target_ulong pc_start;
3295 uint32_t insn;
3296 uint16_t *gen_opc_end;
a1d1bb31 3297 CPUBreakpoint *bp;
4c9649a9 3298 int j, lj = -1;
4af70374 3299 ExitStatus ret;
2e70f6ef
PB
3300 int num_insns;
3301 int max_insns;
4c9649a9
JM
3302
3303 pc_start = tb->pc;
4c9649a9 3304 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3305
3306 ctx.tb = tb;
3307 ctx.env = env;
4c9649a9 3308 ctx.pc = pc_start;
bba9bdce 3309 ctx.mem_idx = cpu_mmu_index(env);
f24518b5
RH
3310
3311 /* ??? Every TB begins with unset rounding mode, to be initialized on
3312 the first fp insn of the TB. Alternately we could define a proper
3313 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3314 to reset the FP_STATUS to that default at the end of any TB that
3315 changes the default. We could even (gasp) dynamiclly figure out
3316 what default would be most efficient given the running program. */
3317 ctx.tb_rm = -1;
3318 /* Similarly for flush-to-zero. */
3319 ctx.tb_ftz = -1;
3320
2e70f6ef
PB
3321 num_insns = 0;
3322 max_insns = tb->cflags & CF_COUNT_MASK;
3323 if (max_insns == 0)
3324 max_insns = CF_COUNT_MASK;
3325
3326 gen_icount_start();
4af70374 3327 do {
72cf2d4f
BS
3328 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3329 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3330 if (bp->pc == ctx.pc) {
4c9649a9
JM
3331 gen_excp(&ctx, EXCP_DEBUG, 0);
3332 break;
3333 }
3334 }
3335 }
3336 if (search_pc) {
3337 j = gen_opc_ptr - gen_opc_buf;
3338 if (lj < j) {
3339 lj++;
3340 while (lj < j)
3341 gen_opc_instr_start[lj++] = 0;
4c9649a9 3342 }
ed1dda53
AJ
3343 gen_opc_pc[lj] = ctx.pc;
3344 gen_opc_instr_start[lj] = 1;
3345 gen_opc_icount[lj] = num_insns;
4c9649a9 3346 }
2e70f6ef
PB
3347 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3348 gen_io_start();
4c9649a9 3349 insn = ldl_code(ctx.pc);
2e70f6ef 3350 num_insns++;
c4b3be39
RH
3351
3352 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3353 tcg_gen_debug_insn_start(ctx.pc);
3354 }
3355
4c9649a9
JM
3356 ctx.pc += 4;
3357 ret = translate_one(ctxp, insn);
19bf517b 3358
bf1b03fe
RH
3359 /* If we reach a page boundary, are single stepping,
3360 or exhaust instruction count, stop generation. */
3361 if (ret == NO_EXIT
3362 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3363 || gen_opc_ptr >= gen_opc_end
3364 || num_insns >= max_insns
3365 || singlestep
3366 || env->singlestep_enabled)) {
3367 ret = EXIT_PC_STALE;
1b530a6d 3368 }
4af70374
RH
3369 } while (ret == NO_EXIT);
3370
3371 if (tb->cflags & CF_LAST_IO) {
3372 gen_io_end();
4c9649a9 3373 }
4af70374
RH
3374
3375 switch (ret) {
3376 case EXIT_GOTO_TB:
8aa3fa20 3377 case EXIT_NORETURN:
4af70374
RH
3378 break;
3379 case EXIT_PC_STALE:
496cb5b9 3380 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3381 /* FALLTHRU */
3382 case EXIT_PC_UPDATED:
bf1b03fe
RH
3383 if (env->singlestep_enabled) {
3384 gen_excp_1(EXCP_DEBUG, 0);
3385 } else {
3386 tcg_gen_exit_tb(0);
3387 }
4af70374
RH
3388 break;
3389 default:
3390 abort();
4c9649a9 3391 }
4af70374 3392
2e70f6ef 3393 gen_icount_end(tb, num_insns);
4c9649a9
JM
3394 *gen_opc_ptr = INDEX_op_end;
3395 if (search_pc) {
3396 j = gen_opc_ptr - gen_opc_buf;
3397 lj++;
3398 while (lj <= j)
3399 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3400 } else {
3401 tb->size = ctx.pc - pc_start;
2e70f6ef 3402 tb->icount = num_insns;
4c9649a9 3403 }
4af70374 3404
806991da 3405#ifdef DEBUG_DISAS
8fec2b8c 3406 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3407 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3408 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3409 qemu_log("\n");
4c9649a9 3410 }
4c9649a9 3411#endif
4c9649a9
JM
3412}
3413
2cfc5f17 3414void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3415{
2cfc5f17 3416 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3417}
3418
2cfc5f17 3419void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3420{
2cfc5f17 3421 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3422}
3423
/* Supported CPU models: maps a "-cpu" model name to the implementation
   version (IMPLVER) and the architecture-extension mask (AMASK) that the
   emulated CPU reports to the guest.  */
struct cpu_def_t {
    const char *name;      /* model name matched against the user's request */
    int implver, amask;    /* IMPLVER_* value; OR of AMASK_* feature bits */
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    /* Aliases using the part numbers rather than the core names.  */
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
3447
aaed909a 3448CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3449{
3450 CPUAlphaState *env;
a964acc6 3451 int implver, amask, i, max;
4c9649a9
JM
3452
3453 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3454 cpu_exec_init(env);
2e70f6ef 3455 alpha_translate_init();
4c9649a9 3456 tlb_flush(env, 1);
a964acc6
RH
3457
3458 /* Default to ev67; no reason not to emulate insns by default. */
3459 implver = IMPLVER_21264;
3460 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3461 | AMASK_TRAP | AMASK_PREFETCH);
3462
3463 max = ARRAY_SIZE(cpu_defs);
3464 for (i = 0; i < max; i++) {
3465 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3466 implver = cpu_defs[i].implver;
3467 amask = cpu_defs[i].amask;
3468 break;
3469 }
3470 }
3471 env->implver = implver;
3472 env->amask = amask;
3473
4c9649a9 3474#if defined (CONFIG_USER_ONLY)
ea879fc7 3475 env->ps = PS_USER_MODE;
2edd07ef
RH
3476 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3477 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3478#endif
6910b8f6 3479 env->lock_addr = -1;
26b46094 3480 env->fen = 1;
dad081ee 3481
0bf46a40 3482 qemu_init_vcpu(env);
4c9649a9
JM
3483 return env;
3484}
aaed909a 3485
/* Restore CPU state after an exception inside a TB: reload the guest PC
   recorded for op index pc_pos by the search_pc translation pass.
   The PC is the only per-insn state this target needs to recover, so
   tb is unused here.  */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}