]> git.proxmox.com Git - qemu.git/blame - target-alpha/translate.c
target-alpha: Enable NPTL.
[qemu.git] / target-alpha / translate.c
CommitLineData
4c9649a9
JM
1/*
2 * Alpha emulation cpu translation for qemu.
5fafdf24 3 *
4c9649a9
JM
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
4c9649a9
JM
18 */
19
20#include <stdint.h>
21#include <stdlib.h>
22#include <stdio.h>
23
24#include "cpu.h"
25#include "exec-all.h"
26#include "disas.h"
ae8ecd42 27#include "host-utils.h"
57fec1fe 28#include "tcg-op.h"
ca10f867 29#include "qemu-common.h"
4c9649a9 30
a7812ae4
PB
31#include "helper.h"
32#define GEN_HELPER 1
33#include "helper.h"
34
19188121 35#undef ALPHA_DEBUG_DISAS
f24518b5 36#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
37
38#ifdef ALPHA_DEBUG_DISAS
806991da 39# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
40#else
41# define LOG_DISAS(...) do { } while (0)
42#endif
43
4c9649a9
JM
44typedef struct DisasContext DisasContext;
45struct DisasContext {
4af70374
RH
46 struct TranslationBlock *tb;
47 CPUAlphaState *env;
4c9649a9
JM
48 uint64_t pc;
49 int mem_idx;
50#if !defined (CONFIG_USER_ONLY)
51 int pal_mode;
52#endif
53 uint32_t amask;
f24518b5
RH
54
55 /* Current rounding mode for this TB. */
56 int tb_rm;
57 /* Current flush-to-zero setting for this TB. */
58 int tb_ftz;
4c9649a9
JM
59};
60
4af70374
RH
61/* Return values from translate_one, indicating the state of the TB.
62 Note that zero indicates that we are not exiting the TB. */
63
64typedef enum {
65 NO_EXIT,
66
67 /* We have emitted one or more goto_tb. No fixup required. */
68 EXIT_GOTO_TB,
69
70 /* We are not using a goto_tb (for whatever reason), but have updated
71 the PC (for whatever reason), so there's no need to do it again on
72 exiting the TB. */
73 EXIT_PC_UPDATED,
74
75 /* We are exiting the TB, but have neither emitted a goto_tb, nor
76 updated the PC for the next instruction to be executed. */
77 EXIT_PC_STALE
78} ExitStatus;
79
3761035f 80/* global register indexes */
a7812ae4 81static TCGv_ptr cpu_env;
496cb5b9 82static TCGv cpu_ir[31];
f18cd223 83static TCGv cpu_fir[31];
496cb5b9 84static TCGv cpu_pc;
f4ed8679 85static TCGv cpu_lock;
ab471ade
RH
86#ifdef CONFIG_USER_ONLY
87static TCGv cpu_uniq;
88#endif
496cb5b9 89
3761035f 90/* register names */
f18cd223 91static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef
PB
92
93#include "gen-icount.h"
94
a5f1b965 95static void alpha_translate_init(void)
2e70f6ef 96{
496cb5b9
AJ
97 int i;
98 char *p;
2e70f6ef 99 static int done_init = 0;
496cb5b9 100
2e70f6ef
PB
101 if (done_init)
102 return;
496cb5b9 103
a7812ae4 104 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
496cb5b9
AJ
105
106 p = cpu_reg_names;
107 for (i = 0; i < 31; i++) {
108 sprintf(p, "ir%d", i);
a7812ae4
PB
109 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
110 offsetof(CPUState, ir[i]), p);
6ba8dcd7 111 p += (i < 10) ? 4 : 5;
f18cd223
AJ
112
113 sprintf(p, "fir%d", i);
a7812ae4
PB
114 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
115 offsetof(CPUState, fir[i]), p);
f18cd223 116 p += (i < 10) ? 5 : 6;
496cb5b9
AJ
117 }
118
a7812ae4
PB
119 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
120 offsetof(CPUState, pc), "pc");
496cb5b9 121
a7812ae4
PB
122 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
123 offsetof(CPUState, lock), "lock");
f4ed8679 124
ab471ade
RH
125#ifdef CONFIG_USER_ONLY
126 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
127 offsetof(CPUState, unique), "uniq");
128#endif
129
496cb5b9 130 /* register helpers */
a7812ae4 131#define GEN_HELPER 2
496cb5b9
AJ
132#include "helper.h"
133
2e70f6ef
PB
134 done_init = 1;
135}
136
636aa200 137static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
4c9649a9 138{
a7812ae4 139 TCGv_i32 tmp1, tmp2;
6ad02592 140
496cb5b9 141 tcg_gen_movi_i64(cpu_pc, ctx->pc);
6ad02592
AJ
142 tmp1 = tcg_const_i32(exception);
143 tmp2 = tcg_const_i32(error_code);
a7812ae4
PB
144 gen_helper_excp(tmp1, tmp2);
145 tcg_temp_free_i32(tmp2);
146 tcg_temp_free_i32(tmp1);
4c9649a9
JM
147}
148
636aa200 149static inline void gen_invalid(DisasContext *ctx)
4c9649a9
JM
150{
151 gen_excp(ctx, EXCP_OPCDEC, 0);
152}
153
636aa200 154static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 155{
a7812ae4
PB
156 TCGv tmp = tcg_temp_new();
157 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 158 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
159 tcg_gen_trunc_i64_i32(tmp32, tmp);
160 gen_helper_memory_to_f(t0, tmp32);
161 tcg_temp_free_i32(tmp32);
f18cd223
AJ
162 tcg_temp_free(tmp);
163}
164
636aa200 165static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 166{
a7812ae4 167 TCGv tmp = tcg_temp_new();
f18cd223 168 tcg_gen_qemu_ld64(tmp, t1, flags);
a7812ae4 169 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
170 tcg_temp_free(tmp);
171}
172
636aa200 173static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 174{
a7812ae4
PB
175 TCGv tmp = tcg_temp_new();
176 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 177 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
178 tcg_gen_trunc_i64_i32(tmp32, tmp);
179 gen_helper_memory_to_s(t0, tmp32);
180 tcg_temp_free_i32(tmp32);
f18cd223
AJ
181 tcg_temp_free(tmp);
182}
183
636aa200 184static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
185{
186 tcg_gen_mov_i64(cpu_lock, t1);
187 tcg_gen_qemu_ld32s(t0, t1, flags);
188}
189
636aa200 190static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
191{
192 tcg_gen_mov_i64(cpu_lock, t1);
193 tcg_gen_qemu_ld64(t0, t1, flags);
194}
195
636aa200
BS
196static inline void gen_load_mem(DisasContext *ctx,
197 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
198 int flags),
199 int ra, int rb, int32_t disp16, int fp,
200 int clear)
023d8ca2
AJ
201{
202 TCGv addr;
203
204 if (unlikely(ra == 31))
205 return;
206
a7812ae4 207 addr = tcg_temp_new();
023d8ca2
AJ
208 if (rb != 31) {
209 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
210 if (clear)
211 tcg_gen_andi_i64(addr, addr, ~0x7);
212 } else {
213 if (clear)
214 disp16 &= ~0x7;
215 tcg_gen_movi_i64(addr, disp16);
216 }
f18cd223
AJ
217 if (fp)
218 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
219 else
220 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
023d8ca2
AJ
221 tcg_temp_free(addr);
222}
223
636aa200 224static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 225{
a7812ae4
PB
226 TCGv_i32 tmp32 = tcg_temp_new_i32();
227 TCGv tmp = tcg_temp_new();
228 gen_helper_f_to_memory(tmp32, t0);
229 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
230 tcg_gen_qemu_st32(tmp, t1, flags);
231 tcg_temp_free(tmp);
a7812ae4 232 tcg_temp_free_i32(tmp32);
f18cd223
AJ
233}
234
636aa200 235static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 236{
a7812ae4
PB
237 TCGv tmp = tcg_temp_new();
238 gen_helper_g_to_memory(tmp, t0);
f18cd223
AJ
239 tcg_gen_qemu_st64(tmp, t1, flags);
240 tcg_temp_free(tmp);
241}
242
636aa200 243static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 244{
a7812ae4
PB
245 TCGv_i32 tmp32 = tcg_temp_new_i32();
246 TCGv tmp = tcg_temp_new();
247 gen_helper_s_to_memory(tmp32, t0);
248 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
249 tcg_gen_qemu_st32(tmp, t1, flags);
250 tcg_temp_free(tmp);
a7812ae4 251 tcg_temp_free_i32(tmp32);
f18cd223
AJ
252}
253
636aa200 254static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
255{
256 int l1, l2;
257
258 l1 = gen_new_label();
259 l2 = gen_new_label();
260 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
261 tcg_gen_qemu_st32(t0, t1, flags);
6223246a 262 tcg_gen_movi_i64(t0, 1);
f4ed8679
AJ
263 tcg_gen_br(l2);
264 gen_set_label(l1);
6223246a 265 tcg_gen_movi_i64(t0, 0);
f4ed8679
AJ
266 gen_set_label(l2);
267 tcg_gen_movi_i64(cpu_lock, -1);
268}
269
636aa200 270static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
f4ed8679
AJ
271{
272 int l1, l2;
273
274 l1 = gen_new_label();
275 l2 = gen_new_label();
276 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
277 tcg_gen_qemu_st64(t0, t1, flags);
6223246a 278 tcg_gen_movi_i64(t0, 1);
f4ed8679
AJ
279 tcg_gen_br(l2);
280 gen_set_label(l1);
6223246a 281 tcg_gen_movi_i64(t0, 0);
f4ed8679
AJ
282 gen_set_label(l2);
283 tcg_gen_movi_i64(cpu_lock, -1);
284}
285
636aa200
BS
286static inline void gen_store_mem(DisasContext *ctx,
287 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
288 int flags),
289 int ra, int rb, int32_t disp16, int fp,
290 int clear, int local)
023d8ca2 291{
9cd38c23 292 TCGv addr;
57a92c8e 293 if (local)
a7812ae4 294 addr = tcg_temp_local_new();
57a92c8e 295 else
a7812ae4 296 addr = tcg_temp_new();
023d8ca2
AJ
297 if (rb != 31) {
298 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
299 if (clear)
300 tcg_gen_andi_i64(addr, addr, ~0x7);
301 } else {
302 if (clear)
303 disp16 &= ~0x7;
304 tcg_gen_movi_i64(addr, disp16);
305 }
f18cd223
AJ
306 if (ra != 31) {
307 if (fp)
308 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
309 else
310 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
311 } else {
57a92c8e
AJ
312 TCGv zero;
313 if (local)
314 zero = tcg_const_local_i64(0);
315 else
316 zero = tcg_const_i64(0);
023d8ca2
AJ
317 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
318 tcg_temp_free(zero);
319 }
320 tcg_temp_free(addr);
321}
322
4af70374 323static int use_goto_tb(DisasContext *ctx, uint64_t dest)
4c9649a9 324{
4af70374
RH
325 /* Check for the dest on the same page as the start of the TB. We
326 also want to suppress goto_tb in the case of single-steping and IO. */
327 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
328 && !ctx->env->singlestep_enabled
329 && !(ctx->tb->cflags & CF_LAST_IO));
330}
dbb30fe6 331
4af70374
RH
332static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
333{
334 uint64_t dest = ctx->pc + (disp << 2);
335
336 if (ra != 31) {
337 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
338 }
339
340 /* Notice branch-to-next; used to initialize RA with the PC. */
341 if (disp == 0) {
342 return 0;
343 } else if (use_goto_tb(ctx, dest)) {
344 tcg_gen_goto_tb(0);
345 tcg_gen_movi_i64(cpu_pc, dest);
346 tcg_gen_exit_tb((long)ctx->tb);
347 return EXIT_GOTO_TB;
348 } else {
349 tcg_gen_movi_i64(cpu_pc, dest);
350 return EXIT_PC_UPDATED;
351 }
dbb30fe6
RH
352}
353
4af70374
RH
354static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
355 TCGv cmp, int32_t disp)
dbb30fe6 356{
4af70374 357 uint64_t dest = ctx->pc + (disp << 2);
dbb30fe6 358 int lab_true = gen_new_label();
9c29504e 359
4af70374
RH
360 if (use_goto_tb(ctx, dest)) {
361 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
362
363 tcg_gen_goto_tb(0);
364 tcg_gen_movi_i64(cpu_pc, ctx->pc);
365 tcg_gen_exit_tb((long)ctx->tb);
366
367 gen_set_label(lab_true);
368 tcg_gen_goto_tb(1);
369 tcg_gen_movi_i64(cpu_pc, dest);
370 tcg_gen_exit_tb((long)ctx->tb + 1);
371
372 return EXIT_GOTO_TB;
373 } else {
374 int lab_over = gen_new_label();
375
376 /* ??? Consider using either
377 movi pc, next
378 addi tmp, pc, disp
379 movcond pc, cond, 0, tmp, pc
380 or
381 setcond tmp, cond, 0
382 movi pc, next
383 neg tmp, tmp
384 andi tmp, tmp, disp
385 add pc, pc, tmp
386 The current diamond subgraph surely isn't efficient. */
387
388 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
389 tcg_gen_movi_i64(cpu_pc, ctx->pc);
390 tcg_gen_br(lab_over);
391 gen_set_label(lab_true);
392 tcg_gen_movi_i64(cpu_pc, dest);
393 gen_set_label(lab_over);
394
395 return EXIT_PC_UPDATED;
396 }
397}
398
399static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
400 int32_t disp, int mask)
401{
402 TCGv cmp_tmp;
403
404 if (unlikely(ra == 31)) {
405 cmp_tmp = tcg_const_i64(0);
406 } else {
407 cmp_tmp = tcg_temp_new();
9c29504e 408 if (mask) {
4af70374 409 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
dbb30fe6 410 } else {
4af70374 411 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
dbb30fe6 412 }
9c29504e 413 }
4af70374
RH
414
415 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
416}
417
4af70374 418/* Fold -0.0 for comparison with COND. */
dbb30fe6 419
4af70374 420static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 421{
dbb30fe6 422 uint64_t mzero = 1ull << 63;
f18cd223 423
dbb30fe6
RH
424 switch (cond) {
425 case TCG_COND_LE:
426 case TCG_COND_GT:
427 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 428 tcg_gen_mov_i64(dest, src);
a7812ae4 429 break;
dbb30fe6
RH
430
431 case TCG_COND_EQ:
432 case TCG_COND_NE:
433 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 434 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 435 break;
dbb30fe6
RH
436
437 case TCG_COND_GE:
dbb30fe6 438 case TCG_COND_LT:
4af70374
RH
439 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
440 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
441 tcg_gen_neg_i64(dest, dest);
442 tcg_gen_and_i64(dest, dest, src);
a7812ae4 443 break;
dbb30fe6 444
a7812ae4
PB
445 default:
446 abort();
f18cd223 447 }
dbb30fe6
RH
448}
449
4af70374
RH
450static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
451 int32_t disp)
dbb30fe6 452{
4af70374 453 TCGv cmp_tmp;
dbb30fe6
RH
454
455 if (unlikely(ra == 31)) {
456 /* Very uncommon case, but easier to optimize it to an integer
457 comparison than continuing with the floating point comparison. */
4af70374 458 return gen_bcond(ctx, cond, ra, disp, 0);
dbb30fe6
RH
459 }
460
4af70374
RH
461 cmp_tmp = tcg_temp_new();
462 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
463 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
464}
465
bbe1dab4 466static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 467 int islit, uint8_t lit, int mask)
4c9649a9 468{
bbe1dab4 469 TCGCond inv_cond = tcg_invert_cond(cond);
9c29504e
AJ
470 int l1;
471
472 if (unlikely(rc == 31))
473 return;
474
475 l1 = gen_new_label();
476
477 if (ra != 31) {
478 if (mask) {
a7812ae4 479 TCGv tmp = tcg_temp_new();
9c29504e
AJ
480 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
481 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
482 tcg_temp_free(tmp);
483 } else
484 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
485 } else {
486 /* Very uncommon case - Do not bother to optimize. */
487 TCGv tmp = tcg_const_i64(0);
488 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
489 tcg_temp_free(tmp);
490 }
491
4c9649a9 492 if (islit)
9c29504e 493 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 494 else
dfaa8583 495 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 496 gen_set_label(l1);
4c9649a9
JM
497}
498
bbe1dab4 499static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 500{
4af70374 501 TCGv cmp_tmp;
dbb30fe6
RH
502 int l1;
503
4af70374 504 if (unlikely(rc == 31)) {
dbb30fe6 505 return;
4af70374
RH
506 }
507
508 cmp_tmp = tcg_temp_new();
dbb30fe6 509 if (unlikely(ra == 31)) {
4af70374
RH
510 tcg_gen_movi_i64(cmp_tmp, 0);
511 } else {
512 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
dbb30fe6
RH
513 }
514
515 l1 = gen_new_label();
4af70374
RH
516 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
517 tcg_temp_free(cmp_tmp);
dbb30fe6
RH
518
519 if (rb != 31)
520 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
521 else
522 tcg_gen_movi_i64(cpu_fir[rc], 0);
523 gen_set_label(l1);
524}
525
f24518b5
RH
526#define QUAL_RM_N 0x080 /* Round mode nearest even */
527#define QUAL_RM_C 0x000 /* Round mode chopped */
528#define QUAL_RM_M 0x040 /* Round mode minus infinity */
529#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
530#define QUAL_RM_MASK 0x0c0
531
532#define QUAL_U 0x100 /* Underflow enable (fp output) */
533#define QUAL_V 0x100 /* Overflow enable (int output) */
534#define QUAL_S 0x400 /* Software completion enable */
535#define QUAL_I 0x200 /* Inexact detection enable */
536
537static void gen_qual_roundmode(DisasContext *ctx, int fn11)
538{
539 TCGv_i32 tmp;
540
541 fn11 &= QUAL_RM_MASK;
542 if (fn11 == ctx->tb_rm) {
543 return;
544 }
545 ctx->tb_rm = fn11;
546
547 tmp = tcg_temp_new_i32();
548 switch (fn11) {
549 case QUAL_RM_N:
550 tcg_gen_movi_i32(tmp, float_round_nearest_even);
551 break;
552 case QUAL_RM_C:
553 tcg_gen_movi_i32(tmp, float_round_to_zero);
554 break;
555 case QUAL_RM_M:
556 tcg_gen_movi_i32(tmp, float_round_down);
557 break;
558 case QUAL_RM_D:
559 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
560 break;
561 }
562
563#if defined(CONFIG_SOFTFLOAT_INLINE)
564 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
565 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
566 sets the one field. */
567 tcg_gen_st8_i32(tmp, cpu_env,
568 offsetof(CPUState, fp_status.float_rounding_mode));
569#else
570 gen_helper_setroundmode(tmp);
571#endif
572
573 tcg_temp_free_i32(tmp);
574}
575
576static void gen_qual_flushzero(DisasContext *ctx, int fn11)
577{
578 TCGv_i32 tmp;
579
580 fn11 &= QUAL_U;
581 if (fn11 == ctx->tb_ftz) {
582 return;
583 }
584 ctx->tb_ftz = fn11;
585
586 tmp = tcg_temp_new_i32();
587 if (fn11) {
588 /* Underflow is enabled, use the FPCR setting. */
589 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
590 } else {
591 /* Underflow is disabled, force flush-to-zero. */
592 tcg_gen_movi_i32(tmp, 1);
593 }
594
595#if defined(CONFIG_SOFTFLOAT_INLINE)
596 tcg_gen_st8_i32(tmp, cpu_env,
597 offsetof(CPUState, fp_status.flush_to_zero));
598#else
599 gen_helper_setflushzero(tmp);
600#endif
601
602 tcg_temp_free_i32(tmp);
603}
604
605static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
606{
607 TCGv val = tcg_temp_new();
608 if (reg == 31) {
609 tcg_gen_movi_i64(val, 0);
610 } else if (fn11 & QUAL_S) {
611 gen_helper_ieee_input_s(val, cpu_fir[reg]);
612 } else if (is_cmp) {
613 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
614 } else {
615 gen_helper_ieee_input(val, cpu_fir[reg]);
616 }
617 return val;
618}
619
620static void gen_fp_exc_clear(void)
621{
622#if defined(CONFIG_SOFTFLOAT_INLINE)
623 TCGv_i32 zero = tcg_const_i32(0);
624 tcg_gen_st8_i32(zero, cpu_env,
625 offsetof(CPUState, fp_status.float_exception_flags));
626 tcg_temp_free_i32(zero);
627#else
628 gen_helper_fp_exc_clear();
629#endif
630}
631
632static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
633{
634 /* ??? We ought to be able to do something with imprecise exceptions.
635 E.g. notice we're still in the trap shadow of something within the
636 TB and do not generate the code to signal the exception; end the TB
637 when an exception is forced to arrive, either by consumption of a
638 register value or TRAPB or EXCB. */
639 TCGv_i32 exc = tcg_temp_new_i32();
640 TCGv_i32 reg;
641
642#if defined(CONFIG_SOFTFLOAT_INLINE)
643 tcg_gen_ld8u_i32(exc, cpu_env,
644 offsetof(CPUState, fp_status.float_exception_flags));
645#else
646 gen_helper_fp_exc_get(exc);
647#endif
648
649 if (ignore) {
650 tcg_gen_andi_i32(exc, exc, ~ignore);
651 }
652
653 /* ??? Pass in the regno of the destination so that the helper can
654 set EXC_MASK, which contains a bitmask of destination registers
655 that have caused arithmetic traps. A simple userspace emulation
656 does not require this. We do need it for a guest kernel's entArith,
657 or if we were to do something clever with imprecise exceptions. */
658 reg = tcg_const_i32(rc + 32);
659
660 if (fn11 & QUAL_S) {
661 gen_helper_fp_exc_raise_s(exc, reg);
662 } else {
663 gen_helper_fp_exc_raise(exc, reg);
664 }
665
666 tcg_temp_free_i32(reg);
667 tcg_temp_free_i32(exc);
668}
669
670static inline void gen_fp_exc_raise(int rc, int fn11)
671{
672 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 673}
f24518b5 674
593f17e5
RH
675static void gen_fcvtlq(int rb, int rc)
676{
677 if (unlikely(rc == 31)) {
678 return;
679 }
680 if (unlikely(rb == 31)) {
681 tcg_gen_movi_i64(cpu_fir[rc], 0);
682 } else {
683 TCGv tmp = tcg_temp_new();
684
685 /* The arithmetic right shift here, plus the sign-extended mask below
686 yields a sign-extended result without an explicit ext32s_i64. */
687 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
688 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
689 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
690 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
691 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
692
693 tcg_temp_free(tmp);
694 }
695}
696
735cf45f
RH
697static void gen_fcvtql(int rb, int rc)
698{
699 if (unlikely(rc == 31)) {
700 return;
701 }
702 if (unlikely(rb == 31)) {
703 tcg_gen_movi_i64(cpu_fir[rc], 0);
704 } else {
705 TCGv tmp = tcg_temp_new();
706
707 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
708 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
709 tcg_gen_shli_i64(tmp, tmp, 32);
710 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
711 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
712
713 tcg_temp_free(tmp);
714 }
715}
716
717static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
718{
719 if (rb != 31) {
720 int lab = gen_new_label();
721 TCGv tmp = tcg_temp_new();
722
723 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
724 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
725 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
726
727 gen_set_label(lab);
728 }
729 gen_fcvtql(rb, rc);
730}
731
f24518b5
RH
732#define FARITH2(name) \
733static inline void glue(gen_f, name)(int rb, int rc) \
734{ \
735 if (unlikely(rc == 31)) { \
736 return; \
737 } \
738 if (rb != 31) { \
739 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
740 } else { \
741 TCGv tmp = tcg_const_i64(0); \
742 gen_helper_ ## name (cpu_fir[rc], tmp); \
743 tcg_temp_free(tmp); \
744 } \
745}
f24518b5
RH
746
747/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
748FARITH2(sqrtf)
749FARITH2(sqrtg)
a7812ae4
PB
750FARITH2(cvtgf)
751FARITH2(cvtgq)
752FARITH2(cvtqf)
753FARITH2(cvtqg)
f24518b5
RH
754
755static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
756 int rb, int rc, int fn11)
757{
758 TCGv vb;
759
760 /* ??? This is wrong: the instruction is not a nop, it still may
761 raise exceptions. */
762 if (unlikely(rc == 31)) {
763 return;
764 }
765
766 gen_qual_roundmode(ctx, fn11);
767 gen_qual_flushzero(ctx, fn11);
768 gen_fp_exc_clear();
769
770 vb = gen_ieee_input(rb, fn11, 0);
771 helper(cpu_fir[rc], vb);
772 tcg_temp_free(vb);
773
774 gen_fp_exc_raise(rc, fn11);
775}
776
777#define IEEE_ARITH2(name) \
778static inline void glue(gen_f, name)(DisasContext *ctx, \
779 int rb, int rc, int fn11) \
780{ \
781 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
782}
783IEEE_ARITH2(sqrts)
784IEEE_ARITH2(sqrtt)
785IEEE_ARITH2(cvtst)
786IEEE_ARITH2(cvtts)
787
788static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
789{
790 TCGv vb;
791 int ignore = 0;
792
793 /* ??? This is wrong: the instruction is not a nop, it still may
794 raise exceptions. */
795 if (unlikely(rc == 31)) {
796 return;
797 }
798
799 /* No need to set flushzero, since we have an integer output. */
800 gen_fp_exc_clear();
801 vb = gen_ieee_input(rb, fn11, 0);
802
803 /* Almost all integer conversions use cropped rounding, and most
804 also do not have integer overflow enabled. Special case that. */
805 switch (fn11) {
806 case QUAL_RM_C:
807 gen_helper_cvttq_c(cpu_fir[rc], vb);
808 break;
809 case QUAL_V | QUAL_RM_C:
810 case QUAL_S | QUAL_V | QUAL_RM_C:
811 ignore = float_flag_inexact;
812 /* FALLTHRU */
813 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
814 gen_helper_cvttq_svic(cpu_fir[rc], vb);
815 break;
816 default:
817 gen_qual_roundmode(ctx, fn11);
818 gen_helper_cvttq(cpu_fir[rc], vb);
819 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
820 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
821 break;
822 }
823 tcg_temp_free(vb);
824
825 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
826}
827
f24518b5
RH
828static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
829 int rb, int rc, int fn11)
830{
831 TCGv vb;
832
833 /* ??? This is wrong: the instruction is not a nop, it still may
834 raise exceptions. */
835 if (unlikely(rc == 31)) {
836 return;
837 }
838
839 gen_qual_roundmode(ctx, fn11);
840
841 if (rb == 31) {
842 vb = tcg_const_i64(0);
843 } else {
844 vb = cpu_fir[rb];
845 }
846
847 /* The only exception that can be raised by integer conversion
848 is inexact. Thus we only need to worry about exceptions when
849 inexact handling is requested. */
850 if (fn11 & QUAL_I) {
851 gen_fp_exc_clear();
852 helper(cpu_fir[rc], vb);
853 gen_fp_exc_raise(rc, fn11);
854 } else {
855 helper(cpu_fir[rc], vb);
856 }
857
858 if (rb == 31) {
859 tcg_temp_free(vb);
860 }
861}
862
863#define IEEE_INTCVT(name) \
864static inline void glue(gen_f, name)(DisasContext *ctx, \
865 int rb, int rc, int fn11) \
866{ \
867 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
868}
869IEEE_INTCVT(cvtqs)
870IEEE_INTCVT(cvtqt)
871
dc96be4b
RH
872static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
873{
874 TCGv va, vb, vmask;
875 int za = 0, zb = 0;
876
877 if (unlikely(rc == 31)) {
878 return;
879 }
880
881 vmask = tcg_const_i64(mask);
882
883 TCGV_UNUSED_I64(va);
884 if (ra == 31) {
885 if (inv_a) {
886 va = vmask;
887 } else {
888 za = 1;
889 }
890 } else {
891 va = tcg_temp_new_i64();
892 tcg_gen_mov_i64(va, cpu_fir[ra]);
893 if (inv_a) {
894 tcg_gen_andc_i64(va, vmask, va);
895 } else {
896 tcg_gen_and_i64(va, va, vmask);
897 }
898 }
899
900 TCGV_UNUSED_I64(vb);
901 if (rb == 31) {
902 zb = 1;
903 } else {
904 vb = tcg_temp_new_i64();
905 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
906 }
907
908 switch (za << 1 | zb) {
909 case 0 | 0:
910 tcg_gen_or_i64(cpu_fir[rc], va, vb);
911 break;
912 case 0 | 1:
913 tcg_gen_mov_i64(cpu_fir[rc], va);
914 break;
915 case 2 | 0:
916 tcg_gen_mov_i64(cpu_fir[rc], vb);
917 break;
918 case 2 | 1:
919 tcg_gen_movi_i64(cpu_fir[rc], 0);
920 break;
921 }
922
923 tcg_temp_free(vmask);
924 if (ra != 31) {
925 tcg_temp_free(va);
926 }
927 if (rb != 31) {
928 tcg_temp_free(vb);
929 }
930}
931
/* CPYS: sign bit from RA, everything else from RB.  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN: complemented sign bit from RA, everything else from RB.  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE: sign and exponent field from RA, fraction from RB.  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
946
f24518b5
RH
947#define FARITH3(name) \
948static inline void glue(gen_f, name)(int ra, int rb, int rc) \
949{ \
950 TCGv va, vb; \
951 \
952 if (unlikely(rc == 31)) { \
953 return; \
954 } \
955 if (ra == 31) { \
956 va = tcg_const_i64(0); \
957 } else { \
958 va = cpu_fir[ra]; \
959 } \
960 if (rb == 31) { \
961 vb = tcg_const_i64(0); \
962 } else { \
963 vb = cpu_fir[rb]; \
964 } \
965 \
966 gen_helper_ ## name (cpu_fir[rc], va, vb); \
967 \
968 if (ra == 31) { \
969 tcg_temp_free(va); \
970 } \
971 if (rb == 31) { \
972 tcg_temp_free(vb); \
973 } \
974}
f24518b5
RH
975
976/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
977FARITH3(addf)
978FARITH3(subf)
979FARITH3(mulf)
980FARITH3(divf)
981FARITH3(addg)
982FARITH3(subg)
983FARITH3(mulg)
984FARITH3(divg)
985FARITH3(cmpgeq)
986FARITH3(cmpglt)
987FARITH3(cmpgle)
f24518b5
RH
988
989static void gen_ieee_arith3(DisasContext *ctx,
990 void (*helper)(TCGv, TCGv, TCGv),
991 int ra, int rb, int rc, int fn11)
992{
993 TCGv va, vb;
994
995 /* ??? This is wrong: the instruction is not a nop, it still may
996 raise exceptions. */
997 if (unlikely(rc == 31)) {
998 return;
999 }
1000
1001 gen_qual_roundmode(ctx, fn11);
1002 gen_qual_flushzero(ctx, fn11);
1003 gen_fp_exc_clear();
1004
1005 va = gen_ieee_input(ra, fn11, 0);
1006 vb = gen_ieee_input(rb, fn11, 0);
1007 helper(cpu_fir[rc], va, vb);
1008 tcg_temp_free(va);
1009 tcg_temp_free(vb);
1010
1011 gen_fp_exc_raise(rc, fn11);
1012}
1013
1014#define IEEE_ARITH3(name) \
1015static inline void glue(gen_f, name)(DisasContext *ctx, \
1016 int ra, int rb, int rc, int fn11) \
1017{ \
1018 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1019}
1020IEEE_ARITH3(adds)
1021IEEE_ARITH3(subs)
1022IEEE_ARITH3(muls)
1023IEEE_ARITH3(divs)
1024IEEE_ARITH3(addt)
1025IEEE_ARITH3(subt)
1026IEEE_ARITH3(mult)
1027IEEE_ARITH3(divt)
1028
1029static void gen_ieee_compare(DisasContext *ctx,
1030 void (*helper)(TCGv, TCGv, TCGv),
1031 int ra, int rb, int rc, int fn11)
1032{
1033 TCGv va, vb;
1034
1035 /* ??? This is wrong: the instruction is not a nop, it still may
1036 raise exceptions. */
1037 if (unlikely(rc == 31)) {
1038 return;
1039 }
1040
1041 gen_fp_exc_clear();
1042
1043 va = gen_ieee_input(ra, fn11, 1);
1044 vb = gen_ieee_input(rb, fn11, 1);
1045 helper(cpu_fir[rc], va, vb);
1046 tcg_temp_free(va);
1047 tcg_temp_free(vb);
1048
1049 gen_fp_exc_raise(rc, fn11);
1050}
1051
1052#define IEEE_CMP3(name) \
1053static inline void glue(gen_f, name)(DisasContext *ctx, \
1054 int ra, int rb, int rc, int fn11) \
1055{ \
1056 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1057}
1058IEEE_CMP3(cmptun)
1059IEEE_CMP3(cmpteq)
1060IEEE_CMP3(cmptlt)
1061IEEE_CMP3(cmptle)
a7812ae4 1062
/* Expand an 8-bit ZAPNOT byte-select literal into the corresponding
   64-bit mask: bit i of LIT keeps byte i of the operand.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    /* Build from the top byte down, shifting the accumulator along.  */
    for (i = 7; i >= 0; i--) {
        mask <<= 8;
        if (lit & (1 << i)) {
            mask |= 0xff;
        }
    }
    return mask;
}
1074
87d98f95
RH
1075/* Implement zapnot with an immediate operand, which expands to some
1076 form of immediate AND. This is a basic building block in the
1077 definition of many of the other byte manipulation instructions. */
248c42f3 1078static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1079{
87d98f95
RH
1080 switch (lit) {
1081 case 0x00:
248c42f3 1082 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1083 break;
1084 case 0x01:
248c42f3 1085 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1086 break;
1087 case 0x03:
248c42f3 1088 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1089 break;
1090 case 0x0f:
248c42f3 1091 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1092 break;
1093 case 0xff:
248c42f3 1094 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1095 break;
1096 default:
248c42f3 1097 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1098 break;
1099 }
1100}
1101
1102static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1103{
1104 if (unlikely(rc == 31))
1105 return;
1106 else if (unlikely(ra == 31))
1107 tcg_gen_movi_i64(cpu_ir[rc], 0);
1108 else if (islit)
248c42f3 1109 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1110 else
1111 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1112}
1113
1114static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1115{
1116 if (unlikely(rc == 31))
1117 return;
1118 else if (unlikely(ra == 31))
1119 tcg_gen_movi_i64(cpu_ir[rc], 0);
1120 else if (islit)
248c42f3 1121 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1122 else
1123 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1124}
1125
1126
248c42f3 1127/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1128static void gen_ext_h(int ra, int rb, int rc, int islit,
1129 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1130{
1131 if (unlikely(rc == 31))
1132 return;
377a43b6
RH
1133 else if (unlikely(ra == 31))
1134 tcg_gen_movi_i64(cpu_ir[rc], 0);
1135 else {
dfaa8583 1136 if (islit) {
377a43b6
RH
1137 lit = (64 - (lit & 7) * 8) & 0x3f;
1138 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1139 } else {
377a43b6 1140 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1141 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1142 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1143 tcg_gen_neg_i64(tmp1, tmp1);
1144 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1145 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1146 tcg_temp_free(tmp1);
dfaa8583 1147 }
248c42f3 1148 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1149 }
b3249f63
AJ
1150}
1151
248c42f3 1152/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1153static void gen_ext_l(int ra, int rb, int rc, int islit,
1154 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1155{
1156 if (unlikely(rc == 31))
1157 return;
377a43b6
RH
1158 else if (unlikely(ra == 31))
1159 tcg_gen_movi_i64(cpu_ir[rc], 0);
1160 else {
dfaa8583 1161 if (islit) {
377a43b6 1162 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1163 } else {
a7812ae4 1164 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1165 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1166 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1167 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1168 tcg_temp_free(tmp);
fe2b269a 1169 }
248c42f3
RH
1170 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1171 }
1172}
1173
50eb6e5c
RH
/* INSWH, INSLH, INSQH */
/* Insert high: position the zapped bytes of RA so they land in the
   high part of an unaligned store spanning two quadwords.  Result is
   RA zapped to BYTE_MASK, then right-shifted by 64 - 8*(addr & 7).  */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        /* Writes to r31 are discarded.  */
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        /* r31 reads as zero; and an aligned literal address means a
           64-bit shift, whose result is defined to be zero.  */
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end. This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above. */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1. Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63. */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            /* Variable part of the shift (count - 1) ...  */
            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            /* ... followed by the constant final shift of 1.  */
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1215
248c42f3 1216/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1217static void gen_ins_l(int ra, int rb, int rc, int islit,
1218 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1219{
1220 if (unlikely(rc == 31))
1221 return;
1222 else if (unlikely(ra == 31))
1223 tcg_gen_movi_i64(cpu_ir[rc], 0);
1224 else {
1225 TCGv tmp = tcg_temp_new();
1226
1227 /* The instruction description has us left-shift the byte mask
1228 the same number of byte slots as the data and apply the zap
1229 at the end. This is equivalent to simply performing the zap
1230 first and shifting afterward. */
1231 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1232
1233 if (islit) {
1234 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1235 } else {
1236 TCGv shift = tcg_temp_new();
1237 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1238 tcg_gen_shli_i64(shift, shift, 3);
1239 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1240 tcg_temp_free(shift);
1241 }
1242 tcg_temp_free(tmp);
377a43b6 1243 }
b3249f63
AJ
1244}
1245
ffec44f1
RH
/* MSKWH, MSKLH, MSKQH */
/* Mask high: clear the bytes of RA that the high half of an unaligned
   store would overwrite — i.e. bits <15:8> of BYTE_MASK shifted left
   by (addr & 7) byte positions.  */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        /* Writes to r31 are discarded.  */
        return;
    else if (unlikely(ra == 31))
        /* r31 reads as zero.  */
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* byte_mask promotes to int, so the << and >> 8 compute the
           <15:8> extraction exactly; the complement selects the bytes
           to keep for the ZAPNOT.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>. This can be
           emulated with a right-shift on the expanded byte mask. This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero. This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift. The code we expand below is
           equivalent to ~((B & 7) * 8) & 63. */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        /* Keep only the bytes outside the computed mask.  */
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1283
14ab1634 1284/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1285static void gen_msk_l(int ra, int rb, int rc, int islit,
1286 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1287{
1288 if (unlikely(rc == 31))
1289 return;
1290 else if (unlikely(ra == 31))
1291 tcg_gen_movi_i64(cpu_ir[rc], 0);
1292 else if (islit) {
1293 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1294 } else {
1295 TCGv shift = tcg_temp_new();
1296 TCGv mask = tcg_temp_new();
1297
1298 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1299 tcg_gen_shli_i64(shift, shift, 3);
1300 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1301 tcg_gen_shl_i64(mask, mask, shift);
1302
1303 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1304
1305 tcg_temp_free(mask);
1306 tcg_temp_free(shift);
1307 }
1308}
1309
/* Code to call arith3 helpers */
/* Expands to gen_NAME(ra, rb, rc, islit, lit): emit a call to the
   out-of-line helper gen_helper_NAME with RA (or zero for r31) and
   either the literal LIT or RB as operands, storing into RC.  Writes
   to r31 are discarded; constant temporaries are freed after use.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
/* Byte-compare and overflow-checking arithmetic.  */
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
/* Multiplies: high 64 bits, and overflow-checking variants.  */
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
/* MVI (motion-video) byte/word min/max and pixel error.  */
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
1353
/* Expands to gen_NAME(rb, rc): emit a call to the two-operand MVI
   helper gen_helper_NAME(RC, RB).  Writes to r31 are discarded and
   r31 as source reads as zero.  */
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
/* Pack/unpack between bytes and words/longwords (MVI extension).  */
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
b3249f63 1368
9e05960f
RH
1369static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1370 int islit, uint8_t lit)
01ff9cc8 1371{
9e05960f 1372 TCGv va, vb;
01ff9cc8 1373
9e05960f 1374 if (unlikely(rc == 31)) {
13e4df99 1375 return;
9e05960f 1376 }
01ff9cc8 1377
9e05960f
RH
1378 if (ra == 31) {
1379 va = tcg_const_i64(0);
1380 } else {
1381 va = cpu_ir[ra];
1382 }
1383 if (islit) {
1384 vb = tcg_const_i64(lit);
1385 } else {
1386 vb = cpu_ir[rb];
1387 }
01ff9cc8 1388
9e05960f 1389 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1390
9e05960f
RH
1391 if (ra == 31) {
1392 tcg_temp_free(va);
1393 }
1394 if (islit) {
1395 tcg_temp_free(vb);
1396 }
01ff9cc8
AJ
1397}
1398
ac316ca4
RH
1399static void gen_rx(int ra, int set)
1400{
1401 TCGv_i32 tmp;
1402
1403 if (ra != 31) {
1404 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1405 }
1406
1407 tmp = tcg_const_i32(set);
1408 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1409 tcg_temp_free_i32(tmp);
1410}
1411
4af70374 1412static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1413{
1414 uint32_t palcode;
1415 int32_t disp21, disp16, disp12;
f88fe4e3
BS
1416 uint16_t fn11;
1417 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
adf3c8b6 1418 uint8_t lit;
4af70374 1419 ExitStatus ret;
4c9649a9
JM
1420
1421 /* Decode all instruction fields */
1422 opc = insn >> 26;
1423 ra = (insn >> 21) & 0x1F;
1424 rb = (insn >> 16) & 0x1F;
1425 rc = insn & 0x1F;
13e4df99 1426 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1427 if (rb == 31 && !islit) {
1428 islit = 1;
1429 lit = 0;
1430 } else
1431 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1432 palcode = insn & 0x03FFFFFF;
1433 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1434 disp16 = (int16_t)(insn & 0x0000FFFF);
1435 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
4c9649a9
JM
1436 fn11 = (insn >> 5) & 0x000007FF;
1437 fpfn = fn11 & 0x3F;
1438 fn7 = (insn >> 5) & 0x0000007F;
1439 fn2 = (insn >> 5) & 0x00000003;
806991da 1440 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1441 opc, ra, rb, rc, disp16);
806991da 1442
4af70374 1443 ret = NO_EXIT;
4c9649a9
JM
1444 switch (opc) {
1445 case 0x00:
1446 /* CALL_PAL */
ab471ade
RH
1447#ifdef CONFIG_USER_ONLY
1448 if (palcode == 0x9E) {
1449 /* RDUNIQUE */
1450 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1451 break;
1452 } else if (palcode == 0x9F) {
1453 /* WRUNIQUE */
1454 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1455 break;
1456 }
1457#endif
4c9649a9
JM
1458 if (palcode >= 0x80 && palcode < 0xC0) {
1459 /* Unprivileged PAL call */
31a877f2 1460 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
4af70374
RH
1461 /* PC updated by gen_excp. */
1462 ret = EXIT_PC_UPDATED;
ab471ade
RH
1463 break;
1464 }
1465#ifndef CONFIG_USER_ONLY
1466 if (palcode < 0x40) {
4c9649a9
JM
1467 /* Privileged PAL code */
1468 if (ctx->mem_idx & 1)
1469 goto invalid_opc;
ab471ade 1470 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
4c9649a9 1471 }
ab471ade
RH
1472#endif
1473 /* Invalid PAL call */
1474 goto invalid_opc;
4c9649a9
JM
1475 case 0x01:
1476 /* OPC01 */
1477 goto invalid_opc;
1478 case 0x02:
1479 /* OPC02 */
1480 goto invalid_opc;
1481 case 0x03:
1482 /* OPC03 */
1483 goto invalid_opc;
1484 case 0x04:
1485 /* OPC04 */
1486 goto invalid_opc;
1487 case 0x05:
1488 /* OPC05 */
1489 goto invalid_opc;
1490 case 0x06:
1491 /* OPC06 */
1492 goto invalid_opc;
1493 case 0x07:
1494 /* OPC07 */
1495 goto invalid_opc;
1496 case 0x08:
1497 /* LDA */
1ef4ef4e 1498 if (likely(ra != 31)) {
496cb5b9 1499 if (rb != 31)
3761035f
AJ
1500 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1501 else
1502 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1503 }
4c9649a9
JM
1504 break;
1505 case 0x09:
1506 /* LDAH */
1ef4ef4e 1507 if (likely(ra != 31)) {
496cb5b9 1508 if (rb != 31)
3761035f
AJ
1509 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1510 else
1511 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1512 }
4c9649a9
JM
1513 break;
1514 case 0x0A:
1515 /* LDBU */
1516 if (!(ctx->amask & AMASK_BWX))
1517 goto invalid_opc;
f18cd223 1518 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1519 break;
1520 case 0x0B:
1521 /* LDQ_U */
f18cd223 1522 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1523 break;
1524 case 0x0C:
1525 /* LDWU */
1526 if (!(ctx->amask & AMASK_BWX))
1527 goto invalid_opc;
577d5e7f 1528 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1529 break;
1530 case 0x0D:
1531 /* STW */
57a92c8e 1532 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1533 break;
1534 case 0x0E:
1535 /* STB */
57a92c8e 1536 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1537 break;
1538 case 0x0F:
1539 /* STQ_U */
57a92c8e 1540 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
4c9649a9
JM
1541 break;
1542 case 0x10:
1543 switch (fn7) {
1544 case 0x00:
1545 /* ADDL */
30c7183b
AJ
1546 if (likely(rc != 31)) {
1547 if (ra != 31) {
1548 if (islit) {
1549 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1550 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1551 } else {
30c7183b
AJ
1552 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1553 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1554 }
30c7183b
AJ
1555 } else {
1556 if (islit)
dfaa8583 1557 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1558 else
dfaa8583 1559 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1560 }
1561 }
4c9649a9
JM
1562 break;
1563 case 0x02:
1564 /* S4ADDL */
30c7183b
AJ
1565 if (likely(rc != 31)) {
1566 if (ra != 31) {
a7812ae4 1567 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1568 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1569 if (islit)
1570 tcg_gen_addi_i64(tmp, tmp, lit);
1571 else
1572 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1573 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1574 tcg_temp_free(tmp);
30c7183b
AJ
1575 } else {
1576 if (islit)
1577 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1578 else
dfaa8583 1579 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1580 }
1581 }
4c9649a9
JM
1582 break;
1583 case 0x09:
1584 /* SUBL */
30c7183b
AJ
1585 if (likely(rc != 31)) {
1586 if (ra != 31) {
dfaa8583 1587 if (islit)
30c7183b 1588 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1589 else
30c7183b 1590 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1591 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1592 } else {
1593 if (islit)
1594 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1595 else {
30c7183b
AJ
1596 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1597 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1598 }
1599 }
4c9649a9
JM
1600 break;
1601 case 0x0B:
1602 /* S4SUBL */
30c7183b
AJ
1603 if (likely(rc != 31)) {
1604 if (ra != 31) {
a7812ae4 1605 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1606 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1607 if (islit)
1608 tcg_gen_subi_i64(tmp, tmp, lit);
1609 else
1610 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1611 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1612 tcg_temp_free(tmp);
30c7183b
AJ
1613 } else {
1614 if (islit)
1615 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1616 else {
30c7183b
AJ
1617 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1618 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1619 }
30c7183b
AJ
1620 }
1621 }
4c9649a9
JM
1622 break;
1623 case 0x0F:
1624 /* CMPBGE */
a7812ae4 1625 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1626 break;
1627 case 0x12:
1628 /* S8ADDL */
30c7183b
AJ
1629 if (likely(rc != 31)) {
1630 if (ra != 31) {
a7812ae4 1631 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1632 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1633 if (islit)
1634 tcg_gen_addi_i64(tmp, tmp, lit);
1635 else
1636 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1637 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1638 tcg_temp_free(tmp);
30c7183b
AJ
1639 } else {
1640 if (islit)
1641 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1642 else
dfaa8583 1643 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1644 }
1645 }
4c9649a9
JM
1646 break;
1647 case 0x1B:
1648 /* S8SUBL */
30c7183b
AJ
1649 if (likely(rc != 31)) {
1650 if (ra != 31) {
a7812ae4 1651 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1652 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1653 if (islit)
1654 tcg_gen_subi_i64(tmp, tmp, lit);
1655 else
1656 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1657 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1658 tcg_temp_free(tmp);
30c7183b
AJ
1659 } else {
1660 if (islit)
1661 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1662 else
30c7183b
AJ
1663 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1664 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1665 }
30c7183b
AJ
1666 }
1667 }
4c9649a9
JM
1668 break;
1669 case 0x1D:
1670 /* CMPULT */
01ff9cc8 1671 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1672 break;
1673 case 0x20:
1674 /* ADDQ */
30c7183b
AJ
1675 if (likely(rc != 31)) {
1676 if (ra != 31) {
1677 if (islit)
1678 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1679 else
dfaa8583 1680 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1681 } else {
1682 if (islit)
1683 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1684 else
dfaa8583 1685 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1686 }
1687 }
4c9649a9
JM
1688 break;
1689 case 0x22:
1690 /* S4ADDQ */
30c7183b
AJ
1691 if (likely(rc != 31)) {
1692 if (ra != 31) {
a7812ae4 1693 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1694 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1695 if (islit)
1696 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1697 else
1698 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1699 tcg_temp_free(tmp);
30c7183b
AJ
1700 } else {
1701 if (islit)
1702 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1703 else
dfaa8583 1704 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1705 }
1706 }
4c9649a9
JM
1707 break;
1708 case 0x29:
1709 /* SUBQ */
30c7183b
AJ
1710 if (likely(rc != 31)) {
1711 if (ra != 31) {
1712 if (islit)
1713 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1714 else
dfaa8583 1715 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1716 } else {
1717 if (islit)
1718 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1719 else
dfaa8583 1720 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1721 }
1722 }
4c9649a9
JM
1723 break;
1724 case 0x2B:
1725 /* S4SUBQ */
30c7183b
AJ
1726 if (likely(rc != 31)) {
1727 if (ra != 31) {
a7812ae4 1728 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1729 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1730 if (islit)
1731 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1732 else
1733 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1734 tcg_temp_free(tmp);
30c7183b
AJ
1735 } else {
1736 if (islit)
1737 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1738 else
dfaa8583 1739 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1740 }
1741 }
4c9649a9
JM
1742 break;
1743 case 0x2D:
1744 /* CMPEQ */
01ff9cc8 1745 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1746 break;
1747 case 0x32:
1748 /* S8ADDQ */
30c7183b
AJ
1749 if (likely(rc != 31)) {
1750 if (ra != 31) {
a7812ae4 1751 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1752 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1753 if (islit)
1754 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1755 else
1756 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1757 tcg_temp_free(tmp);
30c7183b
AJ
1758 } else {
1759 if (islit)
1760 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1761 else
dfaa8583 1762 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1763 }
1764 }
4c9649a9
JM
1765 break;
1766 case 0x3B:
1767 /* S8SUBQ */
30c7183b
AJ
1768 if (likely(rc != 31)) {
1769 if (ra != 31) {
a7812ae4 1770 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1771 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1772 if (islit)
1773 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1774 else
1775 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1776 tcg_temp_free(tmp);
30c7183b
AJ
1777 } else {
1778 if (islit)
1779 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1780 else
dfaa8583 1781 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1782 }
1783 }
4c9649a9
JM
1784 break;
1785 case 0x3D:
1786 /* CMPULE */
01ff9cc8 1787 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
1788 break;
1789 case 0x40:
1790 /* ADDL/V */
a7812ae4 1791 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
1792 break;
1793 case 0x49:
1794 /* SUBL/V */
a7812ae4 1795 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
1796 break;
1797 case 0x4D:
1798 /* CMPLT */
01ff9cc8 1799 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
1800 break;
1801 case 0x60:
1802 /* ADDQ/V */
a7812ae4 1803 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1804 break;
1805 case 0x69:
1806 /* SUBQ/V */
a7812ae4 1807 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1808 break;
1809 case 0x6D:
1810 /* CMPLE */
01ff9cc8 1811 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
1812 break;
1813 default:
1814 goto invalid_opc;
1815 }
1816 break;
1817 case 0x11:
1818 switch (fn7) {
1819 case 0x00:
1820 /* AND */
30c7183b 1821 if (likely(rc != 31)) {
dfaa8583 1822 if (ra == 31)
30c7183b
AJ
1823 tcg_gen_movi_i64(cpu_ir[rc], 0);
1824 else if (islit)
1825 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1826 else
1827 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1828 }
4c9649a9
JM
1829 break;
1830 case 0x08:
1831 /* BIC */
30c7183b
AJ
1832 if (likely(rc != 31)) {
1833 if (ra != 31) {
1834 if (islit)
1835 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1836 else
1837 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1838 } else
1839 tcg_gen_movi_i64(cpu_ir[rc], 0);
1840 }
4c9649a9
JM
1841 break;
1842 case 0x14:
1843 /* CMOVLBS */
bbe1dab4 1844 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1845 break;
1846 case 0x16:
1847 /* CMOVLBC */
bbe1dab4 1848 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1849 break;
1850 case 0x20:
1851 /* BIS */
30c7183b
AJ
1852 if (likely(rc != 31)) {
1853 if (ra != 31) {
1854 if (islit)
1855 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 1856 else
30c7183b 1857 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 1858 } else {
30c7183b
AJ
1859 if (islit)
1860 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1861 else
dfaa8583 1862 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 1863 }
4c9649a9
JM
1864 }
1865 break;
1866 case 0x24:
1867 /* CMOVEQ */
bbe1dab4 1868 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1869 break;
1870 case 0x26:
1871 /* CMOVNE */
bbe1dab4 1872 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1873 break;
1874 case 0x28:
1875 /* ORNOT */
30c7183b 1876 if (likely(rc != 31)) {
dfaa8583 1877 if (ra != 31) {
30c7183b
AJ
1878 if (islit)
1879 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1880 else
1881 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1882 } else {
1883 if (islit)
1884 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1885 else
1886 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1887 }
1888 }
4c9649a9
JM
1889 break;
1890 case 0x40:
1891 /* XOR */
30c7183b
AJ
1892 if (likely(rc != 31)) {
1893 if (ra != 31) {
1894 if (islit)
1895 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1896 else
dfaa8583 1897 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1898 } else {
1899 if (islit)
1900 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1901 else
dfaa8583 1902 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1903 }
1904 }
4c9649a9
JM
1905 break;
1906 case 0x44:
1907 /* CMOVLT */
bbe1dab4 1908 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1909 break;
1910 case 0x46:
1911 /* CMOVGE */
bbe1dab4 1912 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1913 break;
1914 case 0x48:
1915 /* EQV */
30c7183b
AJ
1916 if (likely(rc != 31)) {
1917 if (ra != 31) {
1918 if (islit)
1919 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1920 else
1921 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1922 } else {
1923 if (islit)
1924 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 1925 else
dfaa8583 1926 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1927 }
1928 }
4c9649a9
JM
1929 break;
1930 case 0x61:
1931 /* AMASK */
ae8ecd42
AJ
1932 if (likely(rc != 31)) {
1933 if (islit)
1a1f7dbc 1934 tcg_gen_movi_i64(cpu_ir[rc], lit);
ae8ecd42 1935 else
1a1f7dbc
AJ
1936 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1937 switch (ctx->env->implver) {
1938 case IMPLVER_2106x:
1939 /* EV4, EV45, LCA, LCA45 & EV5 */
1940 break;
1941 case IMPLVER_21164:
1942 case IMPLVER_21264:
1943 case IMPLVER_21364:
1944 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1945 ~(uint64_t)ctx->amask);
1946 break;
1947 }
ae8ecd42 1948 }
4c9649a9
JM
1949 break;
1950 case 0x64:
1951 /* CMOVLE */
bbe1dab4 1952 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1953 break;
1954 case 0x66:
1955 /* CMOVGT */
bbe1dab4 1956 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1957 break;
1958 case 0x6C:
1959 /* IMPLVER */
3761035f 1960 if (rc != 31)
8579095b 1961 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
1962 break;
1963 default:
1964 goto invalid_opc;
1965 }
1966 break;
1967 case 0x12:
1968 switch (fn7) {
1969 case 0x02:
1970 /* MSKBL */
14ab1634 1971 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1972 break;
1973 case 0x06:
1974 /* EXTBL */
377a43b6 1975 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1976 break;
1977 case 0x0B:
1978 /* INSBL */
248c42f3 1979 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1980 break;
1981 case 0x12:
1982 /* MSKWL */
14ab1634 1983 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1984 break;
1985 case 0x16:
1986 /* EXTWL */
377a43b6 1987 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1988 break;
1989 case 0x1B:
1990 /* INSWL */
248c42f3 1991 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1992 break;
1993 case 0x22:
1994 /* MSKLL */
14ab1634 1995 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
1996 break;
1997 case 0x26:
1998 /* EXTLL */
377a43b6 1999 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2000 break;
2001 case 0x2B:
2002 /* INSLL */
248c42f3 2003 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2004 break;
2005 case 0x30:
2006 /* ZAP */
a7812ae4 2007 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2008 break;
2009 case 0x31:
2010 /* ZAPNOT */
a7812ae4 2011 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2012 break;
2013 case 0x32:
2014 /* MSKQL */
14ab1634 2015 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2016 break;
2017 case 0x34:
2018 /* SRL */
30c7183b
AJ
2019 if (likely(rc != 31)) {
2020 if (ra != 31) {
2021 if (islit)
2022 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2023 else {
a7812ae4 2024 TCGv shift = tcg_temp_new();
30c7183b
AJ
2025 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2026 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2027 tcg_temp_free(shift);
dfaa8583 2028 }
30c7183b
AJ
2029 } else
2030 tcg_gen_movi_i64(cpu_ir[rc], 0);
2031 }
4c9649a9
JM
2032 break;
2033 case 0x36:
2034 /* EXTQL */
377a43b6 2035 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2036 break;
2037 case 0x39:
2038 /* SLL */
30c7183b
AJ
2039 if (likely(rc != 31)) {
2040 if (ra != 31) {
2041 if (islit)
2042 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2043 else {
a7812ae4 2044 TCGv shift = tcg_temp_new();
30c7183b
AJ
2045 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2046 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2047 tcg_temp_free(shift);
dfaa8583 2048 }
30c7183b
AJ
2049 } else
2050 tcg_gen_movi_i64(cpu_ir[rc], 0);
2051 }
4c9649a9
JM
2052 break;
2053 case 0x3B:
2054 /* INSQL */
248c42f3 2055 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2056 break;
2057 case 0x3C:
2058 /* SRA */
30c7183b
AJ
2059 if (likely(rc != 31)) {
2060 if (ra != 31) {
2061 if (islit)
2062 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2063 else {
a7812ae4 2064 TCGv shift = tcg_temp_new();
30c7183b
AJ
2065 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2066 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2067 tcg_temp_free(shift);
dfaa8583 2068 }
30c7183b
AJ
2069 } else
2070 tcg_gen_movi_i64(cpu_ir[rc], 0);
2071 }
4c9649a9
JM
2072 break;
2073 case 0x52:
2074 /* MSKWH */
ffec44f1 2075 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2076 break;
2077 case 0x57:
2078 /* INSWH */
50eb6e5c 2079 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2080 break;
2081 case 0x5A:
2082 /* EXTWH */
377a43b6 2083 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2084 break;
2085 case 0x62:
2086 /* MSKLH */
ffec44f1 2087 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2088 break;
2089 case 0x67:
2090 /* INSLH */
50eb6e5c 2091 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2092 break;
2093 case 0x6A:
2094 /* EXTLH */
377a43b6 2095 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2096 break;
2097 case 0x72:
2098 /* MSKQH */
ffec44f1 2099 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2100 break;
2101 case 0x77:
2102 /* INSQH */
50eb6e5c 2103 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2104 break;
2105 case 0x7A:
2106 /* EXTQH */
377a43b6 2107 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2108 break;
2109 default:
2110 goto invalid_opc;
2111 }
2112 break;
2113 case 0x13:
2114 switch (fn7) {
2115 case 0x00:
2116 /* MULL */
30c7183b 2117 if (likely(rc != 31)) {
dfaa8583 2118 if (ra == 31)
30c7183b
AJ
2119 tcg_gen_movi_i64(cpu_ir[rc], 0);
2120 else {
2121 if (islit)
2122 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2123 else
2124 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2125 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2126 }
2127 }
4c9649a9
JM
2128 break;
2129 case 0x20:
2130 /* MULQ */
30c7183b 2131 if (likely(rc != 31)) {
dfaa8583 2132 if (ra == 31)
30c7183b
AJ
2133 tcg_gen_movi_i64(cpu_ir[rc], 0);
2134 else if (islit)
2135 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2136 else
2137 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2138 }
4c9649a9
JM
2139 break;
2140 case 0x30:
2141 /* UMULH */
a7812ae4 2142 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2143 break;
2144 case 0x40:
2145 /* MULL/V */
a7812ae4 2146 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2147 break;
2148 case 0x60:
2149 /* MULQ/V */
a7812ae4 2150 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2151 break;
2152 default:
2153 goto invalid_opc;
2154 }
2155 break;
2156 case 0x14:
f24518b5 2157 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2158 case 0x04:
2159 /* ITOFS */
2160 if (!(ctx->amask & AMASK_FIX))
2161 goto invalid_opc;
f18cd223
AJ
2162 if (likely(rc != 31)) {
2163 if (ra != 31) {
a7812ae4 2164 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2165 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2166 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2167 tcg_temp_free_i32(tmp);
f18cd223
AJ
2168 } else
2169 tcg_gen_movi_i64(cpu_fir[rc], 0);
2170 }
4c9649a9
JM
2171 break;
2172 case 0x0A:
2173 /* SQRTF */
2174 if (!(ctx->amask & AMASK_FIX))
2175 goto invalid_opc;
a7812ae4 2176 gen_fsqrtf(rb, rc);
4c9649a9
JM
2177 break;
2178 case 0x0B:
2179 /* SQRTS */
2180 if (!(ctx->amask & AMASK_FIX))
2181 goto invalid_opc;
f24518b5 2182 gen_fsqrts(ctx, rb, rc, fn11);
4c9649a9
JM
2183 break;
2184 case 0x14:
2185 /* ITOFF */
2186 if (!(ctx->amask & AMASK_FIX))
2187 goto invalid_opc;
f18cd223
AJ
2188 if (likely(rc != 31)) {
2189 if (ra != 31) {
a7812ae4 2190 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2191 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2192 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2193 tcg_temp_free_i32(tmp);
f18cd223
AJ
2194 } else
2195 tcg_gen_movi_i64(cpu_fir[rc], 0);
2196 }
4c9649a9
JM
2197 break;
2198 case 0x24:
2199 /* ITOFT */
2200 if (!(ctx->amask & AMASK_FIX))
2201 goto invalid_opc;
f18cd223
AJ
2202 if (likely(rc != 31)) {
2203 if (ra != 31)
2204 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2205 else
2206 tcg_gen_movi_i64(cpu_fir[rc], 0);
2207 }
4c9649a9
JM
2208 break;
2209 case 0x2A:
2210 /* SQRTG */
2211 if (!(ctx->amask & AMASK_FIX))
2212 goto invalid_opc;
a7812ae4 2213 gen_fsqrtg(rb, rc);
4c9649a9
JM
2214 break;
2215 case 0x02B:
2216 /* SQRTT */
2217 if (!(ctx->amask & AMASK_FIX))
2218 goto invalid_opc;
f24518b5 2219 gen_fsqrtt(ctx, rb, rc, fn11);
4c9649a9
JM
2220 break;
2221 default:
2222 goto invalid_opc;
2223 }
2224 break;
2225 case 0x15:
2226 /* VAX floating point */
2227 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2228 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2229 case 0x00:
2230 /* ADDF */
a7812ae4 2231 gen_faddf(ra, rb, rc);
4c9649a9
JM
2232 break;
2233 case 0x01:
2234 /* SUBF */
a7812ae4 2235 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2236 break;
2237 case 0x02:
2238 /* MULF */
a7812ae4 2239 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2240 break;
2241 case 0x03:
2242 /* DIVF */
a7812ae4 2243 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2244 break;
2245 case 0x1E:
2246 /* CVTDG */
2247#if 0 // TODO
a7812ae4 2248 gen_fcvtdg(rb, rc);
4c9649a9
JM
2249#else
2250 goto invalid_opc;
2251#endif
2252 break;
2253 case 0x20:
2254 /* ADDG */
a7812ae4 2255 gen_faddg(ra, rb, rc);
4c9649a9
JM
2256 break;
2257 case 0x21:
2258 /* SUBG */
a7812ae4 2259 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2260 break;
2261 case 0x22:
2262 /* MULG */
a7812ae4 2263 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2264 break;
2265 case 0x23:
2266 /* DIVG */
a7812ae4 2267 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2268 break;
2269 case 0x25:
2270 /* CMPGEQ */
a7812ae4 2271 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2272 break;
2273 case 0x26:
2274 /* CMPGLT */
a7812ae4 2275 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2276 break;
2277 case 0x27:
2278 /* CMPGLE */
a7812ae4 2279 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2280 break;
2281 case 0x2C:
2282 /* CVTGF */
a7812ae4 2283 gen_fcvtgf(rb, rc);
4c9649a9
JM
2284 break;
2285 case 0x2D:
2286 /* CVTGD */
2287#if 0 // TODO
a7812ae4 2288 gen_fcvtgd(rb, rc);
4c9649a9
JM
2289#else
2290 goto invalid_opc;
2291#endif
2292 break;
2293 case 0x2F:
2294 /* CVTGQ */
a7812ae4 2295 gen_fcvtgq(rb, rc);
4c9649a9
JM
2296 break;
2297 case 0x3C:
2298 /* CVTQF */
a7812ae4 2299 gen_fcvtqf(rb, rc);
4c9649a9
JM
2300 break;
2301 case 0x3E:
2302 /* CVTQG */
a7812ae4 2303 gen_fcvtqg(rb, rc);
4c9649a9
JM
2304 break;
2305 default:
2306 goto invalid_opc;
2307 }
2308 break;
2309 case 0x16:
2310 /* IEEE floating-point */
f24518b5 2311 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2312 case 0x00:
2313 /* ADDS */
f24518b5 2314 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2315 break;
2316 case 0x01:
2317 /* SUBS */
f24518b5 2318 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2319 break;
2320 case 0x02:
2321 /* MULS */
f24518b5 2322 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2323 break;
2324 case 0x03:
2325 /* DIVS */
f24518b5 2326 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2327 break;
2328 case 0x20:
2329 /* ADDT */
f24518b5 2330 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2331 break;
2332 case 0x21:
2333 /* SUBT */
f24518b5 2334 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2335 break;
2336 case 0x22:
2337 /* MULT */
f24518b5 2338 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2339 break;
2340 case 0x23:
2341 /* DIVT */
f24518b5 2342 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2343 break;
2344 case 0x24:
2345 /* CMPTUN */
f24518b5 2346 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2347 break;
2348 case 0x25:
2349 /* CMPTEQ */
f24518b5 2350 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2351 break;
2352 case 0x26:
2353 /* CMPTLT */
f24518b5 2354 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2355 break;
2356 case 0x27:
2357 /* CMPTLE */
f24518b5 2358 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2359 break;
2360 case 0x2C:
a74b4d2c 2361 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2362 /* CVTST */
f24518b5 2363 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2364 } else {
2365 /* CVTTS */
f24518b5 2366 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2367 }
2368 break;
2369 case 0x2F:
2370 /* CVTTQ */
f24518b5 2371 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2372 break;
2373 case 0x3C:
2374 /* CVTQS */
f24518b5 2375 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2376 break;
2377 case 0x3E:
2378 /* CVTQT */
f24518b5 2379 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2380 break;
2381 default:
2382 goto invalid_opc;
2383 }
2384 break;
2385 case 0x17:
2386 switch (fn11) {
2387 case 0x010:
2388 /* CVTLQ */
a7812ae4 2389 gen_fcvtlq(rb, rc);
4c9649a9
JM
2390 break;
2391 case 0x020:
f18cd223 2392 if (likely(rc != 31)) {
a06d48d9 2393 if (ra == rb) {
4c9649a9 2394 /* FMOV */
a06d48d9
RH
2395 if (ra == 31)
2396 tcg_gen_movi_i64(cpu_fir[rc], 0);
2397 else
2398 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2399 } else {
f18cd223 2400 /* CPYS */
a7812ae4 2401 gen_fcpys(ra, rb, rc);
a06d48d9 2402 }
4c9649a9
JM
2403 }
2404 break;
2405 case 0x021:
2406 /* CPYSN */
a7812ae4 2407 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2408 break;
2409 case 0x022:
2410 /* CPYSE */
a7812ae4 2411 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2412 break;
2413 case 0x024:
2414 /* MT_FPCR */
f18cd223 2415 if (likely(ra != 31))
a7812ae4 2416 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2417 else {
2418 TCGv tmp = tcg_const_i64(0);
a7812ae4 2419 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2420 tcg_temp_free(tmp);
2421 }
4c9649a9
JM
2422 break;
2423 case 0x025:
2424 /* MF_FPCR */
f18cd223 2425 if (likely(ra != 31))
a7812ae4 2426 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2427 break;
2428 case 0x02A:
2429 /* FCMOVEQ */
bbe1dab4 2430 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2431 break;
2432 case 0x02B:
2433 /* FCMOVNE */
bbe1dab4 2434 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2435 break;
2436 case 0x02C:
2437 /* FCMOVLT */
bbe1dab4 2438 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2439 break;
2440 case 0x02D:
2441 /* FCMOVGE */
bbe1dab4 2442 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2443 break;
2444 case 0x02E:
2445 /* FCMOVLE */
bbe1dab4 2446 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2447 break;
2448 case 0x02F:
2449 /* FCMOVGT */
bbe1dab4 2450 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2451 break;
2452 case 0x030:
2453 /* CVTQL */
a7812ae4 2454 gen_fcvtql(rb, rc);
4c9649a9
JM
2455 break;
2456 case 0x130:
2457 /* CVTQL/V */
4c9649a9
JM
2458 case 0x530:
2459 /* CVTQL/SV */
735cf45f
RH
2460 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2461 /v doesn't do. The only thing I can think is that /sv is a
2462 valid instruction merely for completeness in the ISA. */
2463 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2464 break;
2465 default:
2466 goto invalid_opc;
2467 }
2468 break;
2469 case 0x18:
2470 switch ((uint16_t)disp16) {
2471 case 0x0000:
2472 /* TRAPB */
4af70374 2473 /* No-op. */
4c9649a9
JM
2474 break;
2475 case 0x0400:
2476 /* EXCB */
4af70374 2477 /* No-op. */
4c9649a9
JM
2478 break;
2479 case 0x4000:
2480 /* MB */
2481 /* No-op */
2482 break;
2483 case 0x4400:
2484 /* WMB */
2485 /* No-op */
2486 break;
2487 case 0x8000:
2488 /* FETCH */
2489 /* No-op */
2490 break;
2491 case 0xA000:
2492 /* FETCH_M */
2493 /* No-op */
2494 break;
2495 case 0xC000:
2496 /* RPCC */
3761035f 2497 if (ra != 31)
a7812ae4 2498 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2499 break;
2500 case 0xE000:
2501 /* RC */
ac316ca4 2502 gen_rx(ra, 0);
4c9649a9
JM
2503 break;
2504 case 0xE800:
2505 /* ECB */
4c9649a9
JM
2506 break;
2507 case 0xF000:
2508 /* RS */
ac316ca4 2509 gen_rx(ra, 1);
4c9649a9
JM
2510 break;
2511 case 0xF800:
2512 /* WH64 */
2513 /* No-op */
2514 break;
2515 default:
2516 goto invalid_opc;
2517 }
2518 break;
2519 case 0x19:
2520 /* HW_MFPR (PALcode) */
2521#if defined (CONFIG_USER_ONLY)
2522 goto invalid_opc;
2523#else
2524 if (!ctx->pal_mode)
2525 goto invalid_opc;
8bb6e981
AJ
2526 if (ra != 31) {
2527 TCGv tmp = tcg_const_i32(insn & 0xFF);
a7812ae4 2528 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
8bb6e981
AJ
2529 tcg_temp_free(tmp);
2530 }
4c9649a9
JM
2531 break;
2532#endif
2533 case 0x1A:
49563a72
RH
2534 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2535 prediction stack action, which of course we don't implement. */
2536 if (rb != 31) {
3761035f 2537 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2538 } else {
3761035f 2539 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2540 }
2541 if (ra != 31) {
1304ca87 2542 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2543 }
4af70374 2544 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2545 break;
2546 case 0x1B:
2547 /* HW_LD (PALcode) */
2548#if defined (CONFIG_USER_ONLY)
2549 goto invalid_opc;
2550#else
2551 if (!ctx->pal_mode)
2552 goto invalid_opc;
8bb6e981 2553 if (ra != 31) {
a7812ae4 2554 TCGv addr = tcg_temp_new();
8bb6e981
AJ
2555 if (rb != 31)
2556 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2557 else
2558 tcg_gen_movi_i64(addr, disp12);
2559 switch ((insn >> 12) & 0xF) {
2560 case 0x0:
b5d51029 2561 /* Longword physical access (hw_ldl/p) */
a7812ae4 2562 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2563 break;
2564 case 0x1:
b5d51029 2565 /* Quadword physical access (hw_ldq/p) */
a7812ae4 2566 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2567 break;
2568 case 0x2:
b5d51029 2569 /* Longword physical access with lock (hw_ldl_l/p) */
a7812ae4 2570 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2571 break;
2572 case 0x3:
b5d51029 2573 /* Quadword physical access with lock (hw_ldq_l/p) */
a7812ae4 2574 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2575 break;
2576 case 0x4:
b5d51029
AJ
2577 /* Longword virtual PTE fetch (hw_ldl/v) */
2578 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2579 break;
2580 case 0x5:
b5d51029
AJ
2581 /* Quadword virtual PTE fetch (hw_ldq/v) */
2582 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2583 break;
2584 case 0x6:
2585 /* Incpu_ir[ra]id */
b5d51029 2586 goto invalid_opc;
8bb6e981
AJ
2587 case 0x7:
2588 /* Incpu_ir[ra]id */
b5d51029 2589 goto invalid_opc;
8bb6e981 2590 case 0x8:
b5d51029 2591 /* Longword virtual access (hw_ldl) */
a7812ae4
PB
2592 gen_helper_st_virt_to_phys(addr, addr);
2593 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2594 break;
2595 case 0x9:
b5d51029 2596 /* Quadword virtual access (hw_ldq) */
a7812ae4
PB
2597 gen_helper_st_virt_to_phys(addr, addr);
2598 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2599 break;
2600 case 0xA:
b5d51029
AJ
2601 /* Longword virtual access with protection check (hw_ldl/w) */
2602 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2603 break;
2604 case 0xB:
b5d51029
AJ
2605 /* Quadword virtual access with protection check (hw_ldq/w) */
2606 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2607 break;
2608 case 0xC:
b5d51029 2609 /* Longword virtual access with alt access mode (hw_ldl/a)*/
a7812ae4
PB
2610 gen_helper_set_alt_mode();
2611 gen_helper_st_virt_to_phys(addr, addr);
2612 gen_helper_ldl_raw(cpu_ir[ra], addr);
2613 gen_helper_restore_mode();
8bb6e981
AJ
2614 break;
2615 case 0xD:
b5d51029 2616 /* Quadword virtual access with alt access mode (hw_ldq/a) */
a7812ae4
PB
2617 gen_helper_set_alt_mode();
2618 gen_helper_st_virt_to_phys(addr, addr);
2619 gen_helper_ldq_raw(cpu_ir[ra], addr);
2620 gen_helper_restore_mode();
8bb6e981
AJ
2621 break;
2622 case 0xE:
2623 /* Longword virtual access with alternate access mode and
b5d51029 2624 * protection checks (hw_ldl/wa)
8bb6e981 2625 */
a7812ae4
PB
2626 gen_helper_set_alt_mode();
2627 gen_helper_ldl_data(cpu_ir[ra], addr);
2628 gen_helper_restore_mode();
8bb6e981
AJ
2629 break;
2630 case 0xF:
2631 /* Quadword virtual access with alternate access mode and
b5d51029 2632 * protection checks (hw_ldq/wa)
8bb6e981 2633 */
a7812ae4
PB
2634 gen_helper_set_alt_mode();
2635 gen_helper_ldq_data(cpu_ir[ra], addr);
2636 gen_helper_restore_mode();
8bb6e981
AJ
2637 break;
2638 }
2639 tcg_temp_free(addr);
4c9649a9 2640 }
4c9649a9
JM
2641 break;
2642#endif
2643 case 0x1C:
2644 switch (fn7) {
2645 case 0x00:
2646 /* SEXTB */
2647 if (!(ctx->amask & AMASK_BWX))
2648 goto invalid_opc;
ae8ecd42
AJ
2649 if (likely(rc != 31)) {
2650 if (islit)
2651 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2652 else
dfaa8583 2653 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2654 }
4c9649a9
JM
2655 break;
2656 case 0x01:
2657 /* SEXTW */
2658 if (!(ctx->amask & AMASK_BWX))
2659 goto invalid_opc;
ae8ecd42
AJ
2660 if (likely(rc != 31)) {
2661 if (islit)
2662 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
ae8ecd42 2663 else
dfaa8583 2664 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2665 }
4c9649a9
JM
2666 break;
2667 case 0x30:
2668 /* CTPOP */
2669 if (!(ctx->amask & AMASK_CIX))
2670 goto invalid_opc;
ae8ecd42
AJ
2671 if (likely(rc != 31)) {
2672 if (islit)
2673 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
ae8ecd42 2674 else
a7812ae4 2675 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2676 }
4c9649a9
JM
2677 break;
2678 case 0x31:
2679 /* PERR */
2680 if (!(ctx->amask & AMASK_MVI))
2681 goto invalid_opc;
13e4df99 2682 gen_perr(ra, rb, rc, islit, lit);
4c9649a9
JM
2683 break;
2684 case 0x32:
2685 /* CTLZ */
2686 if (!(ctx->amask & AMASK_CIX))
2687 goto invalid_opc;
ae8ecd42
AJ
2688 if (likely(rc != 31)) {
2689 if (islit)
2690 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
ae8ecd42 2691 else
a7812ae4 2692 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2693 }
4c9649a9
JM
2694 break;
2695 case 0x33:
2696 /* CTTZ */
2697 if (!(ctx->amask & AMASK_CIX))
2698 goto invalid_opc;
ae8ecd42
AJ
2699 if (likely(rc != 31)) {
2700 if (islit)
2701 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
ae8ecd42 2702 else
a7812ae4 2703 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2704 }
4c9649a9
JM
2705 break;
2706 case 0x34:
2707 /* UNPKBW */
2708 if (!(ctx->amask & AMASK_MVI))
2709 goto invalid_opc;
13e4df99
RH
2710 if (real_islit || ra != 31)
2711 goto invalid_opc;
2712 gen_unpkbw (rb, rc);
4c9649a9
JM
2713 break;
2714 case 0x35:
13e4df99 2715 /* UNPKBL */
4c9649a9
JM
2716 if (!(ctx->amask & AMASK_MVI))
2717 goto invalid_opc;
13e4df99
RH
2718 if (real_islit || ra != 31)
2719 goto invalid_opc;
2720 gen_unpkbl (rb, rc);
4c9649a9
JM
2721 break;
2722 case 0x36:
2723 /* PKWB */
2724 if (!(ctx->amask & AMASK_MVI))
2725 goto invalid_opc;
13e4df99
RH
2726 if (real_islit || ra != 31)
2727 goto invalid_opc;
2728 gen_pkwb (rb, rc);
4c9649a9
JM
2729 break;
2730 case 0x37:
2731 /* PKLB */
2732 if (!(ctx->amask & AMASK_MVI))
2733 goto invalid_opc;
13e4df99
RH
2734 if (real_islit || ra != 31)
2735 goto invalid_opc;
2736 gen_pklb (rb, rc);
4c9649a9
JM
2737 break;
2738 case 0x38:
2739 /* MINSB8 */
2740 if (!(ctx->amask & AMASK_MVI))
2741 goto invalid_opc;
13e4df99 2742 gen_minsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2743 break;
2744 case 0x39:
2745 /* MINSW4 */
2746 if (!(ctx->amask & AMASK_MVI))
2747 goto invalid_opc;
13e4df99 2748 gen_minsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2749 break;
2750 case 0x3A:
2751 /* MINUB8 */
2752 if (!(ctx->amask & AMASK_MVI))
2753 goto invalid_opc;
13e4df99 2754 gen_minub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2755 break;
2756 case 0x3B:
2757 /* MINUW4 */
2758 if (!(ctx->amask & AMASK_MVI))
2759 goto invalid_opc;
13e4df99 2760 gen_minuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2761 break;
2762 case 0x3C:
2763 /* MAXUB8 */
2764 if (!(ctx->amask & AMASK_MVI))
2765 goto invalid_opc;
13e4df99 2766 gen_maxub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2767 break;
2768 case 0x3D:
2769 /* MAXUW4 */
2770 if (!(ctx->amask & AMASK_MVI))
2771 goto invalid_opc;
13e4df99 2772 gen_maxuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2773 break;
2774 case 0x3E:
2775 /* MAXSB8 */
2776 if (!(ctx->amask & AMASK_MVI))
2777 goto invalid_opc;
13e4df99 2778 gen_maxsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2779 break;
2780 case 0x3F:
2781 /* MAXSW4 */
2782 if (!(ctx->amask & AMASK_MVI))
2783 goto invalid_opc;
13e4df99 2784 gen_maxsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2785 break;
2786 case 0x70:
2787 /* FTOIT */
2788 if (!(ctx->amask & AMASK_FIX))
2789 goto invalid_opc;
f18cd223
AJ
2790 if (likely(rc != 31)) {
2791 if (ra != 31)
2792 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2793 else
2794 tcg_gen_movi_i64(cpu_ir[rc], 0);
2795 }
4c9649a9
JM
2796 break;
2797 case 0x78:
2798 /* FTOIS */
2799 if (!(ctx->amask & AMASK_FIX))
2800 goto invalid_opc;
f18cd223 2801 if (rc != 31) {
a7812ae4 2802 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 2803 if (ra != 31)
a7812ae4 2804 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
2805 else {
2806 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2807 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
2808 tcg_temp_free(tmp2);
2809 }
2810 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 2811 tcg_temp_free_i32(tmp1);
f18cd223 2812 }
4c9649a9
JM
2813 break;
2814 default:
2815 goto invalid_opc;
2816 }
2817 break;
2818 case 0x1D:
2819 /* HW_MTPR (PALcode) */
2820#if defined (CONFIG_USER_ONLY)
2821 goto invalid_opc;
2822#else
2823 if (!ctx->pal_mode)
2824 goto invalid_opc;
8bb6e981
AJ
2825 else {
2826 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2827 if (ra != 31)
a7812ae4 2828 gen_helper_mtpr(tmp1, cpu_ir[ra]);
8bb6e981
AJ
2829 else {
2830 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2831 gen_helper_mtpr(tmp1, tmp2);
8bb6e981
AJ
2832 tcg_temp_free(tmp2);
2833 }
2834 tcg_temp_free(tmp1);
4af70374 2835 ret = EXIT_PC_STALE;
8bb6e981 2836 }
4c9649a9
JM
2837 break;
2838#endif
2839 case 0x1E:
2840 /* HW_REI (PALcode) */
2841#if defined (CONFIG_USER_ONLY)
2842 goto invalid_opc;
2843#else
2844 if (!ctx->pal_mode)
2845 goto invalid_opc;
2846 if (rb == 31) {
2847 /* "Old" alpha */
a7812ae4 2848 gen_helper_hw_rei();
4c9649a9 2849 } else {
8bb6e981
AJ
2850 TCGv tmp;
2851
2852 if (ra != 31) {
a7812ae4 2853 tmp = tcg_temp_new();
8bb6e981
AJ
2854 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2855 } else
2856 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
a7812ae4 2857 gen_helper_hw_ret(tmp);
8bb6e981 2858 tcg_temp_free(tmp);
4c9649a9 2859 }
4af70374 2860 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2861 break;
2862#endif
2863 case 0x1F:
2864 /* HW_ST (PALcode) */
2865#if defined (CONFIG_USER_ONLY)
2866 goto invalid_opc;
2867#else
2868 if (!ctx->pal_mode)
2869 goto invalid_opc;
8bb6e981
AJ
2870 else {
2871 TCGv addr, val;
a7812ae4 2872 addr = tcg_temp_new();
8bb6e981
AJ
2873 if (rb != 31)
2874 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2875 else
2876 tcg_gen_movi_i64(addr, disp12);
2877 if (ra != 31)
2878 val = cpu_ir[ra];
2879 else {
a7812ae4 2880 val = tcg_temp_new();
8bb6e981
AJ
2881 tcg_gen_movi_i64(val, 0);
2882 }
2883 switch ((insn >> 12) & 0xF) {
2884 case 0x0:
2885 /* Longword physical access */
a7812ae4 2886 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2887 break;
2888 case 0x1:
2889 /* Quadword physical access */
a7812ae4 2890 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2891 break;
2892 case 0x2:
2893 /* Longword physical access with lock */
a7812ae4 2894 gen_helper_stl_c_raw(val, val, addr);
8bb6e981
AJ
2895 break;
2896 case 0x3:
2897 /* Quadword physical access with lock */
a7812ae4 2898 gen_helper_stq_c_raw(val, val, addr);
8bb6e981
AJ
2899 break;
2900 case 0x4:
2901 /* Longword virtual access */
a7812ae4
PB
2902 gen_helper_st_virt_to_phys(addr, addr);
2903 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2904 break;
2905 case 0x5:
2906 /* Quadword virtual access */
a7812ae4
PB
2907 gen_helper_st_virt_to_phys(addr, addr);
2908 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2909 break;
2910 case 0x6:
2911 /* Invalid */
2912 goto invalid_opc;
2913 case 0x7:
2914 /* Invalid */
2915 goto invalid_opc;
2916 case 0x8:
2917 /* Invalid */
2918 goto invalid_opc;
2919 case 0x9:
2920 /* Invalid */
2921 goto invalid_opc;
2922 case 0xA:
2923 /* Invalid */
2924 goto invalid_opc;
2925 case 0xB:
2926 /* Invalid */
2927 goto invalid_opc;
2928 case 0xC:
2929 /* Longword virtual access with alternate access mode */
a7812ae4
PB
2930 gen_helper_set_alt_mode();
2931 gen_helper_st_virt_to_phys(addr, addr);
2932 gen_helper_stl_raw(val, addr);
2933 gen_helper_restore_mode();
8bb6e981
AJ
2934 break;
2935 case 0xD:
2936 /* Quadword virtual access with alternate access mode */
a7812ae4
PB
2937 gen_helper_set_alt_mode();
2938 gen_helper_st_virt_to_phys(addr, addr);
2939 gen_helper_stl_raw(val, addr);
2940 gen_helper_restore_mode();
8bb6e981
AJ
2941 break;
2942 case 0xE:
2943 /* Invalid */
2944 goto invalid_opc;
2945 case 0xF:
2946 /* Invalid */
2947 goto invalid_opc;
2948 }
45d46ce8 2949 if (ra == 31)
8bb6e981
AJ
2950 tcg_temp_free(val);
2951 tcg_temp_free(addr);
4c9649a9 2952 }
4c9649a9
JM
2953 break;
2954#endif
2955 case 0x20:
2956 /* LDF */
f18cd223 2957 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2958 break;
2959 case 0x21:
2960 /* LDG */
f18cd223 2961 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2962 break;
2963 case 0x22:
2964 /* LDS */
f18cd223 2965 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2966 break;
2967 case 0x23:
2968 /* LDT */
f18cd223 2969 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2970 break;
2971 case 0x24:
2972 /* STF */
57a92c8e 2973 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2974 break;
2975 case 0x25:
2976 /* STG */
57a92c8e 2977 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2978 break;
2979 case 0x26:
2980 /* STS */
57a92c8e 2981 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2982 break;
2983 case 0x27:
2984 /* STT */
57a92c8e 2985 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2986 break;
2987 case 0x28:
2988 /* LDL */
f18cd223 2989 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2990 break;
2991 case 0x29:
2992 /* LDQ */
f18cd223 2993 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2994 break;
2995 case 0x2A:
2996 /* LDL_L */
f4ed8679 2997 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
2998 break;
2999 case 0x2B:
3000 /* LDQ_L */
f4ed8679 3001 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3002 break;
3003 case 0x2C:
3004 /* STL */
57a92c8e 3005 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
3006 break;
3007 case 0x2D:
3008 /* STQ */
57a92c8e 3009 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
3010 break;
3011 case 0x2E:
3012 /* STL_C */
57a92c8e 3013 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
3014 break;
3015 case 0x2F:
3016 /* STQ_C */
57a92c8e 3017 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
3018 break;
3019 case 0x30:
3020 /* BR */
4af70374 3021 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3022 break;
a7812ae4 3023 case 0x31: /* FBEQ */
4af70374 3024 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3025 break;
a7812ae4 3026 case 0x32: /* FBLT */
4af70374 3027 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3028 break;
a7812ae4 3029 case 0x33: /* FBLE */
4af70374 3030 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3031 break;
3032 case 0x34:
3033 /* BSR */
4af70374 3034 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3035 break;
a7812ae4 3036 case 0x35: /* FBNE */
4af70374 3037 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3038 break;
a7812ae4 3039 case 0x36: /* FBGE */
4af70374 3040 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3041 break;
a7812ae4 3042 case 0x37: /* FBGT */
4af70374 3043 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3044 break;
3045 case 0x38:
3046 /* BLBC */
4af70374 3047 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3048 break;
3049 case 0x39:
3050 /* BEQ */
4af70374 3051 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3052 break;
3053 case 0x3A:
3054 /* BLT */
4af70374 3055 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3056 break;
3057 case 0x3B:
3058 /* BLE */
4af70374 3059 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3060 break;
3061 case 0x3C:
3062 /* BLBS */
4af70374 3063 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3064 break;
3065 case 0x3D:
3066 /* BNE */
4af70374 3067 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3068 break;
3069 case 0x3E:
3070 /* BGE */
4af70374 3071 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3072 break;
3073 case 0x3F:
3074 /* BGT */
4af70374 3075 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3076 break;
3077 invalid_opc:
3078 gen_invalid(ctx);
4af70374
RH
3079 /* PC updated by gen_excp. */
3080 ret = EXIT_PC_UPDATED;
4c9649a9
JM
3081 break;
3082 }
3083
3084 return ret;
3085}
3086
636aa200
BS
/* Core translation loop: decode guest Alpha instructions starting at tb->pc
   and emit TCG ops for them until the translator signals an exit condition.
   When SEARCH_PC is nonzero we are re-translating an existing TB in order to
   rebuild the pc <-> opcode-index mapping tables (gen_opc_pc et al.) used by
   exception restart, instead of producing a fresh TB.  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;             /* lj: last opcode-table slot filled (search_pc) */
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    /* Current mode bits of the PS select the MMU index; the low bit of
       EXC_ADDR flags PALmode execution.  */
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamiclly figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        /* Emit a debug exception if a breakpoint is set on this pc.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record the mapping from opcode buffer index to guest pc,
               zero-filling any slots skipped since the last insn.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        /* If the last insn of the TB may do I/O, bracket it for icount.  */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        if (ret == NO_EXIT) {
            /* If we reach a page boundary, are single stepping,
               or exhaust instruction count, stop generation.  */
            if (env->singlestep_enabled) {
                gen_excp(&ctx, EXCP_DEBUG, 0);
                ret = EXIT_PC_UPDATED;
            } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                       || gen_opc_ptr >= gen_opc_end
                       || num_insns >= max_insns
                       || singlestep) {
                ret = EXIT_PC_STALE;
            }
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
        /* translate_one already chained to the next TB.  */
        break;
    case EXIT_PC_STALE:
        /* cpu_pc was not kept up to date; store the final value.  */
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        tcg_gen_exit_tb(0);
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad out the remainder of the mapping tables.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
3215
/* Public entry point: translate a TB normally (no pc-search tables).  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
3220
/* Public entry point: re-translate a TB to rebuild the opcode-index to
   guest-pc mapping, used when restoring state at an exception point.  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
3225
a964acc6
RH
/* Table of selectable CPU models: each entry pairs a model name with its
   implementation version (IMPLVER) and architecture-extension mask (AMASK).
   "evN" names and their 21x64 part-number aliases map to the same values.  */
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4", IMPLVER_2106x, 0 },
    { "ev5", IMPLVER_21164, 0 },
    { "ev56", IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
3249
/* Allocate and initialize an Alpha CPU state for the model named by
   CPU_MODEL.  An unrecognized model name silently falls back to ev67-class
   capabilities.  Returns the new CPUAlphaState (ownership passes to the
   caller/emulator core; never freed here).  */
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    /* Look the requested model up in the table; first match wins.  */
    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
    /* User mode: disable all FP traps in the FPCR, since there is no
       PALcode to complete them.  */
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#else
    pal_init(env);
#endif

    /* Initialize IPR */
#if defined (CONFIG_USER_ONLY)
    env->ipr[IPR_EXC_ADDR] = 0;
    env->ipr[IPR_EXC_SUM] = 0;
    env->ipr[IPR_EXC_MASK] = 0;
#else
    {
        // uint64_t hwpcb;
        // hwpcb = env->ipr[IPR_PCBB];
        env->ipr[IPR_ASN] = 0;
        env->ipr[IPR_ASTEN] = 0;
        env->ipr[IPR_ASTSR] = 0;
        env->ipr[IPR_DATFX] = 0;
        /* XXX: fix this */
        // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
        // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
        // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
        // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
        env->ipr[IPR_FEN] = 0;
        env->ipr[IPR_IPL] = 31;
        env->ipr[IPR_MCES] = 0;
        env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
        // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
        env->ipr[IPR_SISR] = 0;
        env->ipr[IPR_VIRBND] = -1ULL;
    }
#endif

    qemu_init_vcpu(env);
    return env;
}
aaed909a 3316
d2856f1a
AJ
/* Restore the guest pc after an exception inside a TB: PC_POS indexes the
   gen_opc_pc table built by gen_intermediate_code_pc.  SEARCHED_PC and PUC
   are unused on Alpha.  */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}