/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    uint32_t amask;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);

    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
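/* The LDx_L/STx_C pairs are modelled with the cpu_lock global: the
   load-locked helpers below record the locked address in cpu_lock, and the
   matching store-conditional helpers (gen_qemu_stl_c/stq_c) store only if
   that address still matches, write the 0/1 success status back into the
   data register, and then invalidate the lock by setting it to -1. */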
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
4af70374 329static int use_goto_tb(DisasContext *ctx, uint64_t dest)
4c9649a9 330{
4af70374
RH
331 /* Check for the dest on the same page as the start of the TB. We
332 also want to suppress goto_tb in the case of single-stepping and IO. */
333 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
334 && !ctx->env->singlestep_enabled
335 && !(ctx->tb->cflags & CF_LAST_IO));
336}
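/* Keeping direct jumps within the current guest page means that page-level
   TB invalidation is enough to unlink them; single-stepping and the
   CF_LAST_IO case must return to the main loop instead, so goto_tb is
   suppressed there as well. */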
dbb30fe6 337
4af70374
RH
338static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
339{
340 uint64_t dest = ctx->pc + (disp << 2);
341
342 if (ra != 31) {
343 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
344 }
345
346 /* Notice branch-to-next; used to initialize RA with the PC. */
347 if (disp == 0) {
348 return 0;
349 } else if (use_goto_tb(ctx, dest)) {
350 tcg_gen_goto_tb(0);
351 tcg_gen_movi_i64(cpu_pc, dest);
352 tcg_gen_exit_tb((long)ctx->tb);
353 return EXIT_GOTO_TB;
354 } else {
355 tcg_gen_movi_i64(cpu_pc, dest);
356 return EXIT_PC_UPDATED;
357 }
dbb30fe6
RH
358}
359
4af70374
RH
360static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
361 TCGv cmp, int32_t disp)
dbb30fe6 362{
4af70374 363 uint64_t dest = ctx->pc + (disp << 2);
dbb30fe6 364 int lab_true = gen_new_label();
9c29504e 365
4af70374
RH
366 if (use_goto_tb(ctx, dest)) {
367 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
368
369 tcg_gen_goto_tb(0);
370 tcg_gen_movi_i64(cpu_pc, ctx->pc);
371 tcg_gen_exit_tb((long)ctx->tb);
372
373 gen_set_label(lab_true);
374 tcg_gen_goto_tb(1);
375 tcg_gen_movi_i64(cpu_pc, dest);
376 tcg_gen_exit_tb((long)ctx->tb + 1);
377
378 return EXIT_GOTO_TB;
379 } else {
380 int lab_over = gen_new_label();
381
382 /* ??? Consider using either
383 movi pc, next
384 addi tmp, pc, disp
385 movcond pc, cond, 0, tmp, pc
386 or
387 setcond tmp, cond, 0
388 movi pc, next
389 neg tmp, tmp
390 andi tmp, tmp, disp
391 add pc, pc, tmp
392 The current diamond subgraph surely isn't efficient. */
393
394 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
395 tcg_gen_movi_i64(cpu_pc, ctx->pc);
396 tcg_gen_br(lab_over);
397 gen_set_label(lab_true);
398 tcg_gen_movi_i64(cpu_pc, dest);
399 gen_set_label(lab_over);
400
401 return EXIT_PC_UPDATED;
402 }
403}
404
405static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
406 int32_t disp, int mask)
407{
408 TCGv cmp_tmp;
409
410 if (unlikely(ra == 31)) {
411 cmp_tmp = tcg_const_i64(0);
412 } else {
413 cmp_tmp = tcg_temp_new();
9c29504e 414 if (mask) {
4af70374 415 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
dbb30fe6 416 } else {
4af70374 417 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
dbb30fe6 418 }
9c29504e 419 }
4af70374
RH
420
421 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
422}
423
4af70374 424/* Fold -0.0 for comparison with COND. */
dbb30fe6 425
4af70374 426static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 427{
dbb30fe6 428 uint64_t mzero = 1ull << 63;
f18cd223 429
dbb30fe6
RH
430 switch (cond) {
431 case TCG_COND_LE:
432 case TCG_COND_GT:
433 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 434 tcg_gen_mov_i64(dest, src);
a7812ae4 435 break;
dbb30fe6
RH
436
437 case TCG_COND_EQ:
438 case TCG_COND_NE:
439 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 440 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 441 break;
dbb30fe6
RH
442
443 case TCG_COND_GE:
dbb30fe6 444 case TCG_COND_LT:
4af70374
RH
445 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
446 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
447 tcg_gen_neg_i64(dest, dest);
448 tcg_gen_and_i64(dest, dest, src);
a7812ae4 449 break;
dbb30fe6 450
a7812ae4
PB
451 default:
452 abort();
f18cd223 453 }
dbb30fe6
RH
454}
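/* For the GE/LT case above: setcond produces 0 only when SRC is exactly the
   mzero pattern (-0.0), so after negation the mask is all-zeros for -0.0 and
   all-ones otherwise; ANDing with SRC therefore turns -0.0 into +0.0 and
   leaves every other value, including +0.0, untouched. */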
455
4af70374
RH
456static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
457 int32_t disp)
dbb30fe6 458{
4af70374 459 TCGv cmp_tmp;
dbb30fe6
RH
460
461 if (unlikely(ra == 31)) {
462 /* Very uncommon case, but easier to optimize it to an integer
463 comparison than continuing with the floating point comparison. */
4af70374 464 return gen_bcond(ctx, cond, ra, disp, 0);
dbb30fe6
RH
465 }
466
4af70374
RH
467 cmp_tmp = tcg_temp_new();
468 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
469 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
470}
471
bbe1dab4 472static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 473 int islit, uint8_t lit, int mask)
4c9649a9 474{
bbe1dab4 475 TCGCond inv_cond = tcg_invert_cond(cond);
9c29504e
AJ
476 int l1;
477
478 if (unlikely(rc == 31))
479 return;
480
481 l1 = gen_new_label();
482
483 if (ra != 31) {
484 if (mask) {
a7812ae4 485 TCGv tmp = tcg_temp_new();
9c29504e
AJ
486 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
487 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
488 tcg_temp_free(tmp);
489 } else
490 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
491 } else {
492 /* Very uncommon case - Do not bother to optimize. */
493 TCGv tmp = tcg_const_i64(0);
494 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
495 tcg_temp_free(tmp);
496 }
497
4c9649a9 498 if (islit)
9c29504e 499 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 500 else
dfaa8583 501 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 502 gen_set_label(l1);
4c9649a9
JM
503}
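/* CMOVxx: the move into Rc is skipped by branching over it on the inverted
   condition, so Rc is only written when the condition on Ra holds; the MASK
   flag selects the low-bit test used by CMOVLBS/CMOVLBC. */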
504
bbe1dab4 505static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 506{
4af70374 507 TCGv cmp_tmp;
dbb30fe6
RH
508 int l1;
509
4af70374 510 if (unlikely(rc == 31)) {
dbb30fe6 511 return;
4af70374
RH
512 }
513
514 cmp_tmp = tcg_temp_new();
dbb30fe6 515 if (unlikely(ra == 31)) {
4af70374
RH
516 tcg_gen_movi_i64(cmp_tmp, 0);
517 } else {
518 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
dbb30fe6
RH
519 }
520
521 l1 = gen_new_label();
4af70374
RH
522 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
523 tcg_temp_free(cmp_tmp);
dbb30fe6
RH
524
525 if (rb != 31)
526 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
527 else
528 tcg_gen_movi_i64(cpu_fir[rc], 0);
529 gen_set_label(l1);
530}
531
f24518b5
RH
532#define QUAL_RM_N 0x080 /* Round mode nearest even */
533#define QUAL_RM_C 0x000 /* Round mode chopped */
534#define QUAL_RM_M 0x040 /* Round mode minus infinity */
535#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
536#define QUAL_RM_MASK 0x0c0
537
538#define QUAL_U 0x100 /* Underflow enable (fp output) */
539#define QUAL_V 0x100 /* Overflow enable (int output) */
540#define QUAL_S 0x400 /* Software completion enable */
541#define QUAL_I 0x200 /* Inexact detection enable */
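/* These bits are taken from fn11, the 11-bit function field of the FP
   operate format; a qualified opcode such as ADDS/SUI is simply the base
   function code with the corresponding QUAL_S, QUAL_U and QUAL_I bits set. */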
542
543static void gen_qual_roundmode(DisasContext *ctx, int fn11)
544{
545 TCGv_i32 tmp;
546
547 fn11 &= QUAL_RM_MASK;
548 if (fn11 == ctx->tb_rm) {
549 return;
550 }
551 ctx->tb_rm = fn11;
552
553 tmp = tcg_temp_new_i32();
554 switch (fn11) {
555 case QUAL_RM_N:
556 tcg_gen_movi_i32(tmp, float_round_nearest_even);
557 break;
558 case QUAL_RM_C:
559 tcg_gen_movi_i32(tmp, float_round_to_zero);
560 break;
561 case QUAL_RM_M:
562 tcg_gen_movi_i32(tmp, float_round_down);
563 break;
564 case QUAL_RM_D:
565 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
566 break;
567 }
568
569#if defined(CONFIG_SOFTFLOAT_INLINE)
570 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
571 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
572 sets the one field. */
573 tcg_gen_st8_i32(tmp, cpu_env,
574 offsetof(CPUState, fp_status.float_rounding_mode));
575#else
576 gen_helper_setroundmode(tmp);
577#endif
578
579 tcg_temp_free_i32(tmp);
580}
581
582static void gen_qual_flushzero(DisasContext *ctx, int fn11)
583{
584 TCGv_i32 tmp;
585
586 fn11 &= QUAL_U;
587 if (fn11 == ctx->tb_ftz) {
588 return;
589 }
590 ctx->tb_ftz = fn11;
591
592 tmp = tcg_temp_new_i32();
593 if (fn11) {
594 /* Underflow is enabled, use the FPCR setting. */
595 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
596 } else {
597 /* Underflow is disabled, force flush-to-zero. */
598 tcg_gen_movi_i32(tmp, 1);
599 }
600
601#if defined(CONFIG_SOFTFLOAT_INLINE)
602 tcg_gen_st8_i32(tmp, cpu_env,
603 offsetof(CPUState, fp_status.flush_to_zero));
604#else
605 gen_helper_setflushzero(tmp);
606#endif
607
608 tcg_temp_free_i32(tmp);
609}
610
611static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
612{
613 TCGv val = tcg_temp_new();
614 if (reg == 31) {
615 tcg_gen_movi_i64(val, 0);
616 } else if (fn11 & QUAL_S) {
617 gen_helper_ieee_input_s(val, cpu_fir[reg]);
618 } else if (is_cmp) {
619 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
620 } else {
621 gen_helper_ieee_input(val, cpu_fir[reg]);
622 }
623 return val;
624}
625
626static void gen_fp_exc_clear(void)
627{
628#if defined(CONFIG_SOFTFLOAT_INLINE)
629 TCGv_i32 zero = tcg_const_i32(0);
630 tcg_gen_st8_i32(zero, cpu_env,
631 offsetof(CPUState, fp_status.float_exception_flags));
632 tcg_temp_free_i32(zero);
633#else
634 gen_helper_fp_exc_clear();
635#endif
636}
637
638static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
639{
640 /* ??? We ought to be able to do something with imprecise exceptions.
641 E.g. notice we're still in the trap shadow of something within the
642 TB and do not generate the code to signal the exception; end the TB
643 when an exception is forced to arrive, either by consumption of a
644 register value or TRAPB or EXCB. */
645 TCGv_i32 exc = tcg_temp_new_i32();
646 TCGv_i32 reg;
647
648#if defined(CONFIG_SOFTFLOAT_INLINE)
649 tcg_gen_ld8u_i32(exc, cpu_env,
650 offsetof(CPUState, fp_status.float_exception_flags));
651#else
652 gen_helper_fp_exc_get(exc);
653#endif
654
655 if (ignore) {
656 tcg_gen_andi_i32(exc, exc, ~ignore);
657 }
658
659 /* ??? Pass in the regno of the destination so that the helper can
660 set EXC_MASK, which contains a bitmask of destination registers
661 that have caused arithmetic traps. A simple userspace emulation
662 does not require this. We do need it for a guest kernel's entArith,
663 or if we were to do something clever with imprecise exceptions. */
664 reg = tcg_const_i32(rc + 32);
665
666 if (fn11 & QUAL_S) {
667 gen_helper_fp_exc_raise_s(exc, reg);
668 } else {
669 gen_helper_fp_exc_raise(exc, reg);
670 }
671
672 tcg_temp_free_i32(reg);
673 tcg_temp_free_i32(exc);
674}
675
676static inline void gen_fp_exc_raise(int rc, int fn11)
677{
678 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 679}
f24518b5 680
593f17e5
RH
681static void gen_fcvtlq(int rb, int rc)
682{
683 if (unlikely(rc == 31)) {
684 return;
685 }
686 if (unlikely(rb == 31)) {
687 tcg_gen_movi_i64(cpu_fir[rc], 0);
688 } else {
689 TCGv tmp = tcg_temp_new();
690
691 /* The arithmetic right shift here, plus the sign-extended mask below
692 yields a sign-extended result without an explicit ext32s_i64. */
693 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
694 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
695 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
696 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
697 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
698
699 tcg_temp_free(tmp);
700 }
701}
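/* CVTLQ above and CVTQL below convert between a quadword integer and the
   longword-in-FP-register layout, in which bits <31:30> of the longword sit
   at <63:62> and bits <29:0> at <58:29>; the shift/mask pairs are the two
   directions of that same mapping. */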
702
735cf45f
RH
703static void gen_fcvtql(int rb, int rc)
704{
705 if (unlikely(rc == 31)) {
706 return;
707 }
708 if (unlikely(rb == 31)) {
709 tcg_gen_movi_i64(cpu_fir[rc], 0);
710 } else {
711 TCGv tmp = tcg_temp_new();
712
713 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
714 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
715 tcg_gen_shli_i64(tmp, tmp, 32);
716 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
717 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
718
719 tcg_temp_free(tmp);
720 }
721}
722
723static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
724{
725 if (rb != 31) {
726 int lab = gen_new_label();
727 TCGv tmp = tcg_temp_new();
728
729 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
730 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
731 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
732
733 gen_set_label(lab);
734 }
735 gen_fcvtql(rb, rc);
736}
737
f24518b5
RH
738#define FARITH2(name) \
739static inline void glue(gen_f, name)(int rb, int rc) \
740{ \
741 if (unlikely(rc == 31)) { \
742 return; \
743 } \
744 if (rb != 31) { \
745 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
746 } else { \
747 TCGv tmp = tcg_const_i64(0); \
748 gen_helper_ ## name (cpu_fir[rc], tmp); \
749 tcg_temp_free(tmp); \
750 } \
751}
f24518b5
RH
752
753/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
754FARITH2(sqrtf)
755FARITH2(sqrtg)
a7812ae4
PB
756FARITH2(cvtgf)
757FARITH2(cvtgq)
758FARITH2(cvtqf)
759FARITH2(cvtqg)
f24518b5
RH
760
761static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
762 int rb, int rc, int fn11)
763{
764 TCGv vb;
765
766 /* ??? This is wrong: the instruction is not a nop, it still may
767 raise exceptions. */
768 if (unlikely(rc == 31)) {
769 return;
770 }
771
772 gen_qual_roundmode(ctx, fn11);
773 gen_qual_flushzero(ctx, fn11);
774 gen_fp_exc_clear();
775
776 vb = gen_ieee_input(rb, fn11, 0);
777 helper(cpu_fir[rc], vb);
778 tcg_temp_free(vb);
779
780 gen_fp_exc_raise(rc, fn11);
781}
782
783#define IEEE_ARITH2(name) \
784static inline void glue(gen_f, name)(DisasContext *ctx, \
785 int rb, int rc, int fn11) \
786{ \
787 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
788}
789IEEE_ARITH2(sqrts)
790IEEE_ARITH2(sqrtt)
791IEEE_ARITH2(cvtst)
792IEEE_ARITH2(cvtts)
793
794static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
795{
796 TCGv vb;
797 int ignore = 0;
798
799 /* ??? This is wrong: the instruction is not a nop, it still may
800 raise exceptions. */
801 if (unlikely(rc == 31)) {
802 return;
803 }
804
805 /* No need to set flushzero, since we have an integer output. */
806 gen_fp_exc_clear();
807 vb = gen_ieee_input(rb, fn11, 0);
808
809 /* Almost all integer conversions use cropped rounding, and most
810 also do not have integer overflow enabled. Special case that. */
811 switch (fn11) {
812 case QUAL_RM_C:
813 gen_helper_cvttq_c(cpu_fir[rc], vb);
814 break;
815 case QUAL_V | QUAL_RM_C:
816 case QUAL_S | QUAL_V | QUAL_RM_C:
817 ignore = float_flag_inexact;
818 /* FALLTHRU */
819 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
820 gen_helper_cvttq_svic(cpu_fir[rc], vb);
821 break;
822 default:
823 gen_qual_roundmode(ctx, fn11);
824 gen_helper_cvttq(cpu_fir[rc], vb);
825 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
826 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
827 break;
828 }
829 tcg_temp_free(vb);
830
831 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
832}
833
f24518b5
RH
834static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
835 int rb, int rc, int fn11)
836{
837 TCGv vb;
838
839 /* ??? This is wrong: the instruction is not a nop, it still may
840 raise exceptions. */
841 if (unlikely(rc == 31)) {
842 return;
843 }
844
845 gen_qual_roundmode(ctx, fn11);
846
847 if (rb == 31) {
848 vb = tcg_const_i64(0);
849 } else {
850 vb = cpu_fir[rb];
851 }
852
853 /* The only exception that can be raised by integer conversion
854 is inexact. Thus we only need to worry about exceptions when
855 inexact handling is requested. */
856 if (fn11 & QUAL_I) {
857 gen_fp_exc_clear();
858 helper(cpu_fir[rc], vb);
859 gen_fp_exc_raise(rc, fn11);
860 } else {
861 helper(cpu_fir[rc], vb);
862 }
863
864 if (rb == 31) {
865 tcg_temp_free(vb);
866 }
867}
868
869#define IEEE_INTCVT(name) \
870static inline void glue(gen_f, name)(DisasContext *ctx, \
871 int rb, int rc, int fn11) \
872{ \
873 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
874}
875IEEE_INTCVT(cvtqs)
876IEEE_INTCVT(cvtqt)
877
dc96be4b
RH
878static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
879{
880 TCGv va, vb, vmask;
881 int za = 0, zb = 0;
882
883 if (unlikely(rc == 31)) {
884 return;
885 }
886
887 vmask = tcg_const_i64(mask);
888
889 TCGV_UNUSED_I64(va);
890 if (ra == 31) {
891 if (inv_a) {
892 va = vmask;
893 } else {
894 za = 1;
895 }
896 } else {
897 va = tcg_temp_new_i64();
898 tcg_gen_mov_i64(va, cpu_fir[ra]);
899 if (inv_a) {
900 tcg_gen_andc_i64(va, vmask, va);
901 } else {
902 tcg_gen_and_i64(va, va, vmask);
903 }
904 }
905
906 TCGV_UNUSED_I64(vb);
907 if (rb == 31) {
908 zb = 1;
909 } else {
910 vb = tcg_temp_new_i64();
911 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
912 }
913
914 switch (za << 1 | zb) {
915 case 0 | 0:
916 tcg_gen_or_i64(cpu_fir[rc], va, vb);
917 break;
918 case 0 | 1:
919 tcg_gen_mov_i64(cpu_fir[rc], va);
920 break;
921 case 2 | 0:
922 tcg_gen_mov_i64(cpu_fir[rc], vb);
923 break;
924 case 2 | 1:
925 tcg_gen_movi_i64(cpu_fir[rc], 0);
926 break;
927 }
928
929 tcg_temp_free(vmask);
930 if (ra != 31) {
931 tcg_temp_free(va);
932 }
933 if (rb != 31) {
934 tcg_temp_free(vb);
935 }
936}
937
938static inline void gen_fcpys(int ra, int rb, int rc)
939{
940 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
941}
942
943static inline void gen_fcpysn(int ra, int rb, int rc)
944{
945 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
946}
947
948static inline void gen_fcpyse(int ra, int rb, int rc)
949{
950 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
951}
952
f24518b5
RH
953#define FARITH3(name) \
954static inline void glue(gen_f, name)(int ra, int rb, int rc) \
955{ \
956 TCGv va, vb; \
957 \
958 if (unlikely(rc == 31)) { \
959 return; \
960 } \
961 if (ra == 31) { \
962 va = tcg_const_i64(0); \
963 } else { \
964 va = cpu_fir[ra]; \
965 } \
966 if (rb == 31) { \
967 vb = tcg_const_i64(0); \
968 } else { \
969 vb = cpu_fir[rb]; \
970 } \
971 \
972 gen_helper_ ## name (cpu_fir[rc], va, vb); \
973 \
974 if (ra == 31) { \
975 tcg_temp_free(va); \
976 } \
977 if (rb == 31) { \
978 tcg_temp_free(vb); \
979 } \
980}
f24518b5
RH
981
982/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
983FARITH3(addf)
984FARITH3(subf)
985FARITH3(mulf)
986FARITH3(divf)
987FARITH3(addg)
988FARITH3(subg)
989FARITH3(mulg)
990FARITH3(divg)
991FARITH3(cmpgeq)
992FARITH3(cmpglt)
993FARITH3(cmpgle)
f24518b5
RH
994
995static void gen_ieee_arith3(DisasContext *ctx,
996 void (*helper)(TCGv, TCGv, TCGv),
997 int ra, int rb, int rc, int fn11)
998{
999 TCGv va, vb;
1000
1001 /* ??? This is wrong: the instruction is not a nop, it still may
1002 raise exceptions. */
1003 if (unlikely(rc == 31)) {
1004 return;
1005 }
1006
1007 gen_qual_roundmode(ctx, fn11);
1008 gen_qual_flushzero(ctx, fn11);
1009 gen_fp_exc_clear();
1010
1011 va = gen_ieee_input(ra, fn11, 0);
1012 vb = gen_ieee_input(rb, fn11, 0);
1013 helper(cpu_fir[rc], va, vb);
1014 tcg_temp_free(va);
1015 tcg_temp_free(vb);
1016
1017 gen_fp_exc_raise(rc, fn11);
1018}
1019
1020#define IEEE_ARITH3(name) \
1021static inline void glue(gen_f, name)(DisasContext *ctx, \
1022 int ra, int rb, int rc, int fn11) \
1023{ \
1024 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1025}
1026IEEE_ARITH3(adds)
1027IEEE_ARITH3(subs)
1028IEEE_ARITH3(muls)
1029IEEE_ARITH3(divs)
1030IEEE_ARITH3(addt)
1031IEEE_ARITH3(subt)
1032IEEE_ARITH3(mult)
1033IEEE_ARITH3(divt)
1034
1035static void gen_ieee_compare(DisasContext *ctx,
1036 void (*helper)(TCGv, TCGv, TCGv),
1037 int ra, int rb, int rc, int fn11)
1038{
1039 TCGv va, vb;
1040
1041 /* ??? This is wrong: the instruction is not a nop, it still may
1042 raise exceptions. */
1043 if (unlikely(rc == 31)) {
1044 return;
1045 }
1046
1047 gen_fp_exc_clear();
1048
1049 va = gen_ieee_input(ra, fn11, 1);
1050 vb = gen_ieee_input(rb, fn11, 1);
1051 helper(cpu_fir[rc], va, vb);
1052 tcg_temp_free(va);
1053 tcg_temp_free(vb);
1054
1055 gen_fp_exc_raise(rc, fn11);
1056}
1057
1058#define IEEE_CMP3(name) \
1059static inline void glue(gen_f, name)(DisasContext *ctx, \
1060 int ra, int rb, int rc, int fn11) \
1061{ \
1062 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1063}
1064IEEE_CMP3(cmptun)
1065IEEE_CMP3(cmpteq)
1066IEEE_CMP3(cmptlt)
1067IEEE_CMP3(cmptle)
a7812ae4 1068
248c42f3
RH
1069static inline uint64_t zapnot_mask(uint8_t lit)
1070{
1071 uint64_t mask = 0;
1072 int i;
1073
1074 for (i = 0; i < 8; ++i) {
1075 if ((lit >> i) & 1)
1076 mask |= 0xffull << (i * 8);
1077 }
1078 return mask;
1079}
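/* Example: zapnot_mask(0x01) == 0x00000000000000ffull and
   zapnot_mask(0x0f) == 0x00000000ffffffffull, i.e. each set bit in LIT
   keeps the corresponding byte of the operand. */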
1080
87d98f95
RH
1081/* Implement zapnot with an immediate operand, which expands to some
1082 form of immediate AND. This is a basic building block in the
1083 definition of many of the other byte manipulation instructions. */
248c42f3 1084static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1085{
87d98f95
RH
1086 switch (lit) {
1087 case 0x00:
248c42f3 1088 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1089 break;
1090 case 0x01:
248c42f3 1091 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1092 break;
1093 case 0x03:
248c42f3 1094 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1095 break;
1096 case 0x0f:
248c42f3 1097 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1098 break;
1099 case 0xff:
248c42f3 1100 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1101 break;
1102 default:
248c42f3 1103 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1104 break;
1105 }
1106}
1107
1108static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1109{
1110 if (unlikely(rc == 31))
1111 return;
1112 else if (unlikely(ra == 31))
1113 tcg_gen_movi_i64(cpu_ir[rc], 0);
1114 else if (islit)
248c42f3 1115 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1116 else
1117 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1118}
1119
1120static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1121{
1122 if (unlikely(rc == 31))
1123 return;
1124 else if (unlikely(ra == 31))
1125 tcg_gen_movi_i64(cpu_ir[rc], 0);
1126 else if (islit)
248c42f3 1127 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1128 else
1129 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1130}
1131
1132
248c42f3 1133/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1134static void gen_ext_h(int ra, int rb, int rc, int islit,
1135 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1136{
1137 if (unlikely(rc == 31))
1138 return;
377a43b6
RH
1139 else if (unlikely(ra == 31))
1140 tcg_gen_movi_i64(cpu_ir[rc], 0);
1141 else {
dfaa8583 1142 if (islit) {
377a43b6
RH
1143 lit = (64 - (lit & 7) * 8) & 0x3f;
1144 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1145 } else {
377a43b6 1146 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1147 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1148 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1149 tcg_gen_neg_i64(tmp1, tmp1);
1150 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1151 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1152 tcg_temp_free(tmp1);
dfaa8583 1153 }
248c42f3 1154 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1155 }
b3249f63
AJ
1156}
1157
248c42f3 1158/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1159static void gen_ext_l(int ra, int rb, int rc, int islit,
1160 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1161{
1162 if (unlikely(rc == 31))
1163 return;
377a43b6
RH
1164 else if (unlikely(ra == 31))
1165 tcg_gen_movi_i64(cpu_ir[rc], 0);
1166 else {
dfaa8583 1167 if (islit) {
377a43b6 1168 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1169 } else {
a7812ae4 1170 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1171 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1172 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1173 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1174 tcg_temp_free(tmp);
fe2b269a 1175 }
248c42f3
RH
1176 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1177 }
1178}
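/* Example: EXTBL (byte_mask 0x01) with a literal byte offset of 3 reduces to
   a right shift by 24 followed by an 8-bit zero-extension, i.e. it extracts
   byte 3 of Ra into the low byte of Rc. */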
1179
50eb6e5c
RH
1180/* INSWH, INSLH, INSQH */
1181static void gen_ins_h(int ra, int rb, int rc, int islit,
1182 uint8_t lit, uint8_t byte_mask)
1183{
1184 if (unlikely(rc == 31))
1185 return;
1186 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1187 tcg_gen_movi_i64(cpu_ir[rc], 0);
1188 else {
1189 TCGv tmp = tcg_temp_new();
1190
1191 /* The instruction description has us left-shift the byte mask
1192 and extract bits <15:8> and apply that zap at the end. This
1193 is equivalent to simply performing the zap first and shifting
1194 afterward. */
1195 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1196
1197 if (islit) {
1198 /* Note that we have handled the lit==0 case above. */
1199 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1200 } else {
1201 TCGv shift = tcg_temp_new();
1202
1203 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1204 Do this portably by splitting the shift into two parts:
1205 shift_count-1 and 1. Arrange for the -1 by using
1206 ones-complement instead of twos-complement in the negation:
1207 ~((B & 7) * 8) & 63. */
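            /* E.g. (B & 7) == 0 gives ~0 & 63 = 63, and 63 plus the extra
               shift of 1 below totals 64, leaving zero; (B & 7) == 1 gives
               ~8 & 63 = 55, totalling 56 = 64 - 8 as required. */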
1208
1209 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1210 tcg_gen_shli_i64(shift, shift, 3);
1211 tcg_gen_not_i64(shift, shift);
1212 tcg_gen_andi_i64(shift, shift, 0x3f);
1213
1214 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1215 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1216 tcg_temp_free(shift);
1217 }
1218 tcg_temp_free(tmp);
1219 }
1220}
1221
248c42f3 1222/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1223static void gen_ins_l(int ra, int rb, int rc, int islit,
1224 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1225{
1226 if (unlikely(rc == 31))
1227 return;
1228 else if (unlikely(ra == 31))
1229 tcg_gen_movi_i64(cpu_ir[rc], 0);
1230 else {
1231 TCGv tmp = tcg_temp_new();
1232
1233 /* The instruction description has us left-shift the byte mask
1234 the same number of byte slots as the data and apply the zap
1235 at the end. This is equivalent to simply performing the zap
1236 first and shifting afterward. */
1237 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1238
1239 if (islit) {
1240 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1241 } else {
1242 TCGv shift = tcg_temp_new();
1243 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1244 tcg_gen_shli_i64(shift, shift, 3);
1245 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1246 tcg_temp_free(shift);
1247 }
1248 tcg_temp_free(tmp);
377a43b6 1249 }
b3249f63
AJ
1250}
1251
ffec44f1
RH
1252/* MSKWH, MSKLH, MSKQH */
1253static void gen_msk_h(int ra, int rb, int rc, int islit,
1254 uint8_t lit, uint8_t byte_mask)
1255{
1256 if (unlikely(rc == 31))
1257 return;
1258 else if (unlikely(ra == 31))
1259 tcg_gen_movi_i64(cpu_ir[rc], 0);
1260 else if (islit) {
1261 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1262 } else {
1263 TCGv shift = tcg_temp_new();
1264 TCGv mask = tcg_temp_new();
1265
1266 /* The instruction description is as above, where the byte_mask
1267 is shifted left, and then we extract bits <15:8>. This can be
1268 emulated with a right-shift on the expanded byte mask. This
1269 requires extra care because for an input <2:0> == 0 we need a
1270 shift of 64 bits in order to generate a zero. This is done by
1271 splitting the shift into two parts, the variable shift - 1
1272 followed by a constant 1 shift. The code we expand below is
1273 equivalent to ~((B & 7) * 8) & 63. */
1274
1275 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1276 tcg_gen_shli_i64(shift, shift, 3);
1277 tcg_gen_not_i64(shift, shift);
1278 tcg_gen_andi_i64(shift, shift, 0x3f);
1279 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1280 tcg_gen_shr_i64(mask, mask, shift);
1281 tcg_gen_shri_i64(mask, mask, 1);
1282
1283 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1284
1285 tcg_temp_free(mask);
1286 tcg_temp_free(shift);
1287 }
1288}
1289
14ab1634 1290/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1291static void gen_msk_l(int ra, int rb, int rc, int islit,
1292 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1293{
1294 if (unlikely(rc == 31))
1295 return;
1296 else if (unlikely(ra == 31))
1297 tcg_gen_movi_i64(cpu_ir[rc], 0);
1298 else if (islit) {
1299 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1300 } else {
1301 TCGv shift = tcg_temp_new();
1302 TCGv mask = tcg_temp_new();
1303
1304 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1305 tcg_gen_shli_i64(shift, shift, 3);
1306 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1307 tcg_gen_shl_i64(mask, mask, shift);
1308
1309 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1310
1311 tcg_temp_free(mask);
1312 tcg_temp_free(shift);
1313 }
1314}
1315
04acd307 1316/* Code to call arith3 helpers */
a7812ae4 1317#define ARITH3(name) \
636aa200
BS
1318static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1319 uint8_t lit) \
a7812ae4
PB
1320{ \
1321 if (unlikely(rc == 31)) \
1322 return; \
1323 \
1324 if (ra != 31) { \
1325 if (islit) { \
1326 TCGv tmp = tcg_const_i64(lit); \
1327 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1328 tcg_temp_free(tmp); \
1329 } else \
1330 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1331 } else { \
1332 TCGv tmp1 = tcg_const_i64(0); \
1333 if (islit) { \
1334 TCGv tmp2 = tcg_const_i64(lit); \
1335 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1336 tcg_temp_free(tmp2); \
1337 } else \
1338 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1339 tcg_temp_free(tmp1); \
1340 } \
b3249f63 1341}
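/* Each ARITH3(foo) expansion defines gen_foo(ra, rb, rc, islit, lit),
   normalizing an R31 or literal operand into a TCG constant before handing
   the operation to the out-of-line gen_helper_foo. */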
a7812ae4
PB
1342ARITH3(cmpbge)
1343ARITH3(addlv)
1344ARITH3(sublv)
1345ARITH3(addqv)
1346ARITH3(subqv)
a7812ae4
PB
1347ARITH3(umulh)
1348ARITH3(mullv)
1349ARITH3(mulqv)
13e4df99
RH
1350ARITH3(minub8)
1351ARITH3(minsb8)
1352ARITH3(minuw4)
1353ARITH3(minsw4)
1354ARITH3(maxub8)
1355ARITH3(maxsb8)
1356ARITH3(maxuw4)
1357ARITH3(maxsw4)
1358ARITH3(perr)
1359
1360#define MVIOP2(name) \
1361static inline void glue(gen_, name)(int rb, int rc) \
1362{ \
1363 if (unlikely(rc == 31)) \
1364 return; \
1365 if (unlikely(rb == 31)) \
1366 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1367 else \
1368 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1369}
1370MVIOP2(pklb)
1371MVIOP2(pkwb)
1372MVIOP2(unpkbl)
1373MVIOP2(unpkbw)
b3249f63 1374
9e05960f
RH
1375static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1376 int islit, uint8_t lit)
01ff9cc8 1377{
9e05960f 1378 TCGv va, vb;
01ff9cc8 1379
9e05960f 1380 if (unlikely(rc == 31)) {
13e4df99 1381 return;
9e05960f 1382 }
01ff9cc8 1383
9e05960f
RH
1384 if (ra == 31) {
1385 va = tcg_const_i64(0);
1386 } else {
1387 va = cpu_ir[ra];
1388 }
1389 if (islit) {
1390 vb = tcg_const_i64(lit);
1391 } else {
1392 vb = cpu_ir[rb];
1393 }
01ff9cc8 1394
9e05960f 1395 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1396
9e05960f
RH
1397 if (ra == 31) {
1398 tcg_temp_free(va);
1399 }
1400 if (islit) {
1401 tcg_temp_free(vb);
1402 }
01ff9cc8
AJ
1403}
1404
ac316ca4
RH
1405static void gen_rx(int ra, int set)
1406{
1407 TCGv_i32 tmp;
1408
1409 if (ra != 31) {
1410 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1411 }
1412
1413 tmp = tcg_const_i32(set);
1414 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1415 tcg_temp_free_i32(tmp);
1416}
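/* Used for the RS and RC instructions: the previous value of intr_flag is
   read back into Ra (unless Ra is R31) and the flag is then overwritten
   with SET. */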
1417
4af70374 1418static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1419{
1420 uint32_t palcode;
1421 int32_t disp21, disp16, disp12;
f88fe4e3
BS
1422 uint16_t fn11;
1423 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
adf3c8b6 1424 uint8_t lit;
4af70374 1425 ExitStatus ret;
4c9649a9
JM
1426
1427 /* Decode all instruction fields */
1428 opc = insn >> 26;
1429 ra = (insn >> 21) & 0x1F;
1430 rb = (insn >> 16) & 0x1F;
1431 rc = insn & 0x1F;
13e4df99 1432 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1433 if (rb == 31 && !islit) {
1434 islit = 1;
1435 lit = 0;
1436 } else
1437 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1438 palcode = insn & 0x03FFFFFF;
1439 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1440 disp16 = (int16_t)(insn & 0x0000FFFF);
1441 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
4c9649a9
JM
1442 fn11 = (insn >> 5) & 0x000007FF;
1443 fpfn = fn11 & 0x3F;
1444 fn7 = (insn >> 5) & 0x0000007F;
1445 fn2 = (insn >> 5) & 0x00000003;
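/* The 21-bit and 12-bit displacements are sign-extended by shifting the
   field to the top of a 32-bit value and arithmetic-shifting it back down,
   e.g. a disp21 field of 0x1FFFFF yields (0x1FFFFF << 11) >> 11 = -1. */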
806991da 1446 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1447 opc, ra, rb, rc, disp16);
806991da 1448
4af70374 1449 ret = NO_EXIT;
4c9649a9
JM
1450 switch (opc) {
1451 case 0x00:
1452 /* CALL_PAL */
ab471ade
RH
1453#ifdef CONFIG_USER_ONLY
1454 if (palcode == 0x9E) {
1455 /* RDUNIQUE */
1456 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1457 break;
1458 } else if (palcode == 0x9F) {
1459 /* WRUNIQUE */
1460 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1461 break;
1462 }
1463#endif
4c9649a9
JM
1464 if (palcode >= 0x80 && palcode < 0xC0) {
1465 /* Unprivileged PAL call */
8aa3fa20 1466 ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
ab471ade
RH
1467 break;
1468 }
1469#ifndef CONFIG_USER_ONLY
1470 if (palcode < 0x40) {
4c9649a9
JM
1471 /* Privileged PAL code */
1472 if (ctx->mem_idx & 1)
1473 goto invalid_opc;
8aa3fa20 1474 ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
4c9649a9 1475 }
ab471ade
RH
1476#endif
1477 /* Invalid PAL call */
1478 goto invalid_opc;
4c9649a9
JM
1479 case 0x01:
1480 /* OPC01 */
1481 goto invalid_opc;
1482 case 0x02:
1483 /* OPC02 */
1484 goto invalid_opc;
1485 case 0x03:
1486 /* OPC03 */
1487 goto invalid_opc;
1488 case 0x04:
1489 /* OPC04 */
1490 goto invalid_opc;
1491 case 0x05:
1492 /* OPC05 */
1493 goto invalid_opc;
1494 case 0x06:
1495 /* OPC06 */
1496 goto invalid_opc;
1497 case 0x07:
1498 /* OPC07 */
1499 goto invalid_opc;
1500 case 0x08:
1501 /* LDA */
1ef4ef4e 1502 if (likely(ra != 31)) {
496cb5b9 1503 if (rb != 31)
3761035f
AJ
1504 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1505 else
1506 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1507 }
4c9649a9
JM
1508 break;
1509 case 0x09:
1510 /* LDAH */
1ef4ef4e 1511 if (likely(ra != 31)) {
496cb5b9 1512 if (rb != 31)
3761035f
AJ
1513 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1514 else
1515 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1516 }
4c9649a9
JM
1517 break;
1518 case 0x0A:
1519 /* LDBU */
1520 if (!(ctx->amask & AMASK_BWX))
1521 goto invalid_opc;
f18cd223 1522 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1523 break;
1524 case 0x0B:
1525 /* LDQ_U */
f18cd223 1526 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1527 break;
1528 case 0x0C:
1529 /* LDWU */
1530 if (!(ctx->amask & AMASK_BWX))
1531 goto invalid_opc;
577d5e7f 1532 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1533 break;
1534 case 0x0D:
1535 /* STW */
57a92c8e 1536 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1537 break;
1538 case 0x0E:
1539 /* STB */
57a92c8e 1540 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
1541 break;
1542 case 0x0F:
1543 /* STQ_U */
57a92c8e 1544 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
4c9649a9
JM
1545 break;
1546 case 0x10:
1547 switch (fn7) {
1548 case 0x00:
1549 /* ADDL */
30c7183b
AJ
1550 if (likely(rc != 31)) {
1551 if (ra != 31) {
1552 if (islit) {
1553 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1554 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1555 } else {
30c7183b
AJ
1556 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1557 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1558 }
30c7183b
AJ
1559 } else {
1560 if (islit)
dfaa8583 1561 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1562 else
dfaa8583 1563 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1564 }
1565 }
4c9649a9
JM
1566 break;
1567 case 0x02:
1568 /* S4ADDL */
30c7183b
AJ
1569 if (likely(rc != 31)) {
1570 if (ra != 31) {
a7812ae4 1571 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1572 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1573 if (islit)
1574 tcg_gen_addi_i64(tmp, tmp, lit);
1575 else
1576 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1577 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1578 tcg_temp_free(tmp);
30c7183b
AJ
1579 } else {
1580 if (islit)
1581 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1582 else
dfaa8583 1583 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1584 }
1585 }
4c9649a9
JM
1586 break;
1587 case 0x09:
1588 /* SUBL */
30c7183b
AJ
1589 if (likely(rc != 31)) {
1590 if (ra != 31) {
dfaa8583 1591 if (islit)
30c7183b 1592 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1593 else
30c7183b 1594 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1595 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1596 } else {
1597 if (islit)
1598 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1599 else {
30c7183b
AJ
1600 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1601 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1602 }
1603 }
4c9649a9
JM
1604 break;
1605 case 0x0B:
1606 /* S4SUBL */
30c7183b
AJ
1607 if (likely(rc != 31)) {
1608 if (ra != 31) {
a7812ae4 1609 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1610 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1611 if (islit)
1612 tcg_gen_subi_i64(tmp, tmp, lit);
1613 else
1614 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1615 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1616 tcg_temp_free(tmp);
30c7183b
AJ
1617 } else {
1618 if (islit)
1619 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1620 else {
30c7183b
AJ
1621 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1622 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1623 }
30c7183b
AJ
1624 }
1625 }
4c9649a9
JM
1626 break;
1627 case 0x0F:
1628 /* CMPBGE */
a7812ae4 1629 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1630 break;
1631 case 0x12:
1632 /* S8ADDL */
30c7183b
AJ
1633 if (likely(rc != 31)) {
1634 if (ra != 31) {
a7812ae4 1635 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1636 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1637 if (islit)
1638 tcg_gen_addi_i64(tmp, tmp, lit);
1639 else
1640 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1641 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1642 tcg_temp_free(tmp);
30c7183b
AJ
1643 } else {
1644 if (islit)
1645 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1646 else
dfaa8583 1647 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1648 }
1649 }
4c9649a9
JM
1650 break;
1651 case 0x1B:
1652 /* S8SUBL */
30c7183b
AJ
1653 if (likely(rc != 31)) {
1654 if (ra != 31) {
a7812ae4 1655 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1656 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1657 if (islit)
1658 tcg_gen_subi_i64(tmp, tmp, lit);
1659 else
1660 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1661 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1662 tcg_temp_free(tmp);
30c7183b
AJ
1663 } else {
1664 if (islit)
1665 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1666 else
30c7183b
AJ
1667 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1668 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1669 }
30c7183b
AJ
1670 }
1671 }
4c9649a9
JM
1672 break;
1673 case 0x1D:
1674 /* CMPULT */
01ff9cc8 1675 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1676 break;
1677 case 0x20:
1678 /* ADDQ */
30c7183b
AJ
1679 if (likely(rc != 31)) {
1680 if (ra != 31) {
1681 if (islit)
1682 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1683 else
dfaa8583 1684 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1685 } else {
1686 if (islit)
1687 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1688 else
dfaa8583 1689 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1690 }
1691 }
4c9649a9
JM
1692 break;
1693 case 0x22:
1694 /* S4ADDQ */
30c7183b
AJ
1695 if (likely(rc != 31)) {
1696 if (ra != 31) {
a7812ae4 1697 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1698 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1699 if (islit)
1700 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1701 else
1702 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1703 tcg_temp_free(tmp);
30c7183b
AJ
1704 } else {
1705 if (islit)
1706 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1707 else
dfaa8583 1708 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1709 }
1710 }
4c9649a9
JM
1711 break;
1712 case 0x29:
1713 /* SUBQ */
30c7183b
AJ
1714 if (likely(rc != 31)) {
1715 if (ra != 31) {
1716 if (islit)
1717 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1718 else
dfaa8583 1719 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1720 } else {
1721 if (islit)
1722 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1723 else
dfaa8583 1724 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1725 }
1726 }
4c9649a9
JM
1727 break;
1728 case 0x2B:
1729 /* S4SUBQ */
30c7183b
AJ
1730 if (likely(rc != 31)) {
1731 if (ra != 31) {
a7812ae4 1732 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1733 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1734 if (islit)
1735 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1736 else
1737 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1738 tcg_temp_free(tmp);
30c7183b
AJ
1739 } else {
1740 if (islit)
1741 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1742 else
dfaa8583 1743 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1744 }
1745 }
4c9649a9
JM
1746 break;
1747 case 0x2D:
1748 /* CMPEQ */
01ff9cc8 1749 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1750 break;
1751 case 0x32:
1752 /* S8ADDQ */
30c7183b
AJ
1753 if (likely(rc != 31)) {
1754 if (ra != 31) {
a7812ae4 1755 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1756 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1757 if (islit)
1758 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1759 else
1760 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1761 tcg_temp_free(tmp);
30c7183b
AJ
1762 } else {
1763 if (islit)
1764 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1765 else
dfaa8583 1766 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1767 }
1768 }
4c9649a9
JM
1769 break;
1770 case 0x3B:
1771 /* S8SUBQ */
30c7183b
AJ
1772 if (likely(rc != 31)) {
1773 if (ra != 31) {
a7812ae4 1774 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1775 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1776 if (islit)
1777 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1778 else
1779 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1780 tcg_temp_free(tmp);
30c7183b
AJ
1781 } else {
1782 if (islit)
1783 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1784 else
dfaa8583 1785 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1786 }
1787 }
4c9649a9
JM
1788 break;
1789 case 0x3D:
1790 /* CMPULE */
01ff9cc8 1791 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
1792 break;
1793 case 0x40:
1794 /* ADDL/V */
a7812ae4 1795 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
1796 break;
1797 case 0x49:
1798 /* SUBL/V */
a7812ae4 1799 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
1800 break;
1801 case 0x4D:
1802 /* CMPLT */
01ff9cc8 1803 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
1804 break;
1805 case 0x60:
1806 /* ADDQ/V */
a7812ae4 1807 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1808 break;
1809 case 0x69:
1810 /* SUBQ/V */
a7812ae4 1811 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1812 break;
1813 case 0x6D:
1814 /* CMPLE */
01ff9cc8 1815 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
1816 break;
1817 default:
1818 goto invalid_opc;
1819 }
1820 break;
1821 case 0x11:
1822 switch (fn7) {
1823 case 0x00:
1824 /* AND */
30c7183b 1825 if (likely(rc != 31)) {
dfaa8583 1826 if (ra == 31)
30c7183b
AJ
1827 tcg_gen_movi_i64(cpu_ir[rc], 0);
1828 else if (islit)
1829 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1830 else
1831 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1832 }
4c9649a9
JM
1833 break;
1834 case 0x08:
1835 /* BIC */
30c7183b
AJ
1836 if (likely(rc != 31)) {
1837 if (ra != 31) {
1838 if (islit)
1839 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1840 else
1841 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1842 } else
1843 tcg_gen_movi_i64(cpu_ir[rc], 0);
1844 }
4c9649a9
JM
1845 break;
1846 case 0x14:
1847 /* CMOVLBS */
bbe1dab4 1848 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1849 break;
1850 case 0x16:
1851 /* CMOVLBC */
bbe1dab4 1852 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1853 break;
1854 case 0x20:
1855 /* BIS */
30c7183b
AJ
1856 if (likely(rc != 31)) {
1857 if (ra != 31) {
1858 if (islit)
1859 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 1860 else
30c7183b 1861 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 1862 } else {
30c7183b
AJ
1863 if (islit)
1864 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1865 else
dfaa8583 1866 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 1867 }
4c9649a9
JM
1868 }
1869 break;
1870 case 0x24:
1871 /* CMOVEQ */
bbe1dab4 1872 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1873 break;
1874 case 0x26:
1875 /* CMOVNE */
bbe1dab4 1876 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1877 break;
1878 case 0x28:
1879 /* ORNOT */
30c7183b 1880 if (likely(rc != 31)) {
dfaa8583 1881 if (ra != 31) {
30c7183b
AJ
1882 if (islit)
1883 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1884 else
1885 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1886 } else {
1887 if (islit)
1888 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1889 else
1890 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1891 }
1892 }
4c9649a9
JM
1893 break;
1894 case 0x40:
1895 /* XOR */
30c7183b
AJ
1896 if (likely(rc != 31)) {
1897 if (ra != 31) {
1898 if (islit)
1899 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1900 else
dfaa8583 1901 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1902 } else {
1903 if (islit)
1904 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1905 else
dfaa8583 1906 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1907 }
1908 }
4c9649a9
JM
1909 break;
1910 case 0x44:
1911 /* CMOVLT */
bbe1dab4 1912 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1913 break;
1914 case 0x46:
1915 /* CMOVGE */
bbe1dab4 1916 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1917 break;
1918 case 0x48:
1919 /* EQV */
30c7183b
AJ
1920 if (likely(rc != 31)) {
1921 if (ra != 31) {
1922 if (islit)
1923 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1924 else
1925 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1926 } else {
1927 if (islit)
1928 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 1929 else
dfaa8583 1930 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1931 }
1932 }
4c9649a9
JM
1933 break;
1934 case 0x61:
1935 /* AMASK */
ae8ecd42
AJ
1936 if (likely(rc != 31)) {
1937 if (islit)
1a1f7dbc 1938 tcg_gen_movi_i64(cpu_ir[rc], lit);
ae8ecd42 1939 else
1a1f7dbc
AJ
1940 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1941 switch (ctx->env->implver) {
1942 case IMPLVER_2106x:
1943 /* EV4, EV45, LCA, LCA45 & EV5 */
1944 break;
1945 case IMPLVER_21164:
1946 case IMPLVER_21264:
1947 case IMPLVER_21364:
1948 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1949 ~(uint64_t)ctx->amask);
1950 break;
1951 }
ae8ecd42 1952 }
4c9649a9
JM
1953 break;
1954 case 0x64:
1955 /* CMOVLE */
bbe1dab4 1956 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1957 break;
1958 case 0x66:
1959 /* CMOVGT */
bbe1dab4 1960 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1961 break;
1962 case 0x6C:
1963 /* IMPLVER */
3761035f 1964 if (rc != 31)
8579095b 1965 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
1966 break;
1967 default:
1968 goto invalid_opc;
1969 }
1970 break;
1971 case 0x12:
1972 switch (fn7) {
1973 case 0x02:
1974 /* MSKBL */
14ab1634 1975 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1976 break;
1977 case 0x06:
1978 /* EXTBL */
377a43b6 1979 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1980 break;
1981 case 0x0B:
1982 /* INSBL */
248c42f3 1983 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
1984 break;
1985 case 0x12:
1986 /* MSKWL */
14ab1634 1987 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1988 break;
1989 case 0x16:
1990 /* EXTWL */
377a43b6 1991 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1992 break;
1993 case 0x1B:
1994 /* INSWL */
248c42f3 1995 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
1996 break;
1997 case 0x22:
1998 /* MSKLL */
14ab1634 1999 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2000 break;
2001 case 0x26:
2002 /* EXTLL */
377a43b6 2003 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2004 break;
2005 case 0x2B:
2006 /* INSLL */
248c42f3 2007 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2008 break;
2009 case 0x30:
2010 /* ZAP */
a7812ae4 2011 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2012 break;
2013 case 0x31:
2014 /* ZAPNOT */
a7812ae4 2015 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2016 break;
2017 case 0x32:
2018 /* MSKQL */
14ab1634 2019 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2020 break;
2021 case 0x34:
2022 /* SRL */
30c7183b
AJ
2023 if (likely(rc != 31)) {
2024 if (ra != 31) {
2025 if (islit)
2026 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2027 else {
a7812ae4 2028 TCGv shift = tcg_temp_new();
30c7183b
AJ
2029 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2030 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2031 tcg_temp_free(shift);
dfaa8583 2032 }
30c7183b
AJ
2033 } else
2034 tcg_gen_movi_i64(cpu_ir[rc], 0);
2035 }
4c9649a9
JM
2036 break;
2037 case 0x36:
2038 /* EXTQL */
377a43b6 2039 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2040 break;
2041 case 0x39:
2042 /* SLL */
30c7183b
AJ
2043 if (likely(rc != 31)) {
2044 if (ra != 31) {
2045 if (islit)
2046 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2047 else {
a7812ae4 2048 TCGv shift = tcg_temp_new();
30c7183b
AJ
2049 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2050 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2051 tcg_temp_free(shift);
dfaa8583 2052 }
30c7183b
AJ
2053 } else
2054 tcg_gen_movi_i64(cpu_ir[rc], 0);
2055 }
4c9649a9
JM
2056 break;
2057 case 0x3B:
2058 /* INSQL */
248c42f3 2059 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2060 break;
2061 case 0x3C:
2062 /* SRA */
30c7183b
AJ
2063 if (likely(rc != 31)) {
2064 if (ra != 31) {
2065 if (islit)
2066 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2067 else {
a7812ae4 2068 TCGv shift = tcg_temp_new();
30c7183b
AJ
2069 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2070 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2071 tcg_temp_free(shift);
dfaa8583 2072 }
30c7183b
AJ
2073 } else
2074 tcg_gen_movi_i64(cpu_ir[rc], 0);
2075 }
4c9649a9
JM
2076 break;
2077 case 0x52:
2078 /* MSKWH */
ffec44f1 2079 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2080 break;
2081 case 0x57:
2082 /* INSWH */
50eb6e5c 2083 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2084 break;
2085 case 0x5A:
2086 /* EXTWH */
377a43b6 2087 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2088 break;
2089 case 0x62:
2090 /* MSKLH */
ffec44f1 2091 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2092 break;
2093 case 0x67:
2094 /* INSLH */
50eb6e5c 2095 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2096 break;
2097 case 0x6A:
2098 /* EXTLH */
377a43b6 2099 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2100 break;
2101 case 0x72:
2102 /* MSKQH */
ffec44f1 2103 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2104 break;
2105 case 0x77:
2106 /* INSQH */
50eb6e5c 2107 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2108 break;
2109 case 0x7A:
2110 /* EXTQH */
377a43b6 2111 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2112 break;
2113 default:
2114 goto invalid_opc;
2115 }
2116 break;
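            /* Background note: opcode 0x12 above is the byte-manipulation
               group.  MSKxL/MSKxH clear a byte field, EXTxL/EXTxH extract one
               and INSxL/INSxH insert one; the final argument (0x01, 0x03,
               0x0f, 0xff) is the byte mask selecting byte, word, longword or
               quadword width.  The shifts use only the low 6 bits of the
               count, hence the masking with 0x3f. */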
2117 case 0x13:
2118 switch (fn7) {
2119 case 0x00:
2120 /* MULL */
30c7183b 2121 if (likely(rc != 31)) {
dfaa8583 2122 if (ra == 31)
30c7183b
AJ
2123 tcg_gen_movi_i64(cpu_ir[rc], 0);
2124 else {
2125 if (islit)
2126 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2127 else
2128 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2129 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2130 }
2131 }
4c9649a9
JM
2132 break;
2133 case 0x20:
2134 /* MULQ */
30c7183b 2135 if (likely(rc != 31)) {
dfaa8583 2136 if (ra == 31)
30c7183b
AJ
2137 tcg_gen_movi_i64(cpu_ir[rc], 0);
2138 else if (islit)
2139 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2140 else
2141 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2142 }
4c9649a9
JM
2143 break;
2144 case 0x30:
2145 /* UMULH */
a7812ae4 2146 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2147 break;
2148 case 0x40:
2149 /* MULL/V */
a7812ae4 2150 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2151 break;
2152 case 0x60:
2153 /* MULQ/V */
a7812ae4 2154 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2155 break;
2156 default:
2157 goto invalid_opc;
2158 }
2159 break;
2160 case 0x14:
f24518b5 2161 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2162 case 0x04:
2163 /* ITOFS */
2164 if (!(ctx->amask & AMASK_FIX))
2165 goto invalid_opc;
f18cd223
AJ
2166 if (likely(rc != 31)) {
2167 if (ra != 31) {
a7812ae4 2168 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2169 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2170 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2171 tcg_temp_free_i32(tmp);
f18cd223
AJ
2172 } else
2173 tcg_gen_movi_i64(cpu_fir[rc], 0);
2174 }
4c9649a9
JM
2175 break;
2176 case 0x0A:
2177 /* SQRTF */
2178 if (!(ctx->amask & AMASK_FIX))
2179 goto invalid_opc;
a7812ae4 2180 gen_fsqrtf(rb, rc);
4c9649a9
JM
2181 break;
2182 case 0x0B:
2183 /* SQRTS */
2184 if (!(ctx->amask & AMASK_FIX))
2185 goto invalid_opc;
f24518b5 2186 gen_fsqrts(ctx, rb, rc, fn11);
4c9649a9
JM
2187 break;
2188 case 0x14:
2189 /* ITOFF */
2190 if (!(ctx->amask & AMASK_FIX))
2191 goto invalid_opc;
f18cd223
AJ
2192 if (likely(rc != 31)) {
2193 if (ra != 31) {
a7812ae4 2194 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2195 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2196 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2197 tcg_temp_free_i32(tmp);
f18cd223
AJ
2198 } else
2199 tcg_gen_movi_i64(cpu_fir[rc], 0);
2200 }
4c9649a9
JM
2201 break;
2202 case 0x24:
2203 /* ITOFT */
2204 if (!(ctx->amask & AMASK_FIX))
2205 goto invalid_opc;
f18cd223
AJ
2206 if (likely(rc != 31)) {
2207 if (ra != 31)
2208 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2209 else
2210 tcg_gen_movi_i64(cpu_fir[rc], 0);
2211 }
4c9649a9
JM
2212 break;
2213 case 0x2A:
2214 /* SQRTG */
2215 if (!(ctx->amask & AMASK_FIX))
2216 goto invalid_opc;
a7812ae4 2217 gen_fsqrtg(rb, rc);
4c9649a9
JM
2218 break;
 2219 case 0x2B:
2220 /* SQRTT */
2221 if (!(ctx->amask & AMASK_FIX))
2222 goto invalid_opc;
f24518b5 2223 gen_fsqrtt(ctx, rb, rc, fn11);
4c9649a9
JM
2224 break;
2225 default:
2226 goto invalid_opc;
2227 }
2228 break;
2229 case 0x15:
2230 /* VAX floating point */
2231 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2232 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2233 case 0x00:
2234 /* ADDF */
a7812ae4 2235 gen_faddf(ra, rb, rc);
4c9649a9
JM
2236 break;
2237 case 0x01:
2238 /* SUBF */
a7812ae4 2239 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2240 break;
2241 case 0x02:
2242 /* MULF */
a7812ae4 2243 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2244 break;
2245 case 0x03:
2246 /* DIVF */
a7812ae4 2247 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2248 break;
2249 case 0x1E:
2250 /* CVTDG */
2251#if 0 // TODO
a7812ae4 2252 gen_fcvtdg(rb, rc);
4c9649a9
JM
2253#else
2254 goto invalid_opc;
2255#endif
2256 break;
2257 case 0x20:
2258 /* ADDG */
a7812ae4 2259 gen_faddg(ra, rb, rc);
4c9649a9
JM
2260 break;
2261 case 0x21:
2262 /* SUBG */
a7812ae4 2263 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2264 break;
2265 case 0x22:
2266 /* MULG */
a7812ae4 2267 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2268 break;
2269 case 0x23:
2270 /* DIVG */
a7812ae4 2271 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2272 break;
2273 case 0x25:
2274 /* CMPGEQ */
a7812ae4 2275 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2276 break;
2277 case 0x26:
2278 /* CMPGLT */
a7812ae4 2279 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2280 break;
2281 case 0x27:
2282 /* CMPGLE */
a7812ae4 2283 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2284 break;
2285 case 0x2C:
2286 /* CVTGF */
a7812ae4 2287 gen_fcvtgf(rb, rc);
4c9649a9
JM
2288 break;
2289 case 0x2D:
2290 /* CVTGD */
2291#if 0 // TODO
a7812ae4 2292 gen_fcvtgd(rb, rc);
4c9649a9
JM
2293#else
2294 goto invalid_opc;
2295#endif
2296 break;
2297 case 0x2F:
2298 /* CVTGQ */
a7812ae4 2299 gen_fcvtgq(rb, rc);
4c9649a9
JM
2300 break;
2301 case 0x3C:
2302 /* CVTQF */
a7812ae4 2303 gen_fcvtqf(rb, rc);
4c9649a9
JM
2304 break;
2305 case 0x3E:
2306 /* CVTQG */
a7812ae4 2307 gen_fcvtqg(rb, rc);
4c9649a9
JM
2308 break;
2309 default:
2310 goto invalid_opc;
2311 }
2312 break;
2313 case 0x16:
2314 /* IEEE floating-point */
f24518b5 2315 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2316 case 0x00:
2317 /* ADDS */
f24518b5 2318 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2319 break;
2320 case 0x01:
2321 /* SUBS */
f24518b5 2322 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2323 break;
2324 case 0x02:
2325 /* MULS */
f24518b5 2326 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2327 break;
2328 case 0x03:
2329 /* DIVS */
f24518b5 2330 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2331 break;
2332 case 0x20:
2333 /* ADDT */
f24518b5 2334 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2335 break;
2336 case 0x21:
2337 /* SUBT */
f24518b5 2338 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2339 break;
2340 case 0x22:
2341 /* MULT */
f24518b5 2342 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2343 break;
2344 case 0x23:
2345 /* DIVT */
f24518b5 2346 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2347 break;
2348 case 0x24:
2349 /* CMPTUN */
f24518b5 2350 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2351 break;
2352 case 0x25:
2353 /* CMPTEQ */
f24518b5 2354 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2355 break;
2356 case 0x26:
2357 /* CMPTLT */
f24518b5 2358 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2359 break;
2360 case 0x27:
2361 /* CMPTLE */
f24518b5 2362 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2363 break;
2364 case 0x2C:
a74b4d2c 2365 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2366 /* CVTST */
f24518b5 2367 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2368 } else {
2369 /* CVTTS */
f24518b5 2370 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2371 }
2372 break;
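            /* Background note: within function 0x2C, the fn11 encodings
               0x2AC and 0x6AC checked above select CVTST (plain and /S
               software-completion forms); every other qualifier combination
               is a CVTTS variant, so the qualifiers are forwarded via fn11. */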
2373 case 0x2F:
2374 /* CVTTQ */
f24518b5 2375 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2376 break;
2377 case 0x3C:
2378 /* CVTQS */
f24518b5 2379 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2380 break;
2381 case 0x3E:
2382 /* CVTQT */
f24518b5 2383 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2384 break;
2385 default:
2386 goto invalid_opc;
2387 }
2388 break;
2389 case 0x17:
2390 switch (fn11) {
2391 case 0x010:
2392 /* CVTLQ */
a7812ae4 2393 gen_fcvtlq(rb, rc);
4c9649a9
JM
2394 break;
2395 case 0x020:
f18cd223 2396 if (likely(rc != 31)) {
a06d48d9 2397 if (ra == rb) {
4c9649a9 2398 /* FMOV */
a06d48d9
RH
2399 if (ra == 31)
2400 tcg_gen_movi_i64(cpu_fir[rc], 0);
2401 else
2402 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2403 } else {
f18cd223 2404 /* CPYS */
a7812ae4 2405 gen_fcpys(ra, rb, rc);
a06d48d9 2406 }
4c9649a9
JM
2407 }
2408 break;
2409 case 0x021:
2410 /* CPYSN */
a7812ae4 2411 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2412 break;
2413 case 0x022:
2414 /* CPYSE */
a7812ae4 2415 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2416 break;
2417 case 0x024:
2418 /* MT_FPCR */
f18cd223 2419 if (likely(ra != 31))
a7812ae4 2420 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2421 else {
2422 TCGv tmp = tcg_const_i64(0);
a7812ae4 2423 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2424 tcg_temp_free(tmp);
2425 }
4c9649a9
JM
2426 break;
2427 case 0x025:
2428 /* MF_FPCR */
f18cd223 2429 if (likely(ra != 31))
a7812ae4 2430 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2431 break;
2432 case 0x02A:
2433 /* FCMOVEQ */
bbe1dab4 2434 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2435 break;
2436 case 0x02B:
2437 /* FCMOVNE */
bbe1dab4 2438 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2439 break;
2440 case 0x02C:
2441 /* FCMOVLT */
bbe1dab4 2442 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2443 break;
2444 case 0x02D:
2445 /* FCMOVGE */
bbe1dab4 2446 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2447 break;
2448 case 0x02E:
2449 /* FCMOVLE */
bbe1dab4 2450 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2451 break;
2452 case 0x02F:
2453 /* FCMOVGT */
bbe1dab4 2454 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2455 break;
2456 case 0x030:
2457 /* CVTQL */
a7812ae4 2458 gen_fcvtql(rb, rc);
4c9649a9
JM
2459 break;
2460 case 0x130:
2461 /* CVTQL/V */
4c9649a9
JM
2462 case 0x530:
2463 /* CVTQL/SV */
735cf45f
RH
2464 /* ??? I'm pretty sure there's nothing that /sv needs to do that
 2465 /v doesn't do. The only thing I can think of is that /sv is a
2466 valid instruction merely for completeness in the ISA. */
2467 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2468 break;
2469 default:
2470 goto invalid_opc;
2471 }
2472 break;
2473 case 0x18:
2474 switch ((uint16_t)disp16) {
2475 case 0x0000:
2476 /* TRAPB */
4af70374 2477 /* No-op. */
4c9649a9
JM
2478 break;
2479 case 0x0400:
2480 /* EXCB */
4af70374 2481 /* No-op. */
4c9649a9
JM
2482 break;
2483 case 0x4000:
2484 /* MB */
2485 /* No-op */
2486 break;
2487 case 0x4400:
2488 /* WMB */
2489 /* No-op */
2490 break;
2491 case 0x8000:
2492 /* FETCH */
2493 /* No-op */
2494 break;
2495 case 0xA000:
2496 /* FETCH_M */
2497 /* No-op */
2498 break;
2499 case 0xC000:
2500 /* RPCC */
3761035f 2501 if (ra != 31)
a7812ae4 2502 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2503 break;
2504 case 0xE000:
2505 /* RC */
ac316ca4 2506 gen_rx(ra, 0);
4c9649a9
JM
2507 break;
2508 case 0xE800:
2509 /* ECB */
4c9649a9
JM
2510 break;
2511 case 0xF000:
2512 /* RS */
ac316ca4 2513 gen_rx(ra, 1);
4c9649a9
JM
2514 break;
2515 case 0xF800:
2516 /* WH64 */
2517 /* No-op */
2518 break;
2519 default:
2520 goto invalid_opc;
2521 }
2522 break;
2523 case 0x19:
2524 /* HW_MFPR (PALcode) */
2525#if defined (CONFIG_USER_ONLY)
2526 goto invalid_opc;
2527#else
2528 if (!ctx->pal_mode)
2529 goto invalid_opc;
8bb6e981
AJ
2530 if (ra != 31) {
2531 TCGv tmp = tcg_const_i32(insn & 0xFF);
a7812ae4 2532 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
8bb6e981
AJ
2533 tcg_temp_free(tmp);
2534 }
4c9649a9
JM
2535 break;
2536#endif
2537 case 0x1A:
49563a72
RH
2538 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2539 prediction stack action, which of course we don't implement. */
2540 if (rb != 31) {
3761035f 2541 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2542 } else {
3761035f 2543 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2544 }
2545 if (ra != 31) {
1304ca87 2546 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2547 }
4af70374 2548 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2549 break;
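            /* Background note: for this group the target is Rb with the low
               two bits cleared and Ra receives the return address (the
               already-incremented PC).  JMP, JSR, RET and JSR_COROUTINE are
               distinguished only by hint bits in the displacement field,
               which drive the return-prediction stack on real hardware and
               are ignored here, as the comment above notes. */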
2550 case 0x1B:
2551 /* HW_LD (PALcode) */
2552#if defined (CONFIG_USER_ONLY)
2553 goto invalid_opc;
2554#else
2555 if (!ctx->pal_mode)
2556 goto invalid_opc;
8bb6e981 2557 if (ra != 31) {
a7812ae4 2558 TCGv addr = tcg_temp_new();
8bb6e981
AJ
2559 if (rb != 31)
2560 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2561 else
2562 tcg_gen_movi_i64(addr, disp12);
2563 switch ((insn >> 12) & 0xF) {
2564 case 0x0:
b5d51029 2565 /* Longword physical access (hw_ldl/p) */
a7812ae4 2566 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2567 break;
2568 case 0x1:
b5d51029 2569 /* Quadword physical access (hw_ldq/p) */
a7812ae4 2570 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2571 break;
2572 case 0x2:
b5d51029 2573 /* Longword physical access with lock (hw_ldl_l/p) */
a7812ae4 2574 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2575 break;
2576 case 0x3:
b5d51029 2577 /* Quadword physical access with lock (hw_ldq_l/p) */
a7812ae4 2578 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2579 break;
2580 case 0x4:
b5d51029
AJ
2581 /* Longword virtual PTE fetch (hw_ldl/v) */
2582 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2583 break;
2584 case 0x5:
b5d51029
AJ
2585 /* Quadword virtual PTE fetch (hw_ldq/v) */
2586 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2587 break;
2588 case 0x6:
 2589 /* Invalid */
b5d51029 2590 goto invalid_opc;
8bb6e981
AJ
2591 case 0x7:
 2592 /* Invalid */
b5d51029 2593 goto invalid_opc;
8bb6e981 2594 case 0x8:
b5d51029 2595 /* Longword virtual access (hw_ldl) */
a7812ae4
PB
2596 gen_helper_st_virt_to_phys(addr, addr);
2597 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2598 break;
2599 case 0x9:
b5d51029 2600 /* Quadword virtual access (hw_ldq) */
a7812ae4
PB
2601 gen_helper_st_virt_to_phys(addr, addr);
2602 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2603 break;
2604 case 0xA:
b5d51029
AJ
2605 /* Longword virtual access with protection check (hw_ldl/w) */
2606 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2607 break;
2608 case 0xB:
b5d51029
AJ
2609 /* Quadword virtual access with protection check (hw_ldq/w) */
2610 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2611 break;
2612 case 0xC:
b5d51029 2613 /* Longword virtual access with alt access mode (hw_ldl/a)*/
a7812ae4
PB
2614 gen_helper_set_alt_mode();
2615 gen_helper_st_virt_to_phys(addr, addr);
2616 gen_helper_ldl_raw(cpu_ir[ra], addr);
2617 gen_helper_restore_mode();
8bb6e981
AJ
2618 break;
2619 case 0xD:
b5d51029 2620 /* Quadword virtual access with alt access mode (hw_ldq/a) */
a7812ae4
PB
2621 gen_helper_set_alt_mode();
2622 gen_helper_st_virt_to_phys(addr, addr);
2623 gen_helper_ldq_raw(cpu_ir[ra], addr);
2624 gen_helper_restore_mode();
8bb6e981
AJ
2625 break;
2626 case 0xE:
2627 /* Longword virtual access with alternate access mode and
b5d51029 2628 * protection checks (hw_ldl/wa)
8bb6e981 2629 */
a7812ae4
PB
2630 gen_helper_set_alt_mode();
2631 gen_helper_ldl_data(cpu_ir[ra], addr);
2632 gen_helper_restore_mode();
8bb6e981
AJ
2633 break;
2634 case 0xF:
2635 /* Quadword virtual access with alternate access mode and
b5d51029 2636 * protection checks (hw_ldq/wa)
8bb6e981 2637 */
a7812ae4
PB
2638 gen_helper_set_alt_mode();
2639 gen_helper_ldq_data(cpu_ir[ra], addr);
2640 gen_helper_restore_mode();
8bb6e981
AJ
2641 break;
2642 }
2643 tcg_temp_free(addr);
4c9649a9 2644 }
4c9649a9
JM
2645 break;
2646#endif
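        /* Background note: in HW_LD above, bits <15:12> of the instruction
           select the access type: 0x0-0x3 are physical accesses (optionally
           locked), 0x4/0x5 and 0xA/0xB are virtual PTE fetches / checked
           accesses done with ordinary guest loads, 0x8/0x9 go through the
           explicit virt-to-phys helper, and 0xC-0xF additionally switch to
           the alternate access mode around the load. */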
2647 case 0x1C:
2648 switch (fn7) {
2649 case 0x00:
2650 /* SEXTB */
2651 if (!(ctx->amask & AMASK_BWX))
2652 goto invalid_opc;
ae8ecd42
AJ
2653 if (likely(rc != 31)) {
2654 if (islit)
2655 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2656 else
dfaa8583 2657 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2658 }
4c9649a9
JM
2659 break;
2660 case 0x01:
2661 /* SEXTW */
2662 if (!(ctx->amask & AMASK_BWX))
2663 goto invalid_opc;
ae8ecd42
AJ
2664 if (likely(rc != 31)) {
2665 if (islit)
2666 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
ae8ecd42 2667 else
dfaa8583 2668 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2669 }
4c9649a9
JM
2670 break;
2671 case 0x30:
2672 /* CTPOP */
2673 if (!(ctx->amask & AMASK_CIX))
2674 goto invalid_opc;
ae8ecd42
AJ
2675 if (likely(rc != 31)) {
2676 if (islit)
2677 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
ae8ecd42 2678 else
a7812ae4 2679 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2680 }
4c9649a9
JM
2681 break;
2682 case 0x31:
2683 /* PERR */
2684 if (!(ctx->amask & AMASK_MVI))
2685 goto invalid_opc;
13e4df99 2686 gen_perr(ra, rb, rc, islit, lit);
4c9649a9
JM
2687 break;
2688 case 0x32:
2689 /* CTLZ */
2690 if (!(ctx->amask & AMASK_CIX))
2691 goto invalid_opc;
ae8ecd42
AJ
2692 if (likely(rc != 31)) {
2693 if (islit)
2694 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
ae8ecd42 2695 else
a7812ae4 2696 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2697 }
4c9649a9
JM
2698 break;
2699 case 0x33:
2700 /* CTTZ */
2701 if (!(ctx->amask & AMASK_CIX))
2702 goto invalid_opc;
ae8ecd42
AJ
2703 if (likely(rc != 31)) {
2704 if (islit)
2705 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
ae8ecd42 2706 else
a7812ae4 2707 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2708 }
4c9649a9
JM
2709 break;
2710 case 0x34:
2711 /* UNPKBW */
2712 if (!(ctx->amask & AMASK_MVI))
2713 goto invalid_opc;
13e4df99
RH
2714 if (real_islit || ra != 31)
2715 goto invalid_opc;
2716 gen_unpkbw (rb, rc);
4c9649a9
JM
2717 break;
2718 case 0x35:
13e4df99 2719 /* UNPKBL */
4c9649a9
JM
2720 if (!(ctx->amask & AMASK_MVI))
2721 goto invalid_opc;
13e4df99
RH
2722 if (real_islit || ra != 31)
2723 goto invalid_opc;
2724 gen_unpkbl (rb, rc);
4c9649a9
JM
2725 break;
2726 case 0x36:
2727 /* PKWB */
2728 if (!(ctx->amask & AMASK_MVI))
2729 goto invalid_opc;
13e4df99
RH
2730 if (real_islit || ra != 31)
2731 goto invalid_opc;
2732 gen_pkwb (rb, rc);
4c9649a9
JM
2733 break;
2734 case 0x37:
2735 /* PKLB */
2736 if (!(ctx->amask & AMASK_MVI))
2737 goto invalid_opc;
13e4df99
RH
2738 if (real_islit || ra != 31)
2739 goto invalid_opc;
2740 gen_pklb (rb, rc);
4c9649a9
JM
2741 break;
2742 case 0x38:
2743 /* MINSB8 */
2744 if (!(ctx->amask & AMASK_MVI))
2745 goto invalid_opc;
13e4df99 2746 gen_minsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2747 break;
2748 case 0x39:
2749 /* MINSW4 */
2750 if (!(ctx->amask & AMASK_MVI))
2751 goto invalid_opc;
13e4df99 2752 gen_minsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2753 break;
2754 case 0x3A:
2755 /* MINUB8 */
2756 if (!(ctx->amask & AMASK_MVI))
2757 goto invalid_opc;
13e4df99 2758 gen_minub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2759 break;
2760 case 0x3B:
2761 /* MINUW4 */
2762 if (!(ctx->amask & AMASK_MVI))
2763 goto invalid_opc;
13e4df99 2764 gen_minuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2765 break;
2766 case 0x3C:
2767 /* MAXUB8 */
2768 if (!(ctx->amask & AMASK_MVI))
2769 goto invalid_opc;
13e4df99 2770 gen_maxub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2771 break;
2772 case 0x3D:
2773 /* MAXUW4 */
2774 if (!(ctx->amask & AMASK_MVI))
2775 goto invalid_opc;
13e4df99 2776 gen_maxuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2777 break;
2778 case 0x3E:
2779 /* MAXSB8 */
2780 if (!(ctx->amask & AMASK_MVI))
2781 goto invalid_opc;
13e4df99 2782 gen_maxsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2783 break;
2784 case 0x3F:
2785 /* MAXSW4 */
2786 if (!(ctx->amask & AMASK_MVI))
2787 goto invalid_opc;
13e4df99 2788 gen_maxsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2789 break;
2790 case 0x70:
2791 /* FTOIT */
2792 if (!(ctx->amask & AMASK_FIX))
2793 goto invalid_opc;
f18cd223
AJ
2794 if (likely(rc != 31)) {
2795 if (ra != 31)
2796 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2797 else
2798 tcg_gen_movi_i64(cpu_ir[rc], 0);
2799 }
4c9649a9
JM
2800 break;
2801 case 0x78:
2802 /* FTOIS */
2803 if (!(ctx->amask & AMASK_FIX))
2804 goto invalid_opc;
f18cd223 2805 if (rc != 31) {
a7812ae4 2806 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 2807 if (ra != 31)
a7812ae4 2808 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
2809 else {
2810 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2811 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
2812 tcg_temp_free(tmp2);
2813 }
2814 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 2815 tcg_temp_free_i32(tmp1);
f18cd223 2816 }
4c9649a9
JM
2817 break;
2818 default:
2819 goto invalid_opc;
2820 }
2821 break;
2822 case 0x1D:
2823 /* HW_MTPR (PALcode) */
2824#if defined (CONFIG_USER_ONLY)
2825 goto invalid_opc;
2826#else
2827 if (!ctx->pal_mode)
2828 goto invalid_opc;
8bb6e981
AJ
2829 else {
2830 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2831 if (ra != 31)
a7812ae4 2832 gen_helper_mtpr(tmp1, cpu_ir[ra]);
8bb6e981
AJ
2833 else {
2834 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2835 gen_helper_mtpr(tmp1, tmp2);
8bb6e981
AJ
2836 tcg_temp_free(tmp2);
2837 }
2838 tcg_temp_free(tmp1);
4af70374 2839 ret = EXIT_PC_STALE;
8bb6e981 2840 }
4c9649a9
JM
2841 break;
2842#endif
2843 case 0x1E:
2844 /* HW_REI (PALcode) */
2845#if defined (CONFIG_USER_ONLY)
2846 goto invalid_opc;
2847#else
2848 if (!ctx->pal_mode)
2849 goto invalid_opc;
2850 if (rb == 31) {
2851 /* "Old" alpha */
a7812ae4 2852 gen_helper_hw_rei();
4c9649a9 2853 } else {
8bb6e981
AJ
2854 TCGv tmp;
2855
2856 if (ra != 31) {
a7812ae4 2857 tmp = tcg_temp_new();
8bb6e981
AJ
2858 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2859 } else
2860 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
a7812ae4 2861 gen_helper_hw_ret(tmp);
8bb6e981 2862 tcg_temp_free(tmp);
4c9649a9 2863 }
4af70374 2864 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2865 break;
2866#endif
2867 case 0x1F:
2868 /* HW_ST (PALcode) */
2869#if defined (CONFIG_USER_ONLY)
2870 goto invalid_opc;
2871#else
2872 if (!ctx->pal_mode)
2873 goto invalid_opc;
8bb6e981
AJ
2874 else {
2875 TCGv addr, val;
a7812ae4 2876 addr = tcg_temp_new();
8bb6e981
AJ
2877 if (rb != 31)
2878 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2879 else
2880 tcg_gen_movi_i64(addr, disp12);
2881 if (ra != 31)
2882 val = cpu_ir[ra];
2883 else {
a7812ae4 2884 val = tcg_temp_new();
8bb6e981
AJ
2885 tcg_gen_movi_i64(val, 0);
2886 }
2887 switch ((insn >> 12) & 0xF) {
2888 case 0x0:
2889 /* Longword physical access */
a7812ae4 2890 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2891 break;
2892 case 0x1:
2893 /* Quadword physical access */
a7812ae4 2894 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2895 break;
2896 case 0x2:
2897 /* Longword physical access with lock */
a7812ae4 2898 gen_helper_stl_c_raw(val, val, addr);
8bb6e981
AJ
2899 break;
2900 case 0x3:
2901 /* Quadword physical access with lock */
a7812ae4 2902 gen_helper_stq_c_raw(val, val, addr);
8bb6e981
AJ
2903 break;
2904 case 0x4:
2905 /* Longword virtual access */
a7812ae4
PB
2906 gen_helper_st_virt_to_phys(addr, addr);
2907 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2908 break;
2909 case 0x5:
2910 /* Quadword virtual access */
a7812ae4
PB
2911 gen_helper_st_virt_to_phys(addr, addr);
2912 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2913 break;
2914 case 0x6:
2915 /* Invalid */
2916 goto invalid_opc;
2917 case 0x7:
2918 /* Invalid */
2919 goto invalid_opc;
2920 case 0x8:
2921 /* Invalid */
2922 goto invalid_opc;
2923 case 0x9:
2924 /* Invalid */
2925 goto invalid_opc;
2926 case 0xA:
2927 /* Invalid */
2928 goto invalid_opc;
2929 case 0xB:
2930 /* Invalid */
2931 goto invalid_opc;
2932 case 0xC:
2933 /* Longword virtual access with alternate access mode */
a7812ae4
PB
2934 gen_helper_set_alt_mode();
2935 gen_helper_st_virt_to_phys(addr, addr);
2936 gen_helper_stl_raw(val, addr);
2937 gen_helper_restore_mode();
8bb6e981
AJ
2938 break;
2939 case 0xD:
2940 /* Quadword virtual access with alternate access mode */
a7812ae4
PB
2941 gen_helper_set_alt_mode();
2942 gen_helper_st_virt_to_phys(addr, addr);
 2943 gen_helper_stq_raw(val, addr);
2944 gen_helper_restore_mode();
8bb6e981
AJ
2945 break;
2946 case 0xE:
2947 /* Invalid */
2948 goto invalid_opc;
2949 case 0xF:
2950 /* Invalid */
2951 goto invalid_opc;
2952 }
45d46ce8 2953 if (ra == 31)
8bb6e981
AJ
2954 tcg_temp_free(val);
2955 tcg_temp_free(addr);
4c9649a9 2956 }
4c9649a9
JM
2957 break;
2958#endif
2959 case 0x20:
2960 /* LDF */
f18cd223 2961 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
2962 break;
2963 case 0x21:
2964 /* LDG */
f18cd223 2965 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
2966 break;
2967 case 0x22:
2968 /* LDS */
f18cd223 2969 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
2970 break;
2971 case 0x23:
2972 /* LDT */
f18cd223 2973 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
2974 break;
2975 case 0x24:
2976 /* STF */
57a92c8e 2977 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2978 break;
2979 case 0x25:
2980 /* STG */
57a92c8e 2981 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2982 break;
2983 case 0x26:
2984 /* STS */
57a92c8e 2985 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2986 break;
2987 case 0x27:
2988 /* STT */
57a92c8e 2989 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
4c9649a9
JM
2990 break;
2991 case 0x28:
2992 /* LDL */
f18cd223 2993 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
2994 break;
2995 case 0x29:
2996 /* LDQ */
f18cd223 2997 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
2998 break;
2999 case 0x2A:
3000 /* LDL_L */
f4ed8679 3001 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3002 break;
3003 case 0x2B:
3004 /* LDQ_L */
f4ed8679 3005 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3006 break;
3007 case 0x2C:
3008 /* STL */
57a92c8e 3009 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
3010 break;
3011 case 0x2D:
3012 /* STQ */
57a92c8e 3013 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
4c9649a9
JM
3014 break;
3015 case 0x2E:
3016 /* STL_C */
57a92c8e 3017 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
3018 break;
3019 case 0x2F:
3020 /* STQ_C */
57a92c8e 3021 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
4c9649a9
JM
3022 break;
3023 case 0x30:
3024 /* BR */
4af70374 3025 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3026 break;
a7812ae4 3027 case 0x31: /* FBEQ */
4af70374 3028 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3029 break;
a7812ae4 3030 case 0x32: /* FBLT */
4af70374 3031 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3032 break;
a7812ae4 3033 case 0x33: /* FBLE */
4af70374 3034 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3035 break;
3036 case 0x34:
3037 /* BSR */
4af70374 3038 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3039 break;
a7812ae4 3040 case 0x35: /* FBNE */
4af70374 3041 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3042 break;
a7812ae4 3043 case 0x36: /* FBGE */
4af70374 3044 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3045 break;
a7812ae4 3046 case 0x37: /* FBGT */
4af70374 3047 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3048 break;
3049 case 0x38:
3050 /* BLBC */
4af70374 3051 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3052 break;
3053 case 0x39:
3054 /* BEQ */
4af70374 3055 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3056 break;
3057 case 0x3A:
3058 /* BLT */
4af70374 3059 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3060 break;
3061 case 0x3B:
3062 /* BLE */
4af70374 3063 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3064 break;
3065 case 0x3C:
3066 /* BLBS */
4af70374 3067 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3068 break;
3069 case 0x3D:
3070 /* BNE */
4af70374 3071 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3072 break;
3073 case 0x3E:
3074 /* BGE */
4af70374 3075 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3076 break;
3077 case 0x3F:
3078 /* BGT */
4af70374 3079 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3080 break;
3081 invalid_opc:
8aa3fa20 3082 ret = gen_invalid(ctx);
4c9649a9
JM
3083 break;
3084 }
3085
3086 return ret;
3087}
3088
636aa200
BS
3089static inline void gen_intermediate_code_internal(CPUState *env,
3090 TranslationBlock *tb,
3091 int search_pc)
4c9649a9 3092{
4c9649a9
JM
3093 DisasContext ctx, *ctxp = &ctx;
3094 target_ulong pc_start;
3095 uint32_t insn;
3096 uint16_t *gen_opc_end;
a1d1bb31 3097 CPUBreakpoint *bp;
4c9649a9 3098 int j, lj = -1;
4af70374 3099 ExitStatus ret;
2e70f6ef
PB
3100 int num_insns;
3101 int max_insns;
4c9649a9
JM
3102
3103 pc_start = tb->pc;
4c9649a9 3104 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3105
3106 ctx.tb = tb;
3107 ctx.env = env;
4c9649a9
JM
3108 ctx.pc = pc_start;
3109 ctx.amask = env->amask;
3110#if defined (CONFIG_USER_ONLY)
3111 ctx.mem_idx = 0;
3112#else
3113 ctx.mem_idx = ((env->ps >> 3) & 3);
3114 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
3115#endif
f24518b5
RH
3116
3117 /* ??? Every TB begins with unset rounding mode, to be initialized on
 3118 the first fp insn of the TB. Alternatively we could define a proper
3119 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3120 to reset the FP_STATUS to that default at the end of any TB that
 3121 changes the default. We could even (gasp) dynamically figure out
3122 what default would be most efficient given the running program. */
3123 ctx.tb_rm = -1;
3124 /* Similarly for flush-to-zero. */
3125 ctx.tb_ftz = -1;
3126
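    /* Illustrative sketch, not part of this commit (helper and constant
       names below are assumptions): the FP emitters are expected to consume
       the cached values roughly like

           fn11 &= QUAL_RM_MASK;
           if (fn11 != ctx->tb_rm) {
               ctx->tb_rm = fn11;
               // emit a helper call switching FP_STATUS to the rounding
               // mode encoded by fn11
           }

       so a run of FP instructions with the same qualifiers pays for the
       mode switch only once per translation block. */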
2e70f6ef
PB
3127 num_insns = 0;
3128 max_insns = tb->cflags & CF_COUNT_MASK;
3129 if (max_insns == 0)
3130 max_insns = CF_COUNT_MASK;
3131
3132 gen_icount_start();
4af70374 3133 do {
72cf2d4f
BS
3134 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3135 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3136 if (bp->pc == ctx.pc) {
4c9649a9
JM
3137 gen_excp(&ctx, EXCP_DEBUG, 0);
3138 break;
3139 }
3140 }
3141 }
3142 if (search_pc) {
3143 j = gen_opc_ptr - gen_opc_buf;
3144 if (lj < j) {
3145 lj++;
3146 while (lj < j)
3147 gen_opc_instr_start[lj++] = 0;
4c9649a9 3148 }
ed1dda53
AJ
3149 gen_opc_pc[lj] = ctx.pc;
3150 gen_opc_instr_start[lj] = 1;
3151 gen_opc_icount[lj] = num_insns;
4c9649a9 3152 }
2e70f6ef
PB
3153 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3154 gen_io_start();
4c9649a9 3155 insn = ldl_code(ctx.pc);
2e70f6ef 3156 num_insns++;
c4b3be39
RH
3157
3158 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3159 tcg_gen_debug_insn_start(ctx.pc);
3160 }
3161
4c9649a9
JM
3162 ctx.pc += 4;
3163 ret = translate_one(ctxp, insn);
19bf517b 3164
4af70374
RH
3165 if (ret == NO_EXIT) {
3166 /* If we reach a page boundary, are single stepping,
3167 or exhaust instruction count, stop generation. */
3168 if (env->singlestep_enabled) {
3169 gen_excp(&ctx, EXCP_DEBUG, 0);
3170 ret = EXIT_PC_UPDATED;
3171 } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3172 || gen_opc_ptr >= gen_opc_end
3173 || num_insns >= max_insns
3174 || singlestep) {
3175 ret = EXIT_PC_STALE;
3176 }
1b530a6d 3177 }
4af70374
RH
3178 } while (ret == NO_EXIT);
3179
3180 if (tb->cflags & CF_LAST_IO) {
3181 gen_io_end();
4c9649a9 3182 }
4af70374
RH
3183
3184 switch (ret) {
3185 case EXIT_GOTO_TB:
8aa3fa20 3186 case EXIT_NORETURN:
4af70374
RH
3187 break;
3188 case EXIT_PC_STALE:
496cb5b9 3189 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3190 /* FALLTHRU */
3191 case EXIT_PC_UPDATED:
3192 tcg_gen_exit_tb(0);
3193 break;
3194 default:
3195 abort();
4c9649a9 3196 }
4af70374 3197
2e70f6ef 3198 gen_icount_end(tb, num_insns);
4c9649a9
JM
3199 *gen_opc_ptr = INDEX_op_end;
3200 if (search_pc) {
3201 j = gen_opc_ptr - gen_opc_buf;
3202 lj++;
3203 while (lj <= j)
3204 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3205 } else {
3206 tb->size = ctx.pc - pc_start;
2e70f6ef 3207 tb->icount = num_insns;
4c9649a9 3208 }
4af70374 3209
806991da 3210#ifdef DEBUG_DISAS
8fec2b8c 3211 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3212 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3213 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3214 qemu_log("\n");
4c9649a9 3215 }
4c9649a9 3216#endif
4c9649a9
JM
3217}
3218
2cfc5f17 3219void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3220{
2cfc5f17 3221 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3222}
3223
2cfc5f17 3224void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3225{
2cfc5f17 3226 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3227}
3228
a964acc6
RH
3229struct cpu_def_t {
3230 const char *name;
3231 int implver, amask;
3232};
3233
3234static const struct cpu_def_t cpu_defs[] = {
3235 { "ev4", IMPLVER_2106x, 0 },
3236 { "ev5", IMPLVER_21164, 0 },
3237 { "ev56", IMPLVER_21164, AMASK_BWX },
3238 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3239 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3240 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3241 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3242 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3243 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3244 { "21064", IMPLVER_2106x, 0 },
3245 { "21164", IMPLVER_21164, 0 },
3246 { "21164a", IMPLVER_21164, AMASK_BWX },
3247 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3248 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3249 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3250 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3251};
3252
aaed909a 3253CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3254{
3255 CPUAlphaState *env;
a964acc6 3256 int implver, amask, i, max;
4c9649a9
JM
3257
3258 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3259 cpu_exec_init(env);
2e70f6ef 3260 alpha_translate_init();
4c9649a9 3261 tlb_flush(env, 1);
a964acc6
RH
3262
3263 /* Default to ev67; no reason not to emulate insns by default. */
3264 implver = IMPLVER_21264;
3265 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3266 | AMASK_TRAP | AMASK_PREFETCH);
3267
3268 max = ARRAY_SIZE(cpu_defs);
3269 for (i = 0; i < max; i++) {
3270 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3271 implver = cpu_defs[i].implver;
3272 amask = cpu_defs[i].amask;
3273 break;
3274 }
3275 }
3276 env->implver = implver;
3277 env->amask = amask;
3278
4c9649a9
JM
3279 env->ps = 0x1F00;
3280#if defined (CONFIG_USER_ONLY)
3281 env->ps |= 1 << 3;
2edd07ef
RH
3282 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3283 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3284#else
4c9649a9 3285 pal_init(env);
6049f4f8 3286#endif
dad081ee 3287
4c9649a9 3288 /* Initialize IPR */
dad081ee
RH
3289#if defined (CONFIG_USER_ONLY)
3290 env->ipr[IPR_EXC_ADDR] = 0;
3291 env->ipr[IPR_EXC_SUM] = 0;
3292 env->ipr[IPR_EXC_MASK] = 0;
3293#else
3294 {
f88fe4e3
BS
3295 // uint64_t hwpcb;
3296 // hwpcb = env->ipr[IPR_PCBB];
dad081ee
RH
3297 env->ipr[IPR_ASN] = 0;
3298 env->ipr[IPR_ASTEN] = 0;
3299 env->ipr[IPR_ASTSR] = 0;
3300 env->ipr[IPR_DATFX] = 0;
3301 /* XXX: fix this */
3302 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3303 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3304 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3305 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3306 env->ipr[IPR_FEN] = 0;
3307 env->ipr[IPR_IPL] = 31;
3308 env->ipr[IPR_MCES] = 0;
3309 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3310 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3311 env->ipr[IPR_SISR] = 0;
3312 env->ipr[IPR_VIRBND] = -1ULL;
3313 }
3314#endif
4c9649a9 3315
0bf46a40 3316 qemu_init_vcpu(env);
4c9649a9
JM
3317 return env;
3318}
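/* Usage sketch (illustrative, not part of this file): callers such as the
   machine init or the linux-user main loop need no more than

       CPUAlphaState *env = cpu_alpha_init("ev67");

   An unrecognised model string is not an error: the loop above simply
   leaves the ev67-class defaults selected before it in place. */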
aaed909a 3319
d2856f1a
AJ
3320void gen_pc_load(CPUState *env, TranslationBlock *tb,
3321 unsigned long searched_pc, int pc_pos, void *puc)
3322{
3323 env->pc = gen_opc_pc[pc_pos];
3324}