/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

19188121 35#undef ALPHA_DEBUG_DISAS
f24518b5 36#define CONFIG_SOFTFLOAT_INLINE
d12d51d5
AL
37
38#ifdef ALPHA_DEBUG_DISAS
806991da 39# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
d12d51d5
AL
40#else
41# define LOG_DISAS(...) do { } while (0)
42#endif
43
4c9649a9
JM
44typedef struct DisasContext DisasContext;
45struct DisasContext {
4af70374
RH
46 struct TranslationBlock *tb;
47 CPUAlphaState *env;
4c9649a9
JM
48 uint64_t pc;
49 int mem_idx;
50#if !defined (CONFIG_USER_ONLY)
51 int pal_mode;
52#endif
53 uint32_t amask;
f24518b5
RH
54
55 /* Current rounding mode for this TB. */
56 int tb_rm;
57 /* Current flush-to-zero setting for this TB. */
58 int tb_ftz;
4c9649a9
JM
59};
60
4af70374
RH
61/* Return values from translate_one, indicating the state of the TB.
62 Note that zero indicates that we are not exiting the TB. */
63
64typedef enum {
65 NO_EXIT,
66
67 /* We have emitted one or more goto_tb. No fixup required. */
68 EXIT_GOTO_TB,
69
70 /* We are not using a goto_tb (for whatever reason), but have updated
71 the PC (for whatever reason), so there's no need to do it again on
72 exiting the TB. */
73 EXIT_PC_UPDATED,
74
75 /* We are exiting the TB, but have neither emitted a goto_tb, nor
76 updated the PC for the next instruction to be executed. */
8aa3fa20
RH
77 EXIT_PC_STALE,
78
79 /* We are ending the TB with a noreturn function call, e.g. longjmp.
80 No following code will be executed. */
81 EXIT_NORETURN,
4af70374
RH
82} ExitStatus;
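/* Usage sketch (an assumption -- the caller is outside this excerpt):
   a translation loop would keep invoking translate_one() while it
   returns NO_EXIT and stop on any other value; per the comments above,
   only EXIT_PC_STALE still requires the caller to store the next PC
   before leaving the TB. */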
83
3761035f 84/* global register indexes */
a7812ae4 85static TCGv_ptr cpu_env;
496cb5b9 86static TCGv cpu_ir[31];
f18cd223 87static TCGv cpu_fir[31];
496cb5b9 88static TCGv cpu_pc;
6910b8f6
RH
89static TCGv cpu_lock_addr;
90static TCGv cpu_lock_st_addr;
91static TCGv cpu_lock_value;
ab471ade
RH
92#ifdef CONFIG_USER_ONLY
93static TCGv cpu_uniq;
94#endif
496cb5b9 95
3761035f 96/* register names */
f18cd223 97static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
2e70f6ef
PB
98
99#include "gen-icount.h"
100
a5f1b965 101static void alpha_translate_init(void)
2e70f6ef 102{
496cb5b9
AJ
103 int i;
104 char *p;
2e70f6ef 105 static int done_init = 0;
496cb5b9 106
2e70f6ef
PB
107 if (done_init)
108 return;
496cb5b9 109
a7812ae4 110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
496cb5b9
AJ
111
112 p = cpu_reg_names;
113 for (i = 0; i < 31; i++) {
114 sprintf(p, "ir%d", i);
a7812ae4
PB
115 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
116 offsetof(CPUState, ir[i]), p);
6ba8dcd7 117 p += (i < 10) ? 4 : 5;
f18cd223
AJ
118
119 sprintf(p, "fir%d", i);
a7812ae4
PB
120 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
121 offsetof(CPUState, fir[i]), p);
f18cd223 122 p += (i < 10) ? 5 : 6;
496cb5b9
AJ
123 }
124
a7812ae4
PB
125 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
126 offsetof(CPUState, pc), "pc");
496cb5b9 127
6910b8f6
RH
128 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
129 offsetof(CPUState, lock_addr),
130 "lock_addr");
131 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
132 offsetof(CPUState, lock_st_addr),
133 "lock_st_addr");
134 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
135 offsetof(CPUState, lock_value),
136 "lock_value");
f4ed8679 137
ab471ade
RH
138#ifdef CONFIG_USER_ONLY
139 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
140 offsetof(CPUState, unique), "uniq");
141#endif
142
496cb5b9 143 /* register helpers */
a7812ae4 144#define GEN_HELPER 2
496cb5b9
AJ
145#include "helper.h"
146
2e70f6ef
PB
147 done_init = 1;
148}
149
8aa3fa20 150static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
4c9649a9 151{
a7812ae4 152 TCGv_i32 tmp1, tmp2;
6ad02592 153
496cb5b9 154 tcg_gen_movi_i64(cpu_pc, ctx->pc);
6ad02592
AJ
155 tmp1 = tcg_const_i32(exception);
156 tmp2 = tcg_const_i32(error_code);
a7812ae4
PB
157 gen_helper_excp(tmp1, tmp2);
158 tcg_temp_free_i32(tmp2);
159 tcg_temp_free_i32(tmp1);
8aa3fa20
RH
160
161 return EXIT_NORETURN;
4c9649a9
JM
162}
163
8aa3fa20 164static inline ExitStatus gen_invalid(DisasContext *ctx)
4c9649a9 165{
8aa3fa20 166 return gen_excp(ctx, EXCP_OPCDEC, 0);
4c9649a9
JM
167}
168
636aa200 169static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
f18cd223 170{
a7812ae4
PB
171 TCGv tmp = tcg_temp_new();
172 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 173 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
174 tcg_gen_trunc_i64_i32(tmp32, tmp);
175 gen_helper_memory_to_f(t0, tmp32);
176 tcg_temp_free_i32(tmp32);
f18cd223
AJ
177 tcg_temp_free(tmp);
178}
179
636aa200 180static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
f18cd223 181{
a7812ae4 182 TCGv tmp = tcg_temp_new();
f18cd223 183 tcg_gen_qemu_ld64(tmp, t1, flags);
a7812ae4 184 gen_helper_memory_to_g(t0, tmp);
f18cd223
AJ
185 tcg_temp_free(tmp);
186}
187
636aa200 188static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
f18cd223 189{
a7812ae4
PB
190 TCGv tmp = tcg_temp_new();
191 TCGv_i32 tmp32 = tcg_temp_new_i32();
f18cd223 192 tcg_gen_qemu_ld32u(tmp, t1, flags);
a7812ae4
PB
193 tcg_gen_trunc_i64_i32(tmp32, tmp);
194 gen_helper_memory_to_s(t0, tmp32);
195 tcg_temp_free_i32(tmp32);
f18cd223
AJ
196 tcg_temp_free(tmp);
197}
198
636aa200 199static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
f4ed8679 200{
f4ed8679 201 tcg_gen_qemu_ld32s(t0, t1, flags);
6910b8f6
RH
202 tcg_gen_mov_i64(cpu_lock_addr, t1);
203 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
204}
205
636aa200 206static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
f4ed8679 207{
f4ed8679 208 tcg_gen_qemu_ld64(t0, t1, flags);
6910b8f6
RH
209 tcg_gen_mov_i64(cpu_lock_addr, t1);
210 tcg_gen_mov_i64(cpu_lock_value, t0);
f4ed8679
AJ
211}
212
636aa200
BS
213static inline void gen_load_mem(DisasContext *ctx,
214 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
215 int flags),
216 int ra, int rb, int32_t disp16, int fp,
217 int clear)
023d8ca2 218{
6910b8f6 219 TCGv addr, va;
023d8ca2 220
6910b8f6
RH
221 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
222 prefetches, which we can treat as nops. No worries about
223 missed exceptions here. */
224 if (unlikely(ra == 31)) {
023d8ca2 225 return;
6910b8f6 226 }
023d8ca2 227
a7812ae4 228 addr = tcg_temp_new();
023d8ca2
AJ
229 if (rb != 31) {
230 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 231 if (clear) {
023d8ca2 232 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 233 }
023d8ca2 234 } else {
6910b8f6 235 if (clear) {
023d8ca2 236 disp16 &= ~0x7;
6910b8f6 237 }
023d8ca2
AJ
238 tcg_gen_movi_i64(addr, disp16);
239 }
6910b8f6
RH
240
241 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
242 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
243
023d8ca2
AJ
244 tcg_temp_free(addr);
245}
246
636aa200 247static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
f18cd223 248{
a7812ae4
PB
249 TCGv_i32 tmp32 = tcg_temp_new_i32();
250 TCGv tmp = tcg_temp_new();
251 gen_helper_f_to_memory(tmp32, t0);
252 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
253 tcg_gen_qemu_st32(tmp, t1, flags);
254 tcg_temp_free(tmp);
a7812ae4 255 tcg_temp_free_i32(tmp32);
f18cd223
AJ
256}
257
636aa200 258static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
f18cd223 259{
a7812ae4
PB
260 TCGv tmp = tcg_temp_new();
261 gen_helper_g_to_memory(tmp, t0);
f18cd223
AJ
262 tcg_gen_qemu_st64(tmp, t1, flags);
263 tcg_temp_free(tmp);
264}
265
636aa200 266static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
f18cd223 267{
a7812ae4
PB
268 TCGv_i32 tmp32 = tcg_temp_new_i32();
269 TCGv tmp = tcg_temp_new();
270 gen_helper_s_to_memory(tmp32, t0);
271 tcg_gen_extu_i32_i64(tmp, tmp32);
f18cd223
AJ
272 tcg_gen_qemu_st32(tmp, t1, flags);
273 tcg_temp_free(tmp);
a7812ae4 274 tcg_temp_free_i32(tmp32);
f18cd223
AJ
275}
276
636aa200
BS
277static inline void gen_store_mem(DisasContext *ctx,
278 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
279 int flags),
280 int ra, int rb, int32_t disp16, int fp,
6910b8f6 281 int clear)
023d8ca2 282{
6910b8f6
RH
283 TCGv addr, va;
284
285 addr = tcg_temp_new();
023d8ca2
AJ
286 if (rb != 31) {
287 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
6910b8f6 288 if (clear) {
023d8ca2 289 tcg_gen_andi_i64(addr, addr, ~0x7);
6910b8f6 290 }
023d8ca2 291 } else {
6910b8f6 292 if (clear) {
023d8ca2 293 disp16 &= ~0x7;
6910b8f6 294 }
023d8ca2
AJ
295 tcg_gen_movi_i64(addr, disp16);
296 }
6910b8f6
RH
297
298 if (ra == 31) {
299 va = tcg_const_i64(0);
f18cd223 300 } else {
6910b8f6 301 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
023d8ca2 302 }
6910b8f6
RH
303 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
304
023d8ca2 305 tcg_temp_free(addr);
6910b8f6
RH
306 if (ra == 31) {
307 tcg_temp_free(va);
308 }
309}
310
311static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
312 int32_t disp16, int quad)
313{
314 TCGv addr;
315
316 if (ra == 31) {
317 /* ??? Don't bother storing anything. The user can't tell
318 the difference, since the zero register always reads zero. */
319 return NO_EXIT;
320 }
321
322#if defined(CONFIG_USER_ONLY)
323 addr = cpu_lock_st_addr;
324#else
325 addr = tcg_temp_local_new();
326#endif
327
328 if (rb != 31) {
329 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
330 } else {
331 tcg_gen_movi_i64(addr, disp16);
332 }
333
334#if defined(CONFIG_USER_ONLY)
335 /* ??? This is handled via a complicated version of compare-and-swap
336 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
337 in TCG so that this isn't necessary. */
338 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
339#else
340 /* ??? In system mode we are never multi-threaded, so CAS can be
341 implemented via a non-atomic load-compare-store sequence. */
342 {
343 int lab_fail, lab_done;
344 TCGv val;
345
346 lab_fail = gen_new_label();
347 lab_done = gen_new_label();
348 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
349
350 val = tcg_temp_new();
351 if (quad) {
352 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
353 } else {
354 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
355 }
356 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
357
358 if (quad) {
359 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
360 } else {
361 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
362 }
363 tcg_gen_movi_i64(cpu_ir[ra], 1);
364 tcg_gen_br(lab_done);
365
366 gen_set_label(lab_fail);
367 tcg_gen_movi_i64(cpu_ir[ra], 0);
368
369 gen_set_label(lab_done);
370 tcg_gen_movi_i64(cpu_lock_addr, -1);
371
372 tcg_temp_free(addr);
373 return NO_EXIT;
374 }
375#endif
023d8ca2
AJ
376}
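/* A minimal host-level sketch of the non-atomic system-mode path above,
   assuming single-threaded execution; the helper and the flat memory
   word are hypothetical -- the real code goes through tcg_gen_qemu_ld/st
   and also resets cpu_lock_addr to -1 afterwards. */
static inline uint64_t stc_sketch(uint64_t *mem_word, uint64_t addr,
                                  uint64_t lock_addr, uint64_t lock_value,
                                  uint64_t new_value)
{
    if (addr == lock_addr && *mem_word == lock_value) {
        *mem_word = new_value;  /* lock still intact: perform the store */
        return 1;               /* ra receives 1 on success */
    }
    return 0;                   /* ra receives 0 on failure */
}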
377
4af70374 378static int use_goto_tb(DisasContext *ctx, uint64_t dest)
4c9649a9 379{
4af70374
RH
380 /* Check for the dest on the same page as the start of the TB. We
381 also want to suppress goto_tb in the case of single-stepping and IO.
382 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
383 && !ctx->env->singlestep_enabled
384 && !(ctx->tb->cflags & CF_LAST_IO));
385}
dbb30fe6 386
4af70374
RH
387static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
388{
389 uint64_t dest = ctx->pc + (disp << 2);
390
391 if (ra != 31) {
392 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
393 }
394
395 /* Notice branch-to-next; used to initialize RA with the PC. */
396 if (disp == 0) {
397 return 0;
398 } else if (use_goto_tb(ctx, dest)) {
399 tcg_gen_goto_tb(0);
400 tcg_gen_movi_i64(cpu_pc, dest);
401 tcg_gen_exit_tb((long)ctx->tb);
402 return EXIT_GOTO_TB;
403 } else {
404 tcg_gen_movi_i64(cpu_pc, dest);
405 return EXIT_PC_UPDATED;
406 }
dbb30fe6
RH
407}
408
4af70374
RH
409static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
410 TCGv cmp, int32_t disp)
dbb30fe6 411{
4af70374 412 uint64_t dest = ctx->pc + (disp << 2);
dbb30fe6 413 int lab_true = gen_new_label();
9c29504e 414
4af70374
RH
415 if (use_goto_tb(ctx, dest)) {
416 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
417
418 tcg_gen_goto_tb(0);
419 tcg_gen_movi_i64(cpu_pc, ctx->pc);
420 tcg_gen_exit_tb((long)ctx->tb);
421
422 gen_set_label(lab_true);
423 tcg_gen_goto_tb(1);
424 tcg_gen_movi_i64(cpu_pc, dest);
425 tcg_gen_exit_tb((long)ctx->tb + 1);
426
427 return EXIT_GOTO_TB;
428 } else {
429 int lab_over = gen_new_label();
430
431 /* ??? Consider using either
432 movi pc, next
433 addi tmp, pc, disp
434 movcond pc, cond, 0, tmp, pc
435 or
436 setcond tmp, cond, 0
437 movi pc, next
438 neg tmp, tmp
439 andi tmp, tmp, disp
440 add pc, pc, tmp
441 The current diamond subgraph surely isn't efficient. */
442
443 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
444 tcg_gen_movi_i64(cpu_pc, ctx->pc);
445 tcg_gen_br(lab_over);
446 gen_set_label(lab_true);
447 tcg_gen_movi_i64(cpu_pc, dest);
448 gen_set_label(lab_over);
449
450 return EXIT_PC_UPDATED;
451 }
452}
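/* Host-level arithmetic behind the setcond alternative proposed in the
   comment above (illustrative only; "byte_disp" stands for the byte
   offset dest - next): pc = next + (-(cond != 0) & disp). */
static inline uint64_t bcond_branchless_sketch(uint64_t next_pc,
                                               uint64_t byte_disp,
                                               int taken)
{
    return next_pc + (-(uint64_t)(taken != 0) & byte_disp);
}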
453
454static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
455 int32_t disp, int mask)
456{
457 TCGv cmp_tmp;
458
459 if (unlikely(ra == 31)) {
460 cmp_tmp = tcg_const_i64(0);
461 } else {
462 cmp_tmp = tcg_temp_new();
9c29504e 463 if (mask) {
4af70374 464 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
dbb30fe6 465 } else {
4af70374 466 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
dbb30fe6 467 }
9c29504e 468 }
4af70374
RH
469
470 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
471}
472
4af70374 473/* Fold -0.0 for comparison with COND. */
dbb30fe6 474
4af70374 475static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
4c9649a9 476{
dbb30fe6 477 uint64_t mzero = 1ull << 63;
f18cd223 478
dbb30fe6
RH
479 switch (cond) {
480 case TCG_COND_LE:
481 case TCG_COND_GT:
482 /* For <= or >, the -0.0 value directly compares the way we want. */
4af70374 483 tcg_gen_mov_i64(dest, src);
a7812ae4 484 break;
dbb30fe6
RH
485
486 case TCG_COND_EQ:
487 case TCG_COND_NE:
488 /* For == or !=, we can simply mask off the sign bit and compare. */
4af70374 489 tcg_gen_andi_i64(dest, src, mzero - 1);
a7812ae4 490 break;
dbb30fe6
RH
491
492 case TCG_COND_GE:
dbb30fe6 493 case TCG_COND_LT:
4af70374
RH
494 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
495 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
496 tcg_gen_neg_i64(dest, dest);
497 tcg_gen_and_i64(dest, dest, src);
a7812ae4 498 break;
dbb30fe6 499
a7812ae4
PB
500 default:
501 abort();
f18cd223 502 }
dbb30fe6
RH
503}
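/* Worked example for the folding above: IEEE -0.0 is the bit pattern
   0x8000000000000000.  For EQ/NE the sign bit is masked off, so -0.0 and
   +0.0 both compare as 0.  For GE/LT, setcond(NE, src, mzero) is 0 only
   for -0.0; negating that gives an all-ones/all-zero mask, and the AND
   therefore maps -0.0 to +0.0 while leaving every other value intact. */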
504
4af70374
RH
505static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
506 int32_t disp)
dbb30fe6 507{
4af70374 508 TCGv cmp_tmp;
dbb30fe6
RH
509
510 if (unlikely(ra == 31)) {
511 /* Very uncommon case, but easier to optimize it to an integer
512 comparison than continuing with the floating point comparison. */
4af70374 513 return gen_bcond(ctx, cond, ra, disp, 0);
dbb30fe6
RH
514 }
515
4af70374
RH
516 cmp_tmp = tcg_temp_new();
517 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
518 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
4c9649a9
JM
519}
520
bbe1dab4 521static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
4af70374 522 int islit, uint8_t lit, int mask)
4c9649a9 523{
bbe1dab4 524 TCGCond inv_cond = tcg_invert_cond(cond);
9c29504e
AJ
525 int l1;
526
527 if (unlikely(rc == 31))
528 return;
529
530 l1 = gen_new_label();
531
532 if (ra != 31) {
533 if (mask) {
a7812ae4 534 TCGv tmp = tcg_temp_new();
9c29504e
AJ
535 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
536 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
537 tcg_temp_free(tmp);
538 } else
539 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
540 } else {
541 /* Very uncommon case - Do not bother to optimize. */
542 TCGv tmp = tcg_const_i64(0);
543 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
544 tcg_temp_free(tmp);
545 }
546
4c9649a9 547 if (islit)
9c29504e 548 tcg_gen_movi_i64(cpu_ir[rc], lit);
4c9649a9 549 else
dfaa8583 550 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
9c29504e 551 gen_set_label(l1);
4c9649a9
JM
552}
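/* The code above is the usual branch-over-move idiom: jump past the
   assignment when the inverted condition holds, i.e. the equivalent of
   "if (cond(ra)) rc = islit ? lit : rb;". */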
553
bbe1dab4 554static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
dbb30fe6 555{
4af70374 556 TCGv cmp_tmp;
dbb30fe6
RH
557 int l1;
558
4af70374 559 if (unlikely(rc == 31)) {
dbb30fe6 560 return;
4af70374
RH
561 }
562
563 cmp_tmp = tcg_temp_new();
dbb30fe6 564 if (unlikely(ra == 31)) {
4af70374
RH
565 tcg_gen_movi_i64(cmp_tmp, 0);
566 } else {
567 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
dbb30fe6
RH
568 }
569
570 l1 = gen_new_label();
4af70374
RH
571 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
572 tcg_temp_free(cmp_tmp);
dbb30fe6
RH
573
574 if (rb != 31)
575 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
576 else
577 tcg_gen_movi_i64(cpu_fir[rc], 0);
578 gen_set_label(l1);
579}
580
f24518b5
RH
581#define QUAL_RM_N 0x080 /* Round mode nearest even */
582#define QUAL_RM_C 0x000 /* Round mode chopped */
583#define QUAL_RM_M 0x040 /* Round mode minus infinity */
584#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
585#define QUAL_RM_MASK 0x0c0
586
587#define QUAL_U 0x100 /* Underflow enable (fp output) */
588#define QUAL_V 0x100 /* Overflow enable (int output) */
589#define QUAL_S 0x400 /* Software completion enable */
590#define QUAL_I 0x200 /* Inexact detection enable */
591
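/* Worked example with the masks above: fn11 == 0x6c0 is
   QUAL_S | QUAL_I | QUAL_RM_D, i.e. dynamic rounding from the FPCR with
   software completion and inexact detection enabled, while fn11 == 0x080
   is plain QUAL_RM_N (round to nearest even) with no other qualifiers. */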
592static void gen_qual_roundmode(DisasContext *ctx, int fn11)
593{
594 TCGv_i32 tmp;
595
596 fn11 &= QUAL_RM_MASK;
597 if (fn11 == ctx->tb_rm) {
598 return;
599 }
600 ctx->tb_rm = fn11;
601
602 tmp = tcg_temp_new_i32();
603 switch (fn11) {
604 case QUAL_RM_N:
605 tcg_gen_movi_i32(tmp, float_round_nearest_even);
606 break;
607 case QUAL_RM_C:
608 tcg_gen_movi_i32(tmp, float_round_to_zero);
609 break;
610 case QUAL_RM_M:
611 tcg_gen_movi_i32(tmp, float_round_down);
612 break;
613 case QUAL_RM_D:
614 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
615 break;
616 }
617
618#if defined(CONFIG_SOFTFLOAT_INLINE)
619 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
620 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
621 sets the one field. */
622 tcg_gen_st8_i32(tmp, cpu_env,
623 offsetof(CPUState, fp_status.float_rounding_mode));
624#else
625 gen_helper_setroundmode(tmp);
626#endif
627
628 tcg_temp_free_i32(tmp);
629}
630
631static void gen_qual_flushzero(DisasContext *ctx, int fn11)
632{
633 TCGv_i32 tmp;
634
635 fn11 &= QUAL_U;
636 if (fn11 == ctx->tb_ftz) {
637 return;
638 }
639 ctx->tb_ftz = fn11;
640
641 tmp = tcg_temp_new_i32();
642 if (fn11) {
643 /* Underflow is enabled, use the FPCR setting. */
644 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
645 } else {
646 /* Underflow is disabled, force flush-to-zero. */
647 tcg_gen_movi_i32(tmp, 1);
648 }
649
650#if defined(CONFIG_SOFTFLOAT_INLINE)
651 tcg_gen_st8_i32(tmp, cpu_env,
652 offsetof(CPUState, fp_status.flush_to_zero));
653#else
654 gen_helper_setflushzero(tmp);
655#endif
656
657 tcg_temp_free_i32(tmp);
658}
659
660static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
661{
662 TCGv val = tcg_temp_new();
663 if (reg == 31) {
664 tcg_gen_movi_i64(val, 0);
665 } else if (fn11 & QUAL_S) {
666 gen_helper_ieee_input_s(val, cpu_fir[reg]);
667 } else if (is_cmp) {
668 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
669 } else {
670 gen_helper_ieee_input(val, cpu_fir[reg]);
671 }
672 return val;
673}
674
675static void gen_fp_exc_clear(void)
676{
677#if defined(CONFIG_SOFTFLOAT_INLINE)
678 TCGv_i32 zero = tcg_const_i32(0);
679 tcg_gen_st8_i32(zero, cpu_env,
680 offsetof(CPUState, fp_status.float_exception_flags));
681 tcg_temp_free_i32(zero);
682#else
683 gen_helper_fp_exc_clear();
684#endif
685}
686
687static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
688{
689 /* ??? We ought to be able to do something with imprecise exceptions.
690 E.g. notice we're still in the trap shadow of something within the
691 TB and do not generate the code to signal the exception; end the TB
692 when an exception is forced to arrive, either by consumption of a
693 register value or TRAPB or EXCB. */
694 TCGv_i32 exc = tcg_temp_new_i32();
695 TCGv_i32 reg;
696
697#if defined(CONFIG_SOFTFLOAT_INLINE)
698 tcg_gen_ld8u_i32(exc, cpu_env,
699 offsetof(CPUState, fp_status.float_exception_flags));
700#else
701 gen_helper_fp_exc_get(exc);
702#endif
703
704 if (ignore) {
705 tcg_gen_andi_i32(exc, exc, ~ignore);
706 }
707
708 /* ??? Pass in the regno of the destination so that the helper can
709 set EXC_MASK, which contains a bitmask of destination registers
710 that have caused arithmetic traps. A simple userspace emulation
711 does not require this. We do need it for a guest kernel's entArith,
712 or if we were to do something clever with imprecise exceptions. */
713 reg = tcg_const_i32(rc + 32);
714
715 if (fn11 & QUAL_S) {
716 gen_helper_fp_exc_raise_s(exc, reg);
717 } else {
718 gen_helper_fp_exc_raise(exc, reg);
719 }
720
721 tcg_temp_free_i32(reg);
722 tcg_temp_free_i32(exc);
723}
724
725static inline void gen_fp_exc_raise(int rc, int fn11)
726{
727 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
4c9649a9 728}
f24518b5 729
593f17e5
RH
730static void gen_fcvtlq(int rb, int rc)
731{
732 if (unlikely(rc == 31)) {
733 return;
734 }
735 if (unlikely(rb == 31)) {
736 tcg_gen_movi_i64(cpu_fir[rc], 0);
737 } else {
738 TCGv tmp = tcg_temp_new();
739
740 /* The arithmetic right shift here, plus the sign-extended mask below
741 yields a sign-extended result without an explicit ext32s_i64. */
742 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
743 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
744 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
745 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
746 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
747
748 tcg_temp_free(tmp);
749 }
750}
751
735cf45f
RH
752static void gen_fcvtql(int rb, int rc)
753{
754 if (unlikely(rc == 31)) {
755 return;
756 }
757 if (unlikely(rb == 31)) {
758 tcg_gen_movi_i64(cpu_fir[rc], 0);
759 } else {
760 TCGv tmp = tcg_temp_new();
761
762 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
763 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
764 tcg_gen_shli_i64(tmp, tmp, 32);
765 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
766 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
767
768 tcg_temp_free(tmp);
769 }
770}
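/* Host-level sketch of the two register-format conversions above
   (hypothetical helpers, derived directly from the shift/mask sequences):
   the longword-in-FP-register format keeps bits <31:30> of the integer
   at <63:62> and bits <29:0> at <58:29>. */
static inline uint64_t cvtql_sketch(uint64_t q)
{
    return ((q & 0xC0000000ull) << 32) | ((q & 0x3FFFFFFFull) << 29);
}

static inline int64_t cvtlq_sketch(uint64_t f)
{
    uint32_t lw = (uint32_t)(((f >> 32) & 0xC0000000ull)
                             | ((f >> 29) & 0x3FFFFFFFull));
    return (int32_t)lw;  /* sign-extend, as the sari/mask above does */
}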
771
772static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
773{
774 if (rb != 31) {
775 int lab = gen_new_label();
776 TCGv tmp = tcg_temp_new();
777
778 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
779 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
780 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
781
782 gen_set_label(lab);
783 }
784 gen_fcvtql(rb, rc);
785}
786
f24518b5
RH
787#define FARITH2(name) \
788static inline void glue(gen_f, name)(int rb, int rc) \
789{ \
790 if (unlikely(rc == 31)) { \
791 return; \
792 } \
793 if (rb != 31) { \
794 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
795 } else { \
796 TCGv tmp = tcg_const_i64(0); \
797 gen_helper_ ## name (cpu_fir[rc], tmp); \
798 tcg_temp_free(tmp); \
799 } \
800}
f24518b5
RH
801
802/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
803FARITH2(sqrtf)
804FARITH2(sqrtg)
a7812ae4
PB
805FARITH2(cvtgf)
806FARITH2(cvtgq)
807FARITH2(cvtqf)
808FARITH2(cvtqg)
f24518b5
RH
809
810static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
811 int rb, int rc, int fn11)
812{
813 TCGv vb;
814
815 /* ??? This is wrong: the instruction is not a nop, it still may
816 raise exceptions. */
817 if (unlikely(rc == 31)) {
818 return;
819 }
820
821 gen_qual_roundmode(ctx, fn11);
822 gen_qual_flushzero(ctx, fn11);
823 gen_fp_exc_clear();
824
825 vb = gen_ieee_input(rb, fn11, 0);
826 helper(cpu_fir[rc], vb);
827 tcg_temp_free(vb);
828
829 gen_fp_exc_raise(rc, fn11);
830}
831
832#define IEEE_ARITH2(name) \
833static inline void glue(gen_f, name)(DisasContext *ctx, \
834 int rb, int rc, int fn11) \
835{ \
836 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
837}
838IEEE_ARITH2(sqrts)
839IEEE_ARITH2(sqrtt)
840IEEE_ARITH2(cvtst)
841IEEE_ARITH2(cvtts)
842
843static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
844{
845 TCGv vb;
846 int ignore = 0;
847
848 /* ??? This is wrong: the instruction is not a nop, it still may
849 raise exceptions. */
850 if (unlikely(rc == 31)) {
851 return;
852 }
853
854 /* No need to set flushzero, since we have an integer output. */
855 gen_fp_exc_clear();
856 vb = gen_ieee_input(rb, fn11, 0);
857
858 /* Almost all integer conversions use chopped rounding, and most
859 also do not have integer overflow enabled. Special case that. */
860 switch (fn11) {
861 case QUAL_RM_C:
862 gen_helper_cvttq_c(cpu_fir[rc], vb);
863 break;
864 case QUAL_V | QUAL_RM_C:
865 case QUAL_S | QUAL_V | QUAL_RM_C:
866 ignore = float_flag_inexact;
867 /* FALLTHRU */
868 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
869 gen_helper_cvttq_svic(cpu_fir[rc], vb);
870 break;
871 default:
872 gen_qual_roundmode(ctx, fn11);
873 gen_helper_cvttq(cpu_fir[rc], vb);
874 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
875 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
876 break;
877 }
878 tcg_temp_free(vb);
879
880 gen_fp_exc_raise_ignore(rc, fn11, ignore);
4c9649a9
JM
881}
882
f24518b5
RH
883static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
884 int rb, int rc, int fn11)
885{
886 TCGv vb;
887
888 /* ??? This is wrong: the instruction is not a nop, it still may
889 raise exceptions. */
890 if (unlikely(rc == 31)) {
891 return;
892 }
893
894 gen_qual_roundmode(ctx, fn11);
895
896 if (rb == 31) {
897 vb = tcg_const_i64(0);
898 } else {
899 vb = cpu_fir[rb];
900 }
901
902 /* The only exception that can be raised by integer conversion
903 is inexact. Thus we only need to worry about exceptions when
904 inexact handling is requested. */
905 if (fn11 & QUAL_I) {
906 gen_fp_exc_clear();
907 helper(cpu_fir[rc], vb);
908 gen_fp_exc_raise(rc, fn11);
909 } else {
910 helper(cpu_fir[rc], vb);
911 }
912
913 if (rb == 31) {
914 tcg_temp_free(vb);
915 }
916}
917
918#define IEEE_INTCVT(name) \
919static inline void glue(gen_f, name)(DisasContext *ctx, \
920 int rb, int rc, int fn11) \
921{ \
922 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
923}
924IEEE_INTCVT(cvtqs)
925IEEE_INTCVT(cvtqt)
926
dc96be4b
RH
927static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
928{
929 TCGv va, vb, vmask;
930 int za = 0, zb = 0;
931
932 if (unlikely(rc == 31)) {
933 return;
934 }
935
936 vmask = tcg_const_i64(mask);
937
938 TCGV_UNUSED_I64(va);
939 if (ra == 31) {
940 if (inv_a) {
941 va = vmask;
942 } else {
943 za = 1;
944 }
945 } else {
946 va = tcg_temp_new_i64();
947 tcg_gen_mov_i64(va, cpu_fir[ra]);
948 if (inv_a) {
949 tcg_gen_andc_i64(va, vmask, va);
950 } else {
951 tcg_gen_and_i64(va, va, vmask);
952 }
953 }
954
955 TCGV_UNUSED_I64(vb);
956 if (rb == 31) {
957 zb = 1;
958 } else {
959 vb = tcg_temp_new_i64();
960 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
961 }
962
963 switch (za << 1 | zb) {
964 case 0 | 0:
965 tcg_gen_or_i64(cpu_fir[rc], va, vb);
966 break;
967 case 0 | 1:
968 tcg_gen_mov_i64(cpu_fir[rc], va);
969 break;
970 case 2 | 0:
971 tcg_gen_mov_i64(cpu_fir[rc], vb);
972 break;
973 case 2 | 1:
974 tcg_gen_movi_i64(cpu_fir[rc], 0);
975 break;
976 }
977
978 tcg_temp_free(vmask);
979 if (ra != 31) {
980 tcg_temp_free(va);
981 }
982 if (rb != 31) {
983 tcg_temp_free(vb);
984 }
985}
986
987static inline void gen_fcpys(int ra, int rb, int rc)
988{
989 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
990}
991
992static inline void gen_fcpysn(int ra, int rb, int rc)
993{
994 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
995}
996
997static inline void gen_fcpyse(int ra, int rb, int rc)
998{
999 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
1000}
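/* Note on the masks used above: 0x8000000000000000 covers only the sign
   bit, so CPYS/CPYSN copy (or invert) just the sign of Fa onto Fb's
   exponent and fraction, while 0xFFF0000000000000 covers the sign plus
   the 11-bit exponent, which is what CPYSE needs. */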
1001
f24518b5
RH
1002#define FARITH3(name) \
1003static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1004{ \
1005 TCGv va, vb; \
1006 \
1007 if (unlikely(rc == 31)) { \
1008 return; \
1009 } \
1010 if (ra == 31) { \
1011 va = tcg_const_i64(0); \
1012 } else { \
1013 va = cpu_fir[ra]; \
1014 } \
1015 if (rb == 31) { \
1016 vb = tcg_const_i64(0); \
1017 } else { \
1018 vb = cpu_fir[rb]; \
1019 } \
1020 \
1021 gen_helper_ ## name (cpu_fir[rc], va, vb); \
1022 \
1023 if (ra == 31) { \
1024 tcg_temp_free(va); \
1025 } \
1026 if (rb == 31) { \
1027 tcg_temp_free(vb); \
1028 } \
1029}
f24518b5
RH
1030
1031/* ??? VAX instruction qualifiers ignored. */
a7812ae4
PB
1032FARITH3(addf)
1033FARITH3(subf)
1034FARITH3(mulf)
1035FARITH3(divf)
1036FARITH3(addg)
1037FARITH3(subg)
1038FARITH3(mulg)
1039FARITH3(divg)
1040FARITH3(cmpgeq)
1041FARITH3(cmpglt)
1042FARITH3(cmpgle)
f24518b5
RH
1043
1044static void gen_ieee_arith3(DisasContext *ctx,
1045 void (*helper)(TCGv, TCGv, TCGv),
1046 int ra, int rb, int rc, int fn11)
1047{
1048 TCGv va, vb;
1049
1050 /* ??? This is wrong: the instruction is not a nop, it still may
1051 raise exceptions. */
1052 if (unlikely(rc == 31)) {
1053 return;
1054 }
1055
1056 gen_qual_roundmode(ctx, fn11);
1057 gen_qual_flushzero(ctx, fn11);
1058 gen_fp_exc_clear();
1059
1060 va = gen_ieee_input(ra, fn11, 0);
1061 vb = gen_ieee_input(rb, fn11, 0);
1062 helper(cpu_fir[rc], va, vb);
1063 tcg_temp_free(va);
1064 tcg_temp_free(vb);
1065
1066 gen_fp_exc_raise(rc, fn11);
1067}
1068
1069#define IEEE_ARITH3(name) \
1070static inline void glue(gen_f, name)(DisasContext *ctx, \
1071 int ra, int rb, int rc, int fn11) \
1072{ \
1073 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1074}
1075IEEE_ARITH3(adds)
1076IEEE_ARITH3(subs)
1077IEEE_ARITH3(muls)
1078IEEE_ARITH3(divs)
1079IEEE_ARITH3(addt)
1080IEEE_ARITH3(subt)
1081IEEE_ARITH3(mult)
1082IEEE_ARITH3(divt)
1083
1084static void gen_ieee_compare(DisasContext *ctx,
1085 void (*helper)(TCGv, TCGv, TCGv),
1086 int ra, int rb, int rc, int fn11)
1087{
1088 TCGv va, vb;
1089
1090 /* ??? This is wrong: the instruction is not a nop, it still may
1091 raise exceptions. */
1092 if (unlikely(rc == 31)) {
1093 return;
1094 }
1095
1096 gen_fp_exc_clear();
1097
1098 va = gen_ieee_input(ra, fn11, 1);
1099 vb = gen_ieee_input(rb, fn11, 1);
1100 helper(cpu_fir[rc], va, vb);
1101 tcg_temp_free(va);
1102 tcg_temp_free(vb);
1103
1104 gen_fp_exc_raise(rc, fn11);
1105}
1106
1107#define IEEE_CMP3(name) \
1108static inline void glue(gen_f, name)(DisasContext *ctx, \
1109 int ra, int rb, int rc, int fn11) \
1110{ \
1111 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1112}
1113IEEE_CMP3(cmptun)
1114IEEE_CMP3(cmpteq)
1115IEEE_CMP3(cmptlt)
1116IEEE_CMP3(cmptle)
a7812ae4 1117
248c42f3
RH
1118static inline uint64_t zapnot_mask(uint8_t lit)
1119{
1120 uint64_t mask = 0;
1121 int i;
1122
1123 for (i = 0; i < 8; ++i) {
1124 if ((lit >> i) & 1)
1125 mask |= 0xffull << (i * 8);
1126 }
1127 return mask;
1128}
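/* Worked examples: a set bit i in the literal keeps byte i, so
   zapnot_mask(0x01) == 0x00000000000000ff,
   zapnot_mask(0x0f) == 0x00000000ffffffff and
   zapnot_mask(0xff) == ~0ull -- exactly the cases gen_zapnoti below
   turns into ext8u/ext32u or a plain move. */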
1129
87d98f95
RH
1130/* Implement zapnot with an immediate operand, which expands to some
1131 form of immediate AND. This is a basic building block in the
1132 definition of many of the other byte manipulation instructions. */
248c42f3 1133static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
87d98f95 1134{
87d98f95
RH
1135 switch (lit) {
1136 case 0x00:
248c42f3 1137 tcg_gen_movi_i64(dest, 0);
87d98f95
RH
1138 break;
1139 case 0x01:
248c42f3 1140 tcg_gen_ext8u_i64(dest, src);
87d98f95
RH
1141 break;
1142 case 0x03:
248c42f3 1143 tcg_gen_ext16u_i64(dest, src);
87d98f95
RH
1144 break;
1145 case 0x0f:
248c42f3 1146 tcg_gen_ext32u_i64(dest, src);
87d98f95
RH
1147 break;
1148 case 0xff:
248c42f3 1149 tcg_gen_mov_i64(dest, src);
87d98f95
RH
1150 break;
1151 default:
248c42f3 1152 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
87d98f95
RH
1153 break;
1154 }
1155}
1156
1157static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1158{
1159 if (unlikely(rc == 31))
1160 return;
1161 else if (unlikely(ra == 31))
1162 tcg_gen_movi_i64(cpu_ir[rc], 0);
1163 else if (islit)
248c42f3 1164 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
87d98f95
RH
1165 else
1166 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1167}
1168
1169static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1170{
1171 if (unlikely(rc == 31))
1172 return;
1173 else if (unlikely(ra == 31))
1174 tcg_gen_movi_i64(cpu_ir[rc], 0);
1175 else if (islit)
248c42f3 1176 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
87d98f95
RH
1177 else
1178 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1179}
1180
1181
248c42f3 1182/* EXTWH, EXTLH, EXTQH */
ffec44f1
RH
1183static void gen_ext_h(int ra, int rb, int rc, int islit,
1184 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1185{
1186 if (unlikely(rc == 31))
1187 return;
377a43b6
RH
1188 else if (unlikely(ra == 31))
1189 tcg_gen_movi_i64(cpu_ir[rc], 0);
1190 else {
dfaa8583 1191 if (islit) {
377a43b6
RH
1192 lit = (64 - (lit & 7) * 8) & 0x3f;
1193 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
fe2b269a 1194 } else {
377a43b6 1195 TCGv tmp1 = tcg_temp_new();
b3249f63
AJ
1196 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1197 tcg_gen_shli_i64(tmp1, tmp1, 3);
dbf95805
VW
1198 tcg_gen_neg_i64(tmp1, tmp1);
1199 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
dfaa8583 1200 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
b3249f63 1201 tcg_temp_free(tmp1);
dfaa8583 1202 }
248c42f3 1203 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
377a43b6 1204 }
b3249f63
AJ
1205}
1206
248c42f3 1207/* EXTBL, EXTWL, EXTLL, EXTQL */
ffec44f1
RH
1208static void gen_ext_l(int ra, int rb, int rc, int islit,
1209 uint8_t lit, uint8_t byte_mask)
b3249f63
AJ
1210{
1211 if (unlikely(rc == 31))
1212 return;
377a43b6
RH
1213 else if (unlikely(ra == 31))
1214 tcg_gen_movi_i64(cpu_ir[rc], 0);
1215 else {
dfaa8583 1216 if (islit) {
377a43b6 1217 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
dfaa8583 1218 } else {
a7812ae4 1219 TCGv tmp = tcg_temp_new();
b3249f63
AJ
1220 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1221 tcg_gen_shli_i64(tmp, tmp, 3);
dfaa8583 1222 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
b3249f63 1223 tcg_temp_free(tmp);
fe2b269a 1224 }
248c42f3
RH
1225 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1226 }
1227}
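/* Worked example: with a literal rb, EXTBL reduces to
   "rc = (ra >> (8 * (lit & 7))) & 0xff", i.e. it selects byte (lit & 7)
   of ra; EXTWL/EXTLL/EXTQL are the same shift followed by the wider
   zapnot mask. */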
1228
50eb6e5c
RH
1229/* INSWH, INSLH, INSQH */
1230static void gen_ins_h(int ra, int rb, int rc, int islit,
1231 uint8_t lit, uint8_t byte_mask)
1232{
1233 if (unlikely(rc == 31))
1234 return;
1235 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1236 tcg_gen_movi_i64(cpu_ir[rc], 0);
1237 else {
1238 TCGv tmp = tcg_temp_new();
1239
1240 /* The instruction description has us left-shift the byte mask
1241 and extract bits <15:8> and apply that zap at the end. This
1242 is equivalent to simply performing the zap first and shifting
1243 afterward. */
1244 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1245
1246 if (islit) {
1247 /* Note that we have handled the lit==0 case above. */
1248 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1249 } else {
1250 TCGv shift = tcg_temp_new();
1251
1252 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1253 Do this portably by splitting the shift into two parts:
1254 shift_count-1 and 1. Arrange for the -1 by using
1255 ones-complement instead of twos-complement in the negation:
1256 ~((B & 7) * 8) & 63. */
1257
1258 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1259 tcg_gen_shli_i64(shift, shift, 3);
1260 tcg_gen_not_i64(shift, shift);
1261 tcg_gen_andi_i64(shift, shift, 0x3f);
1262
1263 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1264 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1265 tcg_temp_free(shift);
1266 }
1267 tcg_temp_free(tmp);
1268 }
1269}
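/* Host-level sketch of the split-shift trick above (hypothetical helper):
   the effective right shift is 64 - 8*(b & 7), which would be an
   undefined shift by 64 when (b & 7) == 0.  Shifting by
   ~(8*(b & 7)) & 63 -- that is, count minus one -- and then by 1 more
   keeps every single shift in the 0..63 range and still yields zero in
   the boundary case. */
static inline uint64_t shr_64_minus_sketch(uint64_t val, unsigned b)
{
    unsigned part = ~(8 * (b & 7)) & 0x3f;  /* (64 - 8*(b&7)) - 1 */
    return (val >> part) >> 1;              /* == val >> (64 - 8*(b&7)) */
}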
1270
248c42f3 1271/* INSBL, INSWL, INSLL, INSQL */
ffec44f1
RH
1272static void gen_ins_l(int ra, int rb, int rc, int islit,
1273 uint8_t lit, uint8_t byte_mask)
248c42f3
RH
1274{
1275 if (unlikely(rc == 31))
1276 return;
1277 else if (unlikely(ra == 31))
1278 tcg_gen_movi_i64(cpu_ir[rc], 0);
1279 else {
1280 TCGv tmp = tcg_temp_new();
1281
1282 /* The instruction description has us left-shift the byte mask
1283 the same number of byte slots as the data and apply the zap
1284 at the end. This is equivalent to simply performing the zap
1285 first and shifting afterward. */
1286 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1287
1288 if (islit) {
1289 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1290 } else {
1291 TCGv shift = tcg_temp_new();
1292 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1293 tcg_gen_shli_i64(shift, shift, 3);
1294 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1295 tcg_temp_free(shift);
1296 }
1297 tcg_temp_free(tmp);
377a43b6 1298 }
b3249f63
AJ
1299}
1300
ffec44f1
RH
1301/* MSKWH, MSKLH, MSKQH */
1302static void gen_msk_h(int ra, int rb, int rc, int islit,
1303 uint8_t lit, uint8_t byte_mask)
1304{
1305 if (unlikely(rc == 31))
1306 return;
1307 else if (unlikely(ra == 31))
1308 tcg_gen_movi_i64(cpu_ir[rc], 0);
1309 else if (islit) {
1310 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1311 } else {
1312 TCGv shift = tcg_temp_new();
1313 TCGv mask = tcg_temp_new();
1314
1315 /* The instruction description is as above, where the byte_mask
1316 is shifted left, and then we extract bits <15:8>. This can be
1317 emulated with a right-shift on the expanded byte mask. This
1318 requires extra care because for an input <2:0> == 0 we need a
1319 shift of 64 bits in order to generate a zero. This is done by
1320 splitting the shift into two parts, the variable shift - 1
1321 followed by a constant 1 shift. The code we expand below is
1322 equivalent to ~((B & 7) * 8) & 63. */
1323
1324 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1325 tcg_gen_shli_i64(shift, shift, 3);
1326 tcg_gen_not_i64(shift, shift);
1327 tcg_gen_andi_i64(shift, shift, 0x3f);
1328 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1329 tcg_gen_shr_i64(mask, mask, shift);
1330 tcg_gen_shri_i64(mask, mask, 1);
1331
1332 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1333
1334 tcg_temp_free(mask);
1335 tcg_temp_free(shift);
1336 }
1337}
1338
14ab1634 1339/* MSKBL, MSKWL, MSKLL, MSKQL */
ffec44f1
RH
1340static void gen_msk_l(int ra, int rb, int rc, int islit,
1341 uint8_t lit, uint8_t byte_mask)
14ab1634
RH
1342{
1343 if (unlikely(rc == 31))
1344 return;
1345 else if (unlikely(ra == 31))
1346 tcg_gen_movi_i64(cpu_ir[rc], 0);
1347 else if (islit) {
1348 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1349 } else {
1350 TCGv shift = tcg_temp_new();
1351 TCGv mask = tcg_temp_new();
1352
1353 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1354 tcg_gen_shli_i64(shift, shift, 3);
1355 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1356 tcg_gen_shl_i64(mask, mask, shift);
1357
1358 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1359
1360 tcg_temp_free(mask);
1361 tcg_temp_free(shift);
1362 }
1363}
1364
04acd307 1365/* Code to call arith3 helpers */
a7812ae4 1366#define ARITH3(name) \
636aa200
BS
1367static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1368 uint8_t lit) \
a7812ae4
PB
1369{ \
1370 if (unlikely(rc == 31)) \
1371 return; \
1372 \
1373 if (ra != 31) { \
1374 if (islit) { \
1375 TCGv tmp = tcg_const_i64(lit); \
1376 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1377 tcg_temp_free(tmp); \
1378 } else \
1379 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1380 } else { \
1381 TCGv tmp1 = tcg_const_i64(0); \
1382 if (islit) { \
1383 TCGv tmp2 = tcg_const_i64(lit); \
1384 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1385 tcg_temp_free(tmp2); \
1386 } else \
1387 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1388 tcg_temp_free(tmp1); \
1389 } \
b3249f63 1390}
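/* For reference, the macro above expands mechanically; e.g. ARITH3(cmpbge)
   defines gen_cmpbge(ra, rb, rc, islit, lit), which forwards to
   gen_helper_cmpbge() with an r31 source replaced by constant zero and a
   literal rb replaced by a constant temporary. */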
a7812ae4
PB
1391ARITH3(cmpbge)
1392ARITH3(addlv)
1393ARITH3(sublv)
1394ARITH3(addqv)
1395ARITH3(subqv)
a7812ae4
PB
1396ARITH3(umulh)
1397ARITH3(mullv)
1398ARITH3(mulqv)
13e4df99
RH
1399ARITH3(minub8)
1400ARITH3(minsb8)
1401ARITH3(minuw4)
1402ARITH3(minsw4)
1403ARITH3(maxub8)
1404ARITH3(maxsb8)
1405ARITH3(maxuw4)
1406ARITH3(maxsw4)
1407ARITH3(perr)
1408
1409#define MVIOP2(name) \
1410static inline void glue(gen_, name)(int rb, int rc) \
1411{ \
1412 if (unlikely(rc == 31)) \
1413 return; \
1414 if (unlikely(rb == 31)) \
1415 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1416 else \
1417 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1418}
1419MVIOP2(pklb)
1420MVIOP2(pkwb)
1421MVIOP2(unpkbl)
1422MVIOP2(unpkbw)
b3249f63 1423
9e05960f
RH
1424static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1425 int islit, uint8_t lit)
01ff9cc8 1426{
9e05960f 1427 TCGv va, vb;
01ff9cc8 1428
9e05960f 1429 if (unlikely(rc == 31)) {
13e4df99 1430 return;
9e05960f 1431 }
01ff9cc8 1432
9e05960f
RH
1433 if (ra == 31) {
1434 va = tcg_const_i64(0);
1435 } else {
1436 va = cpu_ir[ra];
1437 }
1438 if (islit) {
1439 vb = tcg_const_i64(lit);
1440 } else {
1441 vb = cpu_ir[rb];
1442 }
01ff9cc8 1443
9e05960f 1444 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
01ff9cc8 1445
9e05960f
RH
1446 if (ra == 31) {
1447 tcg_temp_free(va);
1448 }
1449 if (islit) {
1450 tcg_temp_free(vb);
1451 }
01ff9cc8
AJ
1452}
1453
ac316ca4
RH
1454static void gen_rx(int ra, int set)
1455{
1456 TCGv_i32 tmp;
1457
1458 if (ra != 31) {
1459 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1460 }
1461
1462 tmp = tcg_const_i32(set);
1463 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1464 tcg_temp_free_i32(tmp);
1465}
1466
4af70374 1467static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
4c9649a9
JM
1468{
1469 uint32_t palcode;
1470 int32_t disp21, disp16, disp12;
f88fe4e3
BS
1471 uint16_t fn11;
1472 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
adf3c8b6 1473 uint8_t lit;
4af70374 1474 ExitStatus ret;
4c9649a9
JM
1475
1476 /* Decode all instruction fields */
1477 opc = insn >> 26;
1478 ra = (insn >> 21) & 0x1F;
1479 rb = (insn >> 16) & 0x1F;
1480 rc = insn & 0x1F;
13e4df99 1481 real_islit = islit = (insn >> 12) & 1;
dfaa8583
AJ
1482 if (rb == 31 && !islit) {
1483 islit = 1;
1484 lit = 0;
1485 } else
1486 lit = (insn >> 13) & 0xFF;
4c9649a9
JM
1487 palcode = insn & 0x03FFFFFF;
1488 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1489 disp16 = (int16_t)(insn & 0x0000FFFF);
1490 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
4c9649a9
JM
1491 fn11 = (insn >> 5) & 0x000007FF;
1492 fpfn = fn11 & 0x3F;
1493 fn7 = (insn >> 5) & 0x0000007F;
1494 fn2 = (insn >> 5) & 0x00000003;
806991da 1495 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
d12d51d5 1496 opc, ra, rb, rc, disp16);
806991da 1497
4af70374 1498 ret = NO_EXIT;
4c9649a9
JM
1499 switch (opc) {
1500 case 0x00:
1501 /* CALL_PAL */
ab471ade
RH
1502#ifdef CONFIG_USER_ONLY
1503 if (palcode == 0x9E) {
1504 /* RDUNIQUE */
1505 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1506 break;
1507 } else if (palcode == 0x9F) {
1508 /* WRUNIQUE */
1509 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1510 break;
1511 }
1512#endif
4c9649a9
JM
1513 if (palcode >= 0x80 && palcode < 0xC0) {
1514 /* Unprivileged PAL call */
8aa3fa20 1515 ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
ab471ade
RH
1516 break;
1517 }
1518#ifndef CONFIG_USER_ONLY
1519 if (palcode < 0x40) {
4c9649a9
JM
1520 /* Privileged PAL code */
1521 if (ctx->mem_idx & 1)
1522 goto invalid_opc;
8aa3fa20 1523 ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
4c9649a9 1524 }
ab471ade
RH
1525#endif
1526 /* Invalid PAL call */
1527 goto invalid_opc;
4c9649a9
JM
1528 case 0x01:
1529 /* OPC01 */
1530 goto invalid_opc;
1531 case 0x02:
1532 /* OPC02 */
1533 goto invalid_opc;
1534 case 0x03:
1535 /* OPC03 */
1536 goto invalid_opc;
1537 case 0x04:
1538 /* OPC04 */
1539 goto invalid_opc;
1540 case 0x05:
1541 /* OPC05 */
1542 goto invalid_opc;
1543 case 0x06:
1544 /* OPC06 */
1545 goto invalid_opc;
1546 case 0x07:
1547 /* OPC07 */
1548 goto invalid_opc;
1549 case 0x08:
1550 /* LDA */
1ef4ef4e 1551 if (likely(ra != 31)) {
496cb5b9 1552 if (rb != 31)
3761035f
AJ
1553 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1554 else
1555 tcg_gen_movi_i64(cpu_ir[ra], disp16);
496cb5b9 1556 }
4c9649a9
JM
1557 break;
1558 case 0x09:
1559 /* LDAH */
1ef4ef4e 1560 if (likely(ra != 31)) {
496cb5b9 1561 if (rb != 31)
3761035f
AJ
1562 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1563 else
1564 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
496cb5b9 1565 }
4c9649a9
JM
1566 break;
1567 case 0x0A:
1568 /* LDBU */
1569 if (!(ctx->amask & AMASK_BWX))
1570 goto invalid_opc;
f18cd223 1571 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1572 break;
1573 case 0x0B:
1574 /* LDQ_U */
f18cd223 1575 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1576 break;
1577 case 0x0C:
1578 /* LDWU */
1579 if (!(ctx->amask & AMASK_BWX))
1580 goto invalid_opc;
577d5e7f 1581 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
4c9649a9
JM
1582 break;
1583 case 0x0D:
1584 /* STW */
6910b8f6 1585 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
4c9649a9
JM
1586 break;
1587 case 0x0E:
1588 /* STB */
6910b8f6 1589 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
4c9649a9
JM
1590 break;
1591 case 0x0F:
1592 /* STQ_U */
6910b8f6 1593 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
4c9649a9
JM
1594 break;
1595 case 0x10:
1596 switch (fn7) {
1597 case 0x00:
1598 /* ADDL */
30c7183b
AJ
1599 if (likely(rc != 31)) {
1600 if (ra != 31) {
1601 if (islit) {
1602 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1603 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1604 } else {
30c7183b
AJ
1605 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1606 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1607 }
30c7183b
AJ
1608 } else {
1609 if (islit)
dfaa8583 1610 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1611 else
dfaa8583 1612 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1613 }
1614 }
4c9649a9
JM
1615 break;
1616 case 0x02:
1617 /* S4ADDL */
30c7183b
AJ
1618 if (likely(rc != 31)) {
1619 if (ra != 31) {
a7812ae4 1620 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1621 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1622 if (islit)
1623 tcg_gen_addi_i64(tmp, tmp, lit);
1624 else
1625 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1626 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1627 tcg_temp_free(tmp);
30c7183b
AJ
1628 } else {
1629 if (islit)
1630 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1631 else
dfaa8583 1632 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1633 }
1634 }
4c9649a9
JM
1635 break;
1636 case 0x09:
1637 /* SUBL */
30c7183b
AJ
1638 if (likely(rc != 31)) {
1639 if (ra != 31) {
dfaa8583 1640 if (islit)
30c7183b 1641 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
dfaa8583 1642 else
30c7183b 1643 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
dfaa8583 1644 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1645 } else {
1646 if (islit)
1647 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1648 else {
30c7183b
AJ
1649 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1650 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
30c7183b
AJ
1651 }
1652 }
 }
4c9649a9
JM
1653 break;
1654 case 0x0B:
1655 /* S4SUBL */
30c7183b
AJ
1656 if (likely(rc != 31)) {
1657 if (ra != 31) {
a7812ae4 1658 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1659 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1660 if (islit)
1661 tcg_gen_subi_i64(tmp, tmp, lit);
1662 else
1663 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1664 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1665 tcg_temp_free(tmp);
30c7183b
AJ
1666 } else {
1667 if (islit)
1668 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1669 else {
30c7183b
AJ
1670 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1671 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1672 }
30c7183b
AJ
1673 }
1674 }
4c9649a9
JM
1675 break;
1676 case 0x0F:
1677 /* CMPBGE */
a7812ae4 1678 gen_cmpbge(ra, rb, rc, islit, lit);
4c9649a9
JM
1679 break;
1680 case 0x12:
1681 /* S8ADDL */
30c7183b
AJ
1682 if (likely(rc != 31)) {
1683 if (ra != 31) {
a7812ae4 1684 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1685 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1686 if (islit)
1687 tcg_gen_addi_i64(tmp, tmp, lit);
1688 else
1689 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1690 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1691 tcg_temp_free(tmp);
30c7183b
AJ
1692 } else {
1693 if (islit)
1694 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1695 else
dfaa8583 1696 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1697 }
1698 }
4c9649a9
JM
1699 break;
1700 case 0x1B:
1701 /* S8SUBL */
30c7183b
AJ
1702 if (likely(rc != 31)) {
1703 if (ra != 31) {
a7812ae4 1704 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1705 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1706 if (islit)
1707 tcg_gen_subi_i64(tmp, tmp, lit);
1708 else
1709 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1710 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1711 tcg_temp_free(tmp);
30c7183b
AJ
1712 } else {
1713 if (islit)
1714 tcg_gen_movi_i64(cpu_ir[rc], -lit);
dfaa8583 1715 else {
30c7183b
AJ
1716 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1717 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
dfaa8583 1718 }
30c7183b
AJ
1719 }
1720 }
4c9649a9
JM
1721 break;
1722 case 0x1D:
1723 /* CMPULT */
01ff9cc8 1724 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
4c9649a9
JM
1725 break;
1726 case 0x20:
1727 /* ADDQ */
30c7183b
AJ
1728 if (likely(rc != 31)) {
1729 if (ra != 31) {
1730 if (islit)
1731 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1732 else
dfaa8583 1733 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1734 } else {
1735 if (islit)
1736 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1737 else
dfaa8583 1738 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1739 }
1740 }
4c9649a9
JM
1741 break;
1742 case 0x22:
1743 /* S4ADDQ */
30c7183b
AJ
1744 if (likely(rc != 31)) {
1745 if (ra != 31) {
a7812ae4 1746 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1747 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1748 if (islit)
1749 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1750 else
1751 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1752 tcg_temp_free(tmp);
30c7183b
AJ
1753 } else {
1754 if (islit)
1755 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1756 else
dfaa8583 1757 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1758 }
1759 }
4c9649a9
JM
1760 break;
1761 case 0x29:
1762 /* SUBQ */
30c7183b
AJ
1763 if (likely(rc != 31)) {
1764 if (ra != 31) {
1765 if (islit)
1766 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1767 else
dfaa8583 1768 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1769 } else {
1770 if (islit)
1771 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1772 else
dfaa8583 1773 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1774 }
1775 }
4c9649a9
JM
1776 break;
1777 case 0x2B:
1778 /* S4SUBQ */
30c7183b
AJ
1779 if (likely(rc != 31)) {
1780 if (ra != 31) {
a7812ae4 1781 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1782 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1783 if (islit)
1784 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1785 else
1786 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1787 tcg_temp_free(tmp);
30c7183b
AJ
1788 } else {
1789 if (islit)
1790 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1791 else
dfaa8583 1792 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1793 }
1794 }
4c9649a9
JM
1795 break;
1796 case 0x2D:
1797 /* CMPEQ */
01ff9cc8 1798 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
4c9649a9
JM
1799 break;
1800 case 0x32:
1801 /* S8ADDQ */
30c7183b
AJ
1802 if (likely(rc != 31)) {
1803 if (ra != 31) {
a7812ae4 1804 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1805 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1806 if (islit)
1807 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1808 else
1809 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1810 tcg_temp_free(tmp);
30c7183b
AJ
1811 } else {
1812 if (islit)
1813 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1814 else
dfaa8583 1815 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1816 }
1817 }
4c9649a9
JM
1818 break;
1819 case 0x3B:
1820 /* S8SUBQ */
30c7183b
AJ
1821 if (likely(rc != 31)) {
1822 if (ra != 31) {
a7812ae4 1823 TCGv tmp = tcg_temp_new();
dfaa8583
AJ
1824 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1825 if (islit)
1826 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1827 else
1828 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1829 tcg_temp_free(tmp);
30c7183b
AJ
1830 } else {
1831 if (islit)
1832 tcg_gen_movi_i64(cpu_ir[rc], -lit);
30c7183b 1833 else
dfaa8583 1834 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1835 }
1836 }
4c9649a9
JM
1837 break;
1838 case 0x3D:
1839 /* CMPULE */
01ff9cc8 1840 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
4c9649a9
JM
1841 break;
1842 case 0x40:
1843 /* ADDL/V */
a7812ae4 1844 gen_addlv(ra, rb, rc, islit, lit);
4c9649a9
JM
1845 break;
1846 case 0x49:
1847 /* SUBL/V */
a7812ae4 1848 gen_sublv(ra, rb, rc, islit, lit);
4c9649a9
JM
1849 break;
1850 case 0x4D:
1851 /* CMPLT */
01ff9cc8 1852 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
4c9649a9
JM
1853 break;
1854 case 0x60:
1855 /* ADDQ/V */
a7812ae4 1856 gen_addqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1857 break;
1858 case 0x69:
1859 /* SUBQ/V */
a7812ae4 1860 gen_subqv(ra, rb, rc, islit, lit);
4c9649a9
JM
1861 break;
1862 case 0x6D:
1863 /* CMPLE */
01ff9cc8 1864 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
4c9649a9
JM
1865 break;
1866 default:
1867 goto invalid_opc;
1868 }
1869 break;
1870 case 0x11:
1871 switch (fn7) {
1872 case 0x00:
1873 /* AND */
30c7183b 1874 if (likely(rc != 31)) {
dfaa8583 1875 if (ra == 31)
30c7183b
AJ
1876 tcg_gen_movi_i64(cpu_ir[rc], 0);
1877 else if (islit)
1878 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1879 else
1880 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1881 }
4c9649a9
JM
1882 break;
1883 case 0x08:
1884 /* BIC */
30c7183b
AJ
1885 if (likely(rc != 31)) {
1886 if (ra != 31) {
1887 if (islit)
1888 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1889 else
1890 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1891 } else
1892 tcg_gen_movi_i64(cpu_ir[rc], 0);
1893 }
4c9649a9
JM
1894 break;
1895 case 0x14:
1896 /* CMOVLBS */
bbe1dab4 1897 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1898 break;
1899 case 0x16:
1900 /* CMOVLBC */
bbe1dab4 1901 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
4c9649a9
JM
1902 break;
1903 case 0x20:
1904 /* BIS */
30c7183b
AJ
1905 if (likely(rc != 31)) {
1906 if (ra != 31) {
1907 if (islit)
1908 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
8bb6e981 1909 else
30c7183b 1910 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
4c9649a9 1911 } else {
30c7183b
AJ
1912 if (islit)
1913 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1914 else
dfaa8583 1915 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
4c9649a9 1916 }
4c9649a9
JM
1917 }
1918 break;
1919 case 0x24:
1920 /* CMOVEQ */
bbe1dab4 1921 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1922 break;
1923 case 0x26:
1924 /* CMOVNE */
bbe1dab4 1925 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1926 break;
1927 case 0x28:
1928 /* ORNOT */
30c7183b 1929 if (likely(rc != 31)) {
dfaa8583 1930 if (ra != 31) {
30c7183b
AJ
1931 if (islit)
1932 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1933 else
1934 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1935 } else {
1936 if (islit)
1937 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1938 else
1939 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1940 }
1941 }
4c9649a9
JM
1942 break;
1943 case 0x40:
1944 /* XOR */
30c7183b
AJ
1945 if (likely(rc != 31)) {
1946 if (ra != 31) {
1947 if (islit)
1948 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
30c7183b 1949 else
dfaa8583 1950 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1951 } else {
1952 if (islit)
1953 tcg_gen_movi_i64(cpu_ir[rc], lit);
30c7183b 1954 else
dfaa8583 1955 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1956 }
1957 }
4c9649a9
JM
1958 break;
1959 case 0x44:
1960 /* CMOVLT */
bbe1dab4 1961 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1962 break;
1963 case 0x46:
1964 /* CMOVGE */
bbe1dab4 1965 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
1966 break;
1967 case 0x48:
1968 /* EQV */
30c7183b
AJ
1969 if (likely(rc != 31)) {
1970 if (ra != 31) {
1971 if (islit)
1972 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1b581c44
AJ
1973 else
1974 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
30c7183b
AJ
1975 } else {
1976 if (islit)
1977 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
30c7183b 1978 else
dfaa8583 1979 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
30c7183b
AJ
1980 }
1981 }
4c9649a9
JM
1982 break;
1983 case 0x61:
1984 /* AMASK */
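            /* AMASK returns Rb with the bit for every implemented
               architecture extension cleared.  The implver switch below
               mirrors hardware: the oldest implementations pass the operand
               through unchanged, later ones clear the bits recorded in
               ctx->amask.  */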
ae8ecd42
AJ
1985 if (likely(rc != 31)) {
1986 if (islit)
1a1f7dbc 1987 tcg_gen_movi_i64(cpu_ir[rc], lit);
ae8ecd42 1988 else
1a1f7dbc
AJ
1989 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1990 switch (ctx->env->implver) {
1991 case IMPLVER_2106x:
1992 /* EV4, EV45, LCA, LCA45 & EV5 */
1993 break;
1994 case IMPLVER_21164:
1995 case IMPLVER_21264:
1996 case IMPLVER_21364:
1997 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1998 ~(uint64_t)ctx->amask);
1999 break;
2000 }
ae8ecd42 2001 }
4c9649a9
JM
2002 break;
2003 case 0x64:
2004 /* CMOVLE */
bbe1dab4 2005 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2006 break;
2007 case 0x66:
2008 /* CMOVGT */
bbe1dab4 2009 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
4c9649a9
JM
2010 break;
2011 case 0x6C:
2012 /* IMPLVER */
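            /* IMPLVER returns a small integer identifying the processor
               generation (0 = EV4 class, 1 = EV5 class, 2 = EV6 class),
               taken here from ctx->env->implver.  */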
3761035f 2013 if (rc != 31)
8579095b 2014 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
4c9649a9
JM
2015 break;
2016 default:
2017 goto invalid_opc;
2018 }
2019 break;
2020 case 0x12:
2021 switch (fn7) {
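        /* Byte-manipulation group.  The trailing constant handed to the
           gen_msk, gen_ext and gen_ins generators is a byte mask giving the
           operand width: 0x01 = byte, 0x03 = word, 0x0f = longword,
           0xff = quadword.  */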
2022 case 0x02:
2023 /* MSKBL */
14ab1634 2024 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2025 break;
2026 case 0x06:
2027 /* EXTBL */
377a43b6 2028 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2029 break;
2030 case 0x0B:
2031 /* INSBL */
248c42f3 2032 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
4c9649a9
JM
2033 break;
2034 case 0x12:
2035 /* MSKWL */
14ab1634 2036 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2037 break;
2038 case 0x16:
2039 /* EXTWL */
377a43b6 2040 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2041 break;
2042 case 0x1B:
2043 /* INSWL */
248c42f3 2044 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2045 break;
2046 case 0x22:
2047 /* MSKLL */
14ab1634 2048 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2049 break;
2050 case 0x26:
2051 /* EXTLL */
377a43b6 2052 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2053 break;
2054 case 0x2B:
2055 /* INSLL */
248c42f3 2056 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2057 break;
2058 case 0x30:
2059 /* ZAP */
a7812ae4 2060 gen_zap(ra, rb, rc, islit, lit);
4c9649a9
JM
2061 break;
2062 case 0x31:
2063 /* ZAPNOT */
a7812ae4 2064 gen_zapnot(ra, rb, rc, islit, lit);
4c9649a9
JM
2065 break;
2066 case 0x32:
2067 /* MSKQL */
14ab1634 2068 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2069 break;
2070 case 0x34:
2071 /* SRL */
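            /* Only the low 6 bits of the shift count are significant on
               Alpha, hence the "& 0x3f" below; SLL and SRA are handled the
               same way.  */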
30c7183b
AJ
2072 if (likely(rc != 31)) {
2073 if (ra != 31) {
2074 if (islit)
2075 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2076 else {
a7812ae4 2077 TCGv shift = tcg_temp_new();
30c7183b
AJ
2078 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2079 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2080 tcg_temp_free(shift);
dfaa8583 2081 }
30c7183b
AJ
2082 } else
2083 tcg_gen_movi_i64(cpu_ir[rc], 0);
2084 }
4c9649a9
JM
2085 break;
2086 case 0x36:
2087 /* EXTQL */
377a43b6 2088 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2089 break;
2090 case 0x39:
2091 /* SLL */
30c7183b
AJ
2092 if (likely(rc != 31)) {
2093 if (ra != 31) {
2094 if (islit)
2095 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2096 else {
a7812ae4 2097 TCGv shift = tcg_temp_new();
30c7183b
AJ
2098 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2099 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2100 tcg_temp_free(shift);
dfaa8583 2101 }
30c7183b
AJ
2102 } else
2103 tcg_gen_movi_i64(cpu_ir[rc], 0);
2104 }
4c9649a9
JM
2105 break;
2106 case 0x3B:
2107 /* INSQL */
248c42f3 2108 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2109 break;
2110 case 0x3C:
2111 /* SRA */
30c7183b
AJ
2112 if (likely(rc != 31)) {
2113 if (ra != 31) {
2114 if (islit)
2115 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
dfaa8583 2116 else {
a7812ae4 2117 TCGv shift = tcg_temp_new();
30c7183b
AJ
2118 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2119 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2120 tcg_temp_free(shift);
dfaa8583 2121 }
30c7183b
AJ
2122 } else
2123 tcg_gen_movi_i64(cpu_ir[rc], 0);
2124 }
4c9649a9
JM
2125 break;
2126 case 0x52:
2127 /* MSKWH */
ffec44f1 2128 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2129 break;
2130 case 0x57:
2131 /* INSWH */
50eb6e5c 2132 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2133 break;
2134 case 0x5A:
2135 /* EXTWH */
377a43b6 2136 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
4c9649a9
JM
2137 break;
2138 case 0x62:
2139 /* MSKLH */
ffec44f1 2140 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2141 break;
2142 case 0x67:
2143 /* INSLH */
50eb6e5c 2144 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2145 break;
2146 case 0x6A:
2147 /* EXTLH */
377a43b6 2148 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
4c9649a9
JM
2149 break;
2150 case 0x72:
2151 /* MSKQH */
ffec44f1 2152 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2153 break;
2154 case 0x77:
2155 /* INSQH */
50eb6e5c 2156 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2157 break;
2158 case 0x7A:
2159 /* EXTQH */
377a43b6 2160 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
4c9649a9
JM
2161 break;
2162 default:
2163 goto invalid_opc;
2164 }
2165 break;
2166 case 0x13:
2167 switch (fn7) {
2168 case 0x00:
2169 /* MULL */
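            /* MULL is a longword multiply: the low 32 bits of the product
               are sign-extended into the destination (the ext32s below),
               whereas MULQ keeps the full 64-bit result.  */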
30c7183b 2170 if (likely(rc != 31)) {
dfaa8583 2171 if (ra == 31)
30c7183b
AJ
2172 tcg_gen_movi_i64(cpu_ir[rc], 0);
2173 else {
2174 if (islit)
2175 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2176 else
2177 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2178 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2179 }
2180 }
4c9649a9
JM
2181 break;
2182 case 0x20:
2183 /* MULQ */
30c7183b 2184 if (likely(rc != 31)) {
dfaa8583 2185 if (ra == 31)
30c7183b
AJ
2186 tcg_gen_movi_i64(cpu_ir[rc], 0);
2187 else if (islit)
2188 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2189 else
2190 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2191 }
4c9649a9
JM
2192 break;
2193 case 0x30:
2194 /* UMULH */
a7812ae4 2195 gen_umulh(ra, rb, rc, islit, lit);
4c9649a9
JM
2196 break;
2197 case 0x40:
2198 /* MULL/V */
a7812ae4 2199 gen_mullv(ra, rb, rc, islit, lit);
4c9649a9
JM
2200 break;
2201 case 0x60:
2202 /* MULQ/V */
a7812ae4 2203 gen_mulqv(ra, rb, rc, islit, lit);
4c9649a9
JM
2204 break;
2205 default:
2206 goto invalid_opc;
2207 }
2208 break;
2209 case 0x14:
f24518b5 2210 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2211 case 0x04:
2212 /* ITOFS */
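            /* ITOFS moves an integer register into an FP register,
               reinterpreting the low 32 bits as an S-float memory pattern;
               gen_helper_memory_to_s performs the same expansion to
               register format that an LDS load uses.  */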
2213 if (!(ctx->amask & AMASK_FIX))
2214 goto invalid_opc;
f18cd223
AJ
2215 if (likely(rc != 31)) {
2216 if (ra != 31) {
a7812ae4 2217 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2218 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2219 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2220 tcg_temp_free_i32(tmp);
f18cd223
AJ
2221 } else
2222 tcg_gen_movi_i64(cpu_fir[rc], 0);
2223 }
4c9649a9
JM
2224 break;
2225 case 0x0A:
2226 /* SQRTF */
2227 if (!(ctx->amask & AMASK_FIX))
2228 goto invalid_opc;
a7812ae4 2229 gen_fsqrtf(rb, rc);
4c9649a9
JM
2230 break;
2231 case 0x0B:
2232 /* SQRTS */
2233 if (!(ctx->amask & AMASK_FIX))
2234 goto invalid_opc;
f24518b5 2235 gen_fsqrts(ctx, rb, rc, fn11);
4c9649a9
JM
2236 break;
2237 case 0x14:
2238 /* ITOFF */
2239 if (!(ctx->amask & AMASK_FIX))
2240 goto invalid_opc;
f18cd223
AJ
2241 if (likely(rc != 31)) {
2242 if (ra != 31) {
a7812ae4 2243 TCGv_i32 tmp = tcg_temp_new_i32();
f18cd223 2244 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
a7812ae4
PB
2245 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2246 tcg_temp_free_i32(tmp);
f18cd223
AJ
2247 } else
2248 tcg_gen_movi_i64(cpu_fir[rc], 0);
2249 }
4c9649a9
JM
2250 break;
2251 case 0x24:
2252 /* ITOFT */
2253 if (!(ctx->amask & AMASK_FIX))
2254 goto invalid_opc;
f18cd223
AJ
2255 if (likely(rc != 31)) {
2256 if (ra != 31)
2257 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2258 else
2259 tcg_gen_movi_i64(cpu_fir[rc], 0);
2260 }
4c9649a9
JM
2261 break;
2262 case 0x2A:
2263 /* SQRTG */
2264 if (!(ctx->amask & AMASK_FIX))
2265 goto invalid_opc;
a7812ae4 2266 gen_fsqrtg(rb, rc);
4c9649a9
JM
2267 break;
 2268 case 0x2B:
2269 /* SQRTT */
2270 if (!(ctx->amask & AMASK_FIX))
2271 goto invalid_opc;
f24518b5 2272 gen_fsqrtt(ctx, rb, rc, fn11);
4c9649a9
JM
2273 break;
2274 default:
2275 goto invalid_opc;
2276 }
2277 break;
2278 case 0x15:
2279 /* VAX floating point */
2280 /* XXX: rounding mode and trap are ignored (!) */
f24518b5 2281 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2282 case 0x00:
2283 /* ADDF */
a7812ae4 2284 gen_faddf(ra, rb, rc);
4c9649a9
JM
2285 break;
2286 case 0x01:
2287 /* SUBF */
a7812ae4 2288 gen_fsubf(ra, rb, rc);
4c9649a9
JM
2289 break;
2290 case 0x02:
2291 /* MULF */
a7812ae4 2292 gen_fmulf(ra, rb, rc);
4c9649a9
JM
2293 break;
2294 case 0x03:
2295 /* DIVF */
a7812ae4 2296 gen_fdivf(ra, rb, rc);
4c9649a9
JM
2297 break;
2298 case 0x1E:
2299 /* CVTDG */
2300#if 0 // TODO
a7812ae4 2301 gen_fcvtdg(rb, rc);
4c9649a9
JM
2302#else
2303 goto invalid_opc;
2304#endif
2305 break;
2306 case 0x20:
2307 /* ADDG */
a7812ae4 2308 gen_faddg(ra, rb, rc);
4c9649a9
JM
2309 break;
2310 case 0x21:
2311 /* SUBG */
a7812ae4 2312 gen_fsubg(ra, rb, rc);
4c9649a9
JM
2313 break;
2314 case 0x22:
2315 /* MULG */
a7812ae4 2316 gen_fmulg(ra, rb, rc);
4c9649a9
JM
2317 break;
2318 case 0x23:
2319 /* DIVG */
a7812ae4 2320 gen_fdivg(ra, rb, rc);
4c9649a9
JM
2321 break;
2322 case 0x25:
2323 /* CMPGEQ */
a7812ae4 2324 gen_fcmpgeq(ra, rb, rc);
4c9649a9
JM
2325 break;
2326 case 0x26:
2327 /* CMPGLT */
a7812ae4 2328 gen_fcmpglt(ra, rb, rc);
4c9649a9
JM
2329 break;
2330 case 0x27:
2331 /* CMPGLE */
a7812ae4 2332 gen_fcmpgle(ra, rb, rc);
4c9649a9
JM
2333 break;
2334 case 0x2C:
2335 /* CVTGF */
a7812ae4 2336 gen_fcvtgf(rb, rc);
4c9649a9
JM
2337 break;
2338 case 0x2D:
2339 /* CVTGD */
2340#if 0 // TODO
a7812ae4 2341 gen_fcvtgd(rb, rc);
4c9649a9
JM
2342#else
2343 goto invalid_opc;
2344#endif
2345 break;
2346 case 0x2F:
2347 /* CVTGQ */
a7812ae4 2348 gen_fcvtgq(rb, rc);
4c9649a9
JM
2349 break;
2350 case 0x3C:
2351 /* CVTQF */
a7812ae4 2352 gen_fcvtqf(rb, rc);
4c9649a9
JM
2353 break;
2354 case 0x3E:
2355 /* CVTQG */
a7812ae4 2356 gen_fcvtqg(rb, rc);
4c9649a9
JM
2357 break;
2358 default:
2359 goto invalid_opc;
2360 }
2361 break;
2362 case 0x16:
2363 /* IEEE floating-point */
f24518b5 2364 switch (fpfn) { /* fn11 & 0x3F */
4c9649a9
JM
2365 case 0x00:
2366 /* ADDS */
f24518b5 2367 gen_fadds(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2368 break;
2369 case 0x01:
2370 /* SUBS */
f24518b5 2371 gen_fsubs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2372 break;
2373 case 0x02:
2374 /* MULS */
f24518b5 2375 gen_fmuls(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2376 break;
2377 case 0x03:
2378 /* DIVS */
f24518b5 2379 gen_fdivs(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2380 break;
2381 case 0x20:
2382 /* ADDT */
f24518b5 2383 gen_faddt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2384 break;
2385 case 0x21:
2386 /* SUBT */
f24518b5 2387 gen_fsubt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2388 break;
2389 case 0x22:
2390 /* MULT */
f24518b5 2391 gen_fmult(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2392 break;
2393 case 0x23:
2394 /* DIVT */
f24518b5 2395 gen_fdivt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2396 break;
2397 case 0x24:
2398 /* CMPTUN */
f24518b5 2399 gen_fcmptun(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2400 break;
2401 case 0x25:
2402 /* CMPTEQ */
f24518b5 2403 gen_fcmpteq(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2404 break;
2405 case 0x26:
2406 /* CMPTLT */
f24518b5 2407 gen_fcmptlt(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2408 break;
2409 case 0x27:
2410 /* CMPTLE */
f24518b5 2411 gen_fcmptle(ctx, ra, rb, rc, fn11);
4c9649a9
JM
2412 break;
2413 case 0x2C:
a74b4d2c 2414 if (fn11 == 0x2AC || fn11 == 0x6AC) {
4c9649a9 2415 /* CVTST */
f24518b5 2416 gen_fcvtst(ctx, rb, rc, fn11);
4c9649a9
JM
2417 } else {
2418 /* CVTTS */
f24518b5 2419 gen_fcvtts(ctx, rb, rc, fn11);
4c9649a9
JM
2420 }
2421 break;
2422 case 0x2F:
2423 /* CVTTQ */
f24518b5 2424 gen_fcvttq(ctx, rb, rc, fn11);
4c9649a9
JM
2425 break;
2426 case 0x3C:
2427 /* CVTQS */
f24518b5 2428 gen_fcvtqs(ctx, rb, rc, fn11);
4c9649a9
JM
2429 break;
2430 case 0x3E:
2431 /* CVTQT */
f24518b5 2432 gen_fcvtqt(ctx, rb, rc, fn11);
4c9649a9
JM
2433 break;
2434 default:
2435 goto invalid_opc;
2436 }
2437 break;
2438 case 0x17:
2439 switch (fn11) {
2440 case 0x010:
2441 /* CVTLQ */
a7812ae4 2442 gen_fcvtlq(rb, rc);
4c9649a9
JM
2443 break;
2444 case 0x020:
f18cd223 2445 if (likely(rc != 31)) {
a06d48d9 2446 if (ra == rb) {
4c9649a9 2447 /* FMOV */
a06d48d9
RH
2448 if (ra == 31)
2449 tcg_gen_movi_i64(cpu_fir[rc], 0);
2450 else
2451 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2452 } else {
f18cd223 2453 /* CPYS */
a7812ae4 2454 gen_fcpys(ra, rb, rc);
a06d48d9 2455 }
4c9649a9
JM
2456 }
2457 break;
2458 case 0x021:
2459 /* CPYSN */
a7812ae4 2460 gen_fcpysn(ra, rb, rc);
4c9649a9
JM
2461 break;
2462 case 0x022:
2463 /* CPYSE */
a7812ae4 2464 gen_fcpyse(ra, rb, rc);
4c9649a9
JM
2465 break;
2466 case 0x024:
2467 /* MT_FPCR */
f18cd223 2468 if (likely(ra != 31))
a7812ae4 2469 gen_helper_store_fpcr(cpu_fir[ra]);
f18cd223
AJ
2470 else {
2471 TCGv tmp = tcg_const_i64(0);
a7812ae4 2472 gen_helper_store_fpcr(tmp);
f18cd223
AJ
2473 tcg_temp_free(tmp);
2474 }
4c9649a9
JM
2475 break;
2476 case 0x025:
2477 /* MF_FPCR */
f18cd223 2478 if (likely(ra != 31))
a7812ae4 2479 gen_helper_load_fpcr(cpu_fir[ra]);
4c9649a9
JM
2480 break;
2481 case 0x02A:
2482 /* FCMOVEQ */
bbe1dab4 2483 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
4c9649a9
JM
2484 break;
2485 case 0x02B:
2486 /* FCMOVNE */
bbe1dab4 2487 gen_fcmov(TCG_COND_NE, ra, rb, rc);
4c9649a9
JM
2488 break;
2489 case 0x02C:
2490 /* FCMOVLT */
bbe1dab4 2491 gen_fcmov(TCG_COND_LT, ra, rb, rc);
4c9649a9
JM
2492 break;
2493 case 0x02D:
2494 /* FCMOVGE */
bbe1dab4 2495 gen_fcmov(TCG_COND_GE, ra, rb, rc);
4c9649a9
JM
2496 break;
2497 case 0x02E:
2498 /* FCMOVLE */
bbe1dab4 2499 gen_fcmov(TCG_COND_LE, ra, rb, rc);
4c9649a9
JM
2500 break;
2501 case 0x02F:
2502 /* FCMOVGT */
bbe1dab4 2503 gen_fcmov(TCG_COND_GT, ra, rb, rc);
4c9649a9
JM
2504 break;
2505 case 0x030:
2506 /* CVTQL */
a7812ae4 2507 gen_fcvtql(rb, rc);
4c9649a9
JM
2508 break;
2509 case 0x130:
2510 /* CVTQL/V */
4c9649a9
JM
2511 case 0x530:
2512 /* CVTQL/SV */
735cf45f
RH
2513 /* ??? I'm pretty sure there's nothing that /sv needs to do that
 2514 /v doesn't do. The only thing I can think of is that /sv is a
 2515 valid instruction merely for completeness in the ISA. */
2516 gen_fcvtql_v(ctx, rb, rc);
4c9649a9
JM
2517 break;
2518 default:
2519 goto invalid_opc;
2520 }
2521 break;
2522 case 0x18:
2523 switch ((uint16_t)disp16) {
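        /* Miscellaneous memory-format instructions.  Treating the barriers
           and prefetch hints below as no-ops is believed safe here because
           guest code is executed single-threaded and in order by TCG, so
           there is no reordering or speculation to fence off.  */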
2524 case 0x0000:
2525 /* TRAPB */
4af70374 2526 /* No-op. */
4c9649a9
JM
2527 break;
2528 case 0x0400:
2529 /* EXCB */
4af70374 2530 /* No-op. */
4c9649a9
JM
2531 break;
2532 case 0x4000:
2533 /* MB */
2534 /* No-op */
2535 break;
2536 case 0x4400:
2537 /* WMB */
2538 /* No-op */
2539 break;
2540 case 0x8000:
2541 /* FETCH */
2542 /* No-op */
2543 break;
2544 case 0xA000:
2545 /* FETCH_M */
2546 /* No-op */
2547 break;
2548 case 0xC000:
2549 /* RPCC */
3761035f 2550 if (ra != 31)
a7812ae4 2551 gen_helper_load_pcc(cpu_ir[ra]);
4c9649a9
JM
2552 break;
2553 case 0xE000:
2554 /* RC */
ac316ca4 2555 gen_rx(ra, 0);
4c9649a9
JM
2556 break;
2557 case 0xE800:
2558 /* ECB */
4c9649a9
JM
2559 break;
2560 case 0xF000:
2561 /* RS */
ac316ca4 2562 gen_rx(ra, 1);
4c9649a9
JM
2563 break;
2564 case 0xF800:
2565 /* WH64 */
2566 /* No-op */
2567 break;
2568 default:
2569 goto invalid_opc;
2570 }
2571 break;
2572 case 0x19:
2573 /* HW_MFPR (PALcode) */
2574#if defined (CONFIG_USER_ONLY)
2575 goto invalid_opc;
2576#else
2577 if (!ctx->pal_mode)
2578 goto invalid_opc;
8bb6e981
AJ
2579 if (ra != 31) {
2580 TCGv tmp = tcg_const_i32(insn & 0xFF);
a7812ae4 2581 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
8bb6e981
AJ
2582 tcg_temp_free(tmp);
2583 }
4c9649a9
JM
2584 break;
2585#endif
2586 case 0x1A:
49563a72
RH
2587 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2588 prediction stack action, which of course we don't implement. */
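            /* Bits <15:14> of the displacement field encode which of the
               four mnemonics was used and bits <13:0> are only a predicted-
               target hint, so a single PC update plus the return-address
               write below covers all of them.  */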
2589 if (rb != 31) {
3761035f 2590 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
49563a72 2591 } else {
3761035f 2592 tcg_gen_movi_i64(cpu_pc, 0);
49563a72
RH
2593 }
2594 if (ra != 31) {
1304ca87 2595 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
49563a72 2596 }
4af70374 2597 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2598 break;
2599 case 0x1B:
2600 /* HW_LD (PALcode) */
2601#if defined (CONFIG_USER_ONLY)
2602 goto invalid_opc;
2603#else
2604 if (!ctx->pal_mode)
2605 goto invalid_opc;
8bb6e981 2606 if (ra != 31) {
a7812ae4 2607 TCGv addr = tcg_temp_new();
8bb6e981
AJ
2608 if (rb != 31)
2609 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2610 else
2611 tcg_gen_movi_i64(addr, disp12);
2612 switch ((insn >> 12) & 0xF) {
2613 case 0x0:
b5d51029 2614 /* Longword physical access (hw_ldl/p) */
a7812ae4 2615 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2616 break;
2617 case 0x1:
b5d51029 2618 /* Quadword physical access (hw_ldq/p) */
a7812ae4 2619 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2620 break;
2621 case 0x2:
b5d51029 2622 /* Longword physical access with lock (hw_ldl_l/p) */
a7812ae4 2623 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2624 break;
2625 case 0x3:
b5d51029 2626 /* Quadword physical access with lock (hw_ldq_l/p) */
a7812ae4 2627 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2628 break;
2629 case 0x4:
b5d51029
AJ
2630 /* Longword virtual PTE fetch (hw_ldl/v) */
2631 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2632 break;
2633 case 0x5:
b5d51029
AJ
2634 /* Quadword virtual PTE fetch (hw_ldq/v) */
2635 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2636 break;
2637 case 0x6:
 2638 /* Invalid */
b5d51029 2639 goto invalid_opc;
8bb6e981
AJ
2640 case 0x7:
 2641 /* Invalid */
b5d51029 2642 goto invalid_opc;
8bb6e981 2643 case 0x8:
b5d51029 2644 /* Longword virtual access (hw_ldl) */
a7812ae4
PB
2645 gen_helper_st_virt_to_phys(addr, addr);
2646 gen_helper_ldl_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2647 break;
2648 case 0x9:
b5d51029 2649 /* Quadword virtual access (hw_ldq) */
a7812ae4
PB
2650 gen_helper_st_virt_to_phys(addr, addr);
2651 gen_helper_ldq_raw(cpu_ir[ra], addr);
8bb6e981
AJ
2652 break;
2653 case 0xA:
b5d51029
AJ
2654 /* Longword virtual access with protection check (hw_ldl/w) */
2655 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2656 break;
2657 case 0xB:
b5d51029
AJ
2658 /* Quadword virtual access with protection check (hw_ldq/w) */
2659 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
8bb6e981
AJ
2660 break;
2661 case 0xC:
b5d51029 2662 /* Longword virtual access with alt access mode (hw_ldl/a)*/
a7812ae4
PB
2663 gen_helper_set_alt_mode();
2664 gen_helper_st_virt_to_phys(addr, addr);
2665 gen_helper_ldl_raw(cpu_ir[ra], addr);
2666 gen_helper_restore_mode();
8bb6e981
AJ
2667 break;
2668 case 0xD:
b5d51029 2669 /* Quadword virtual access with alt access mode (hw_ldq/a) */
a7812ae4
PB
2670 gen_helper_set_alt_mode();
2671 gen_helper_st_virt_to_phys(addr, addr);
2672 gen_helper_ldq_raw(cpu_ir[ra], addr);
2673 gen_helper_restore_mode();
8bb6e981
AJ
2674 break;
2675 case 0xE:
2676 /* Longword virtual access with alternate access mode and
b5d51029 2677 * protection checks (hw_ldl/wa)
8bb6e981 2678 */
a7812ae4
PB
2679 gen_helper_set_alt_mode();
2680 gen_helper_ldl_data(cpu_ir[ra], addr);
2681 gen_helper_restore_mode();
8bb6e981
AJ
2682 break;
2683 case 0xF:
2684 /* Quadword virtual access with alternate access mode and
b5d51029 2685 * protection checks (hw_ldq/wa)
8bb6e981 2686 */
a7812ae4
PB
2687 gen_helper_set_alt_mode();
2688 gen_helper_ldq_data(cpu_ir[ra], addr);
2689 gen_helper_restore_mode();
8bb6e981
AJ
2690 break;
2691 }
2692 tcg_temp_free(addr);
4c9649a9 2693 }
4c9649a9
JM
2694 break;
2695#endif
2696 case 0x1C:
2697 switch (fn7) {
2698 case 0x00:
2699 /* SEXTB */
2700 if (!(ctx->amask & AMASK_BWX))
2701 goto invalid_opc;
ae8ecd42
AJ
2702 if (likely(rc != 31)) {
2703 if (islit)
2704 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
ae8ecd42 2705 else
dfaa8583 2706 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2707 }
4c9649a9
JM
2708 break;
2709 case 0x01:
2710 /* SEXTW */
2711 if (!(ctx->amask & AMASK_BWX))
2712 goto invalid_opc;
ae8ecd42
AJ
2713 if (likely(rc != 31)) {
2714 if (islit)
2715 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
ae8ecd42 2716 else
dfaa8583 2717 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2718 }
4c9649a9
JM
2719 break;
2720 case 0x30:
2721 /* CTPOP */
2722 if (!(ctx->amask & AMASK_CIX))
2723 goto invalid_opc;
ae8ecd42
AJ
2724 if (likely(rc != 31)) {
2725 if (islit)
2726 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
ae8ecd42 2727 else
a7812ae4 2728 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2729 }
4c9649a9
JM
2730 break;
2731 case 0x31:
2732 /* PERR */
2733 if (!(ctx->amask & AMASK_MVI))
2734 goto invalid_opc;
13e4df99 2735 gen_perr(ra, rb, rc, islit, lit);
4c9649a9
JM
2736 break;
2737 case 0x32:
2738 /* CTLZ */
2739 if (!(ctx->amask & AMASK_CIX))
2740 goto invalid_opc;
ae8ecd42
AJ
2741 if (likely(rc != 31)) {
2742 if (islit)
2743 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
ae8ecd42 2744 else
a7812ae4 2745 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2746 }
4c9649a9
JM
2747 break;
2748 case 0x33:
2749 /* CTTZ */
2750 if (!(ctx->amask & AMASK_CIX))
2751 goto invalid_opc;
ae8ecd42
AJ
2752 if (likely(rc != 31)) {
2753 if (islit)
2754 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
ae8ecd42 2755 else
a7812ae4 2756 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
ae8ecd42 2757 }
4c9649a9
JM
2758 break;
2759 case 0x34:
2760 /* UNPKBW */
2761 if (!(ctx->amask & AMASK_MVI))
2762 goto invalid_opc;
13e4df99
RH
2763 if (real_islit || ra != 31)
2764 goto invalid_opc;
2765 gen_unpkbw (rb, rc);
4c9649a9
JM
2766 break;
2767 case 0x35:
13e4df99 2768 /* UNPKBL */
4c9649a9
JM
2769 if (!(ctx->amask & AMASK_MVI))
2770 goto invalid_opc;
13e4df99
RH
2771 if (real_islit || ra != 31)
2772 goto invalid_opc;
2773 gen_unpkbl (rb, rc);
4c9649a9
JM
2774 break;
2775 case 0x36:
2776 /* PKWB */
2777 if (!(ctx->amask & AMASK_MVI))
2778 goto invalid_opc;
13e4df99
RH
2779 if (real_islit || ra != 31)
2780 goto invalid_opc;
2781 gen_pkwb (rb, rc);
4c9649a9
JM
2782 break;
2783 case 0x37:
2784 /* PKLB */
2785 if (!(ctx->amask & AMASK_MVI))
2786 goto invalid_opc;
13e4df99
RH
2787 if (real_islit || ra != 31)
2788 goto invalid_opc;
2789 gen_pklb (rb, rc);
4c9649a9
JM
2790 break;
2791 case 0x38:
2792 /* MINSB8 */
2793 if (!(ctx->amask & AMASK_MVI))
2794 goto invalid_opc;
13e4df99 2795 gen_minsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2796 break;
2797 case 0x39:
2798 /* MINSW4 */
2799 if (!(ctx->amask & AMASK_MVI))
2800 goto invalid_opc;
13e4df99 2801 gen_minsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2802 break;
2803 case 0x3A:
2804 /* MINUB8 */
2805 if (!(ctx->amask & AMASK_MVI))
2806 goto invalid_opc;
13e4df99 2807 gen_minub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2808 break;
2809 case 0x3B:
2810 /* MINUW4 */
2811 if (!(ctx->amask & AMASK_MVI))
2812 goto invalid_opc;
13e4df99 2813 gen_minuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2814 break;
2815 case 0x3C:
2816 /* MAXUB8 */
2817 if (!(ctx->amask & AMASK_MVI))
2818 goto invalid_opc;
13e4df99 2819 gen_maxub8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2820 break;
2821 case 0x3D:
2822 /* MAXUW4 */
2823 if (!(ctx->amask & AMASK_MVI))
2824 goto invalid_opc;
13e4df99 2825 gen_maxuw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2826 break;
2827 case 0x3E:
2828 /* MAXSB8 */
2829 if (!(ctx->amask & AMASK_MVI))
2830 goto invalid_opc;
13e4df99 2831 gen_maxsb8 (ra, rb, rc, islit, lit);
4c9649a9
JM
2832 break;
2833 case 0x3F:
2834 /* MAXSW4 */
2835 if (!(ctx->amask & AMASK_MVI))
2836 goto invalid_opc;
13e4df99 2837 gen_maxsw4 (ra, rb, rc, islit, lit);
4c9649a9
JM
2838 break;
2839 case 0x70:
2840 /* FTOIT */
2841 if (!(ctx->amask & AMASK_FIX))
2842 goto invalid_opc;
f18cd223
AJ
2843 if (likely(rc != 31)) {
2844 if (ra != 31)
2845 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2846 else
2847 tcg_gen_movi_i64(cpu_ir[rc], 0);
2848 }
4c9649a9
JM
2849 break;
2850 case 0x78:
2851 /* FTOIS */
2852 if (!(ctx->amask & AMASK_FIX))
2853 goto invalid_opc;
f18cd223 2854 if (rc != 31) {
a7812ae4 2855 TCGv_i32 tmp1 = tcg_temp_new_i32();
f18cd223 2856 if (ra != 31)
a7812ae4 2857 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
f18cd223
AJ
2858 else {
2859 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2860 gen_helper_s_to_memory(tmp1, tmp2);
f18cd223
AJ
2861 tcg_temp_free(tmp2);
2862 }
2863 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
a7812ae4 2864 tcg_temp_free_i32(tmp1);
f18cd223 2865 }
4c9649a9
JM
2866 break;
2867 default:
2868 goto invalid_opc;
2869 }
2870 break;
2871 case 0x1D:
2872 /* HW_MTPR (PALcode) */
2873#if defined (CONFIG_USER_ONLY)
2874 goto invalid_opc;
2875#else
2876 if (!ctx->pal_mode)
2877 goto invalid_opc;
8bb6e981
AJ
2878 else {
2879 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2880 if (ra != 31)
a7812ae4 2881 gen_helper_mtpr(tmp1, cpu_ir[ra]);
8bb6e981
AJ
2882 else {
2883 TCGv tmp2 = tcg_const_i64(0);
a7812ae4 2884 gen_helper_mtpr(tmp1, tmp2);
8bb6e981
AJ
2885 tcg_temp_free(tmp2);
2886 }
2887 tcg_temp_free(tmp1);
4af70374 2888 ret = EXIT_PC_STALE;
8bb6e981 2889 }
4c9649a9
JM
2890 break;
2891#endif
2892 case 0x1E:
2893 /* HW_REI (PALcode) */
2894#if defined (CONFIG_USER_ONLY)
2895 goto invalid_opc;
2896#else
2897 if (!ctx->pal_mode)
2898 goto invalid_opc;
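            /* Two encodings share this opcode: with rb == 31 it is the
               legacy HW_REI, which returns via state kept by the helper;
               with rb != 31 it is the newer HW_RET form, whose target is
               derived from Rb plus the signed 13-bit displacement that the
               shift pair below extracts from insn<12:0>.  (Descriptive
               summary only; the helpers define the exact behaviour.)  */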
2899 if (rb == 31) {
2900 /* "Old" alpha */
a7812ae4 2901 gen_helper_hw_rei();
4c9649a9 2902 } else {
8bb6e981
AJ
2903 TCGv tmp;
2904
2905 if (ra != 31) {
a7812ae4 2906 tmp = tcg_temp_new();
8bb6e981
AJ
2907 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2908 } else
2909 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
a7812ae4 2910 gen_helper_hw_ret(tmp);
8bb6e981 2911 tcg_temp_free(tmp);
4c9649a9 2912 }
4af70374 2913 ret = EXIT_PC_UPDATED;
4c9649a9
JM
2914 break;
2915#endif
2916 case 0x1F:
2917 /* HW_ST (PALcode) */
2918#if defined (CONFIG_USER_ONLY)
2919 goto invalid_opc;
2920#else
2921 if (!ctx->pal_mode)
2922 goto invalid_opc;
8bb6e981
AJ
2923 else {
2924 TCGv addr, val;
a7812ae4 2925 addr = tcg_temp_new();
8bb6e981
AJ
2926 if (rb != 31)
2927 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2928 else
2929 tcg_gen_movi_i64(addr, disp12);
2930 if (ra != 31)
2931 val = cpu_ir[ra];
2932 else {
a7812ae4 2933 val = tcg_temp_new();
8bb6e981
AJ
2934 tcg_gen_movi_i64(val, 0);
2935 }
2936 switch ((insn >> 12) & 0xF) {
2937 case 0x0:
2938 /* Longword physical access */
a7812ae4 2939 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2940 break;
2941 case 0x1:
2942 /* Quadword physical access */
a7812ae4 2943 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2944 break;
2945 case 0x2:
2946 /* Longword physical access with lock */
a7812ae4 2947 gen_helper_stl_c_raw(val, val, addr);
8bb6e981
AJ
2948 break;
2949 case 0x3:
2950 /* Quadword physical access with lock */
a7812ae4 2951 gen_helper_stq_c_raw(val, val, addr);
8bb6e981
AJ
2952 break;
2953 case 0x4:
2954 /* Longword virtual access */
a7812ae4
PB
2955 gen_helper_st_virt_to_phys(addr, addr);
2956 gen_helper_stl_raw(val, addr);
8bb6e981
AJ
2957 break;
2958 case 0x5:
2959 /* Quadword virtual access */
a7812ae4
PB
2960 gen_helper_st_virt_to_phys(addr, addr);
2961 gen_helper_stq_raw(val, addr);
8bb6e981
AJ
2962 break;
2963 case 0x6:
2964 /* Invalid */
2965 goto invalid_opc;
2966 case 0x7:
2967 /* Invalid */
2968 goto invalid_opc;
2969 case 0x8:
2970 /* Invalid */
2971 goto invalid_opc;
2972 case 0x9:
2973 /* Invalid */
2974 goto invalid_opc;
2975 case 0xA:
2976 /* Invalid */
2977 goto invalid_opc;
2978 case 0xB:
2979 /* Invalid */
2980 goto invalid_opc;
2981 case 0xC:
2982 /* Longword virtual access with alternate access mode */
a7812ae4
PB
2983 gen_helper_set_alt_mode();
2984 gen_helper_st_virt_to_phys(addr, addr);
2985 gen_helper_stl_raw(val, addr);
2986 gen_helper_restore_mode();
8bb6e981
AJ
2987 break;
2988 case 0xD:
2989 /* Quadword virtual access with alternate access mode */
a7812ae4
PB
2990 gen_helper_set_alt_mode();
2991 gen_helper_st_virt_to_phys(addr, addr);
 2992 gen_helper_stq_raw(val, addr);
2993 gen_helper_restore_mode();
8bb6e981
AJ
2994 break;
2995 case 0xE:
2996 /* Invalid */
2997 goto invalid_opc;
2998 case 0xF:
2999 /* Invalid */
3000 goto invalid_opc;
3001 }
45d46ce8 3002 if (ra == 31)
8bb6e981
AJ
3003 tcg_temp_free(val);
3004 tcg_temp_free(addr);
4c9649a9 3005 }
4c9649a9
JM
3006 break;
3007#endif
3008 case 0x20:
3009 /* LDF */
f18cd223 3010 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3011 break;
3012 case 0x21:
3013 /* LDG */
f18cd223 3014 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3015 break;
3016 case 0x22:
3017 /* LDS */
f18cd223 3018 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
4c9649a9
JM
3019 break;
3020 case 0x23:
3021 /* LDT */
f18cd223 3022 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3023 break;
3024 case 0x24:
3025 /* STF */
6910b8f6 3026 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
4c9649a9
JM
3027 break;
3028 case 0x25:
3029 /* STG */
6910b8f6 3030 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
4c9649a9
JM
3031 break;
3032 case 0x26:
3033 /* STS */
6910b8f6 3034 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
4c9649a9
JM
3035 break;
3036 case 0x27:
3037 /* STT */
6910b8f6 3038 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
4c9649a9
JM
3039 break;
3040 case 0x28:
3041 /* LDL */
f18cd223 3042 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
4c9649a9
JM
3043 break;
3044 case 0x29:
3045 /* LDQ */
f18cd223 3046 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3047 break;
3048 case 0x2A:
3049 /* LDL_L */
f4ed8679 3050 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3051 break;
3052 case 0x2B:
3053 /* LDQ_L */
f4ed8679 3054 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
4c9649a9
JM
3055 break;
3056 case 0x2C:
3057 /* STL */
6910b8f6 3058 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
4c9649a9
JM
3059 break;
3060 case 0x2D:
3061 /* STQ */
6910b8f6 3062 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
4c9649a9
JM
3063 break;
3064 case 0x2E:
3065 /* STL_C */
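            /* Store-conditional pair.  Architecturally STL_C/STQ_C succeed
               only if the reservation from a preceding LDL_L/LDQ_L is still
               valid, and they write the success flag (0 or 1) back into Ra;
               gen_store_conditional is expected to implement that protocol.
               (Descriptive note, not a behavioural change.)  */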
6910b8f6 3066 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
4c9649a9
JM
3067 break;
3068 case 0x2F:
3069 /* STQ_C */
6910b8f6 3070 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
4c9649a9
JM
3071 break;
3072 case 0x30:
3073 /* BR */
4af70374 3074 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3075 break;
a7812ae4 3076 case 0x31: /* FBEQ */
4af70374 3077 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
dbb30fe6 3078 break;
a7812ae4 3079 case 0x32: /* FBLT */
4af70374 3080 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
dbb30fe6 3081 break;
a7812ae4 3082 case 0x33: /* FBLE */
4af70374 3083 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
4c9649a9
JM
3084 break;
3085 case 0x34:
3086 /* BSR */
4af70374 3087 ret = gen_bdirect(ctx, ra, disp21);
4c9649a9 3088 break;
a7812ae4 3089 case 0x35: /* FBNE */
4af70374 3090 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
dbb30fe6 3091 break;
a7812ae4 3092 case 0x36: /* FBGE */
4af70374 3093 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
dbb30fe6 3094 break;
a7812ae4 3095 case 0x37: /* FBGT */
4af70374 3096 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
4c9649a9
JM
3097 break;
3098 case 0x38:
3099 /* BLBC */
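            /* BLBC/BLBS test only the low bit of Ra ("branch on low bit
               clear/set"); the trailing 1 passed to gen_bcond selects that
               single-bit test, while the other conditional branches pass 0
               and compare the full register against zero.  */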
4af70374 3100 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
4c9649a9
JM
3101 break;
3102 case 0x39:
3103 /* BEQ */
4af70374 3104 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
4c9649a9
JM
3105 break;
3106 case 0x3A:
3107 /* BLT */
4af70374 3108 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
4c9649a9
JM
3109 break;
3110 case 0x3B:
3111 /* BLE */
4af70374 3112 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
4c9649a9
JM
3113 break;
3114 case 0x3C:
3115 /* BLBS */
4af70374 3116 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
4c9649a9
JM
3117 break;
3118 case 0x3D:
3119 /* BNE */
4af70374 3120 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
4c9649a9
JM
3121 break;
3122 case 0x3E:
3123 /* BGE */
4af70374 3124 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
4c9649a9
JM
3125 break;
3126 case 0x3F:
3127 /* BGT */
4af70374 3128 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
4c9649a9
JM
3129 break;
3130 invalid_opc:
8aa3fa20 3131 ret = gen_invalid(ctx);
4c9649a9
JM
3132 break;
3133 }
3134
3135 return ret;
3136}
3137
636aa200
BS
3138static inline void gen_intermediate_code_internal(CPUState *env,
3139 TranslationBlock *tb,
3140 int search_pc)
4c9649a9 3141{
4c9649a9
JM
3142 DisasContext ctx, *ctxp = &ctx;
3143 target_ulong pc_start;
3144 uint32_t insn;
3145 uint16_t *gen_opc_end;
a1d1bb31 3146 CPUBreakpoint *bp;
4c9649a9 3147 int j, lj = -1;
4af70374 3148 ExitStatus ret;
2e70f6ef
PB
3149 int num_insns;
3150 int max_insns;
4c9649a9
JM
3151
3152 pc_start = tb->pc;
4c9649a9 3153 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
4af70374
RH
3154
3155 ctx.tb = tb;
3156 ctx.env = env;
4c9649a9
JM
3157 ctx.pc = pc_start;
3158 ctx.amask = env->amask;
3159#if defined (CONFIG_USER_ONLY)
3160 ctx.mem_idx = 0;
3161#else
3162 ctx.mem_idx = ((env->ps >> 3) & 3);
3163 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
3164#endif
f24518b5
RH
3165
3166 /* ??? Every TB begins with unset rounding mode, to be initialized on
 3167 the first fp insn of the TB. Alternatively we could define a proper
 3168 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
 3169 to reset the FP_STATUS to that default at the end of any TB that
 3170 changes the default. We could even (gasp) dynamically figure out
3171 what default would be most efficient given the running program. */
3172 ctx.tb_rm = -1;
3173 /* Similarly for flush-to-zero. */
3174 ctx.tb_ftz = -1;
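    /* Illustrative sketch (not a change in behaviour) of how the lazy
       per-TB mode tracking above is meant to be used by the FP generators;
       the helper name below is only an example:

           if (rm != ctx->tb_rm) {
               ctx->tb_rm = rm;                  // remember the TB's mode
               gen_helper_setroundmode(tmp_rm);  // emit the switch only once
           }

       The -1 sentinels guarantee that the first FP insn of the TB emits the
       switch; later insns re-emit it only when their qualifier differs.  */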
3175
2e70f6ef
PB
3176 num_insns = 0;
3177 max_insns = tb->cflags & CF_COUNT_MASK;
3178 if (max_insns == 0)
3179 max_insns = CF_COUNT_MASK;
3180
3181 gen_icount_start();
4af70374 3182 do {
72cf2d4f
BS
3183 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3184 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 3185 if (bp->pc == ctx.pc) {
4c9649a9
JM
3186 gen_excp(&ctx, EXCP_DEBUG, 0);
3187 break;
3188 }
3189 }
3190 }
3191 if (search_pc) {
3192 j = gen_opc_ptr - gen_opc_buf;
3193 if (lj < j) {
3194 lj++;
3195 while (lj < j)
3196 gen_opc_instr_start[lj++] = 0;
4c9649a9 3197 }
ed1dda53
AJ
3198 gen_opc_pc[lj] = ctx.pc;
3199 gen_opc_instr_start[lj] = 1;
3200 gen_opc_icount[lj] = num_insns;
4c9649a9 3201 }
2e70f6ef
PB
3202 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3203 gen_io_start();
4c9649a9 3204 insn = ldl_code(ctx.pc);
2e70f6ef 3205 num_insns++;
c4b3be39
RH
3206
3207 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3208 tcg_gen_debug_insn_start(ctx.pc);
3209 }
3210
4c9649a9
JM
3211 ctx.pc += 4;
3212 ret = translate_one(ctxp, insn);
19bf517b 3213
4af70374
RH
3214 if (ret == NO_EXIT) {
3215 /* If we reach a page boundary, are single stepping,
3216 or exhaust instruction count, stop generation. */
3217 if (env->singlestep_enabled) {
3218 gen_excp(&ctx, EXCP_DEBUG, 0);
3219 ret = EXIT_PC_UPDATED;
3220 } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3221 || gen_opc_ptr >= gen_opc_end
3222 || num_insns >= max_insns
3223 || singlestep) {
3224 ret = EXIT_PC_STALE;
3225 }
1b530a6d 3226 }
4af70374
RH
3227 } while (ret == NO_EXIT);
3228
3229 if (tb->cflags & CF_LAST_IO) {
3230 gen_io_end();
4c9649a9 3231 }
4af70374
RH
3232
3233 switch (ret) {
3234 case EXIT_GOTO_TB:
8aa3fa20 3235 case EXIT_NORETURN:
4af70374
RH
3236 break;
3237 case EXIT_PC_STALE:
496cb5b9 3238 tcg_gen_movi_i64(cpu_pc, ctx.pc);
4af70374
RH
3239 /* FALLTHRU */
3240 case EXIT_PC_UPDATED:
3241 tcg_gen_exit_tb(0);
3242 break;
3243 default:
3244 abort();
4c9649a9 3245 }
4af70374 3246
2e70f6ef 3247 gen_icount_end(tb, num_insns);
4c9649a9
JM
3248 *gen_opc_ptr = INDEX_op_end;
3249 if (search_pc) {
3250 j = gen_opc_ptr - gen_opc_buf;
3251 lj++;
3252 while (lj <= j)
3253 gen_opc_instr_start[lj++] = 0;
4c9649a9
JM
3254 } else {
3255 tb->size = ctx.pc - pc_start;
2e70f6ef 3256 tb->icount = num_insns;
4c9649a9 3257 }
4af70374 3258
806991da 3259#ifdef DEBUG_DISAS
8fec2b8c 3260 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
3261 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3262 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3263 qemu_log("\n");
4c9649a9 3264 }
4c9649a9 3265#endif
4c9649a9
JM
3266}
3267
2cfc5f17 3268void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3269{
2cfc5f17 3270 gen_intermediate_code_internal(env, tb, 0);
4c9649a9
JM
3271}
3272
2cfc5f17 3273void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
4c9649a9 3274{
2cfc5f17 3275 gen_intermediate_code_internal(env, tb, 1);
4c9649a9
JM
3276}
3277
a964acc6
RH
3278struct cpu_def_t {
3279 const char *name;
3280 int implver, amask;
3281};
3282
3283static const struct cpu_def_t cpu_defs[] = {
3284 { "ev4", IMPLVER_2106x, 0 },
3285 { "ev5", IMPLVER_21164, 0 },
3286 { "ev56", IMPLVER_21164, AMASK_BWX },
3287 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3288 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3289 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3290 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3291 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3292 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3293 { "21064", IMPLVER_2106x, 0 },
3294 { "21164", IMPLVER_21164, 0 },
3295 { "21164a", IMPLVER_21164, AMASK_BWX },
3296 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3297 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3298 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3299 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3300};
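/* The amask bits advertise optional instruction-set extensions: BWX is the
   byte/word extension, FIX the floating-point register moves and square
   root, CIX the count instructions (CTPOP/CTLZ/CTTZ) and MVI the multimedia
   instructions.  TRAP and PREFETCH cover precise arithmetic trap reporting
   and prefetch hints; see the Alpha ARM for the authoritative bit
   assignments.  */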
3301
aaed909a 3302CPUAlphaState * cpu_alpha_init (const char *cpu_model)
4c9649a9
JM
3303{
3304 CPUAlphaState *env;
a964acc6 3305 int implver, amask, i, max;
4c9649a9
JM
3306
3307 env = qemu_mallocz(sizeof(CPUAlphaState));
4c9649a9 3308 cpu_exec_init(env);
2e70f6ef 3309 alpha_translate_init();
4c9649a9 3310 tlb_flush(env, 1);
a964acc6
RH
3311
3312 /* Default to ev67; no reason not to emulate insns by default. */
3313 implver = IMPLVER_21264;
3314 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3315 | AMASK_TRAP | AMASK_PREFETCH);
3316
3317 max = ARRAY_SIZE(cpu_defs);
3318 for (i = 0; i < max; i++) {
3319 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3320 implver = cpu_defs[i].implver;
3321 amask = cpu_defs[i].amask;
3322 break;
3323 }
3324 }
3325 env->implver = implver;
3326 env->amask = amask;
3327
4c9649a9
JM
3328 env->ps = 0x1F00;
3329#if defined (CONFIG_USER_ONLY)
3330 env->ps |= 1 << 3;
2edd07ef
RH
3331 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3332 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
6049f4f8 3333#else
4c9649a9 3334 pal_init(env);
6049f4f8 3335#endif
6910b8f6 3336 env->lock_addr = -1;
dad081ee 3337
4c9649a9 3338 /* Initialize IPR */
dad081ee
RH
3339#if defined (CONFIG_USER_ONLY)
3340 env->ipr[IPR_EXC_ADDR] = 0;
3341 env->ipr[IPR_EXC_SUM] = 0;
3342 env->ipr[IPR_EXC_MASK] = 0;
3343#else
3344 {
f88fe4e3
BS
3345 // uint64_t hwpcb;
3346 // hwpcb = env->ipr[IPR_PCBB];
dad081ee
RH
3347 env->ipr[IPR_ASN] = 0;
3348 env->ipr[IPR_ASTEN] = 0;
3349 env->ipr[IPR_ASTSR] = 0;
3350 env->ipr[IPR_DATFX] = 0;
3351 /* XXX: fix this */
3352 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3353 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3354 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3355 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3356 env->ipr[IPR_FEN] = 0;
3357 env->ipr[IPR_IPL] = 31;
3358 env->ipr[IPR_MCES] = 0;
3359 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3360 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3361 env->ipr[IPR_SISR] = 0;
3362 env->ipr[IPR_VIRBND] = -1ULL;
3363 }
3364#endif
4c9649a9 3365
0bf46a40 3366 qemu_init_vcpu(env);
4c9649a9
JM
3367 return env;
3368}
aaed909a 3369
d2856f1a
AJ
3370void gen_pc_load(CPUState *env, TranslationBlock *tb,
3371 unsigned long searched_pc, int pc_pos, void *puc)
3372{
3373 env->pc = gen_opc_pc[pc_pos];
3374}