/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
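/* Sized exactly for the names generated below: "ir0".."ir9" need 4
   bytes each including the NUL (10*4), "ir10".."ir30" need 5 (21*5),
   and the "fir" names are one byte longer (10*5 + 21*6).  */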
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

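/* Synchronize cpu_pc with the translation-time PC before raising an
   exception, so the exception helper observes an up-to-date PC rather
   than whatever was last flushed to the CPUState.  */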
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

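/* Store-conditional emulation: gen_qemu_ldl_l/ldq_l above record the
   locked address in cpu_lock; the sequences below succeed (writing 1
   into t0) only if the store address still matches cpu_lock, write 0
   otherwise, and in either case invalidate the lock with -1.  Note
   that only the address is checked, not an intervening store.  */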
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}

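/* Set cpu_pc on both edges of a conditional branch.  ctx->pc already
   points at the next instruction, so the taken target is simply
   ctx->pc + 4 * disp; the displacement counts instructions, hence
   the << 2 below.  */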
static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}

static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}

/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false = -1;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}

static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}

static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(inv_cond, va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
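
/* Worked example: ADDS/SUI encodes its qualifiers in the high bits of
   fn11 as 0x780 == QUAL_S | QUAL_U | QUAL_I | QUAL_RM_N, while the
   low six bits (fn11 & 0x3F) select the operation itself.  */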

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

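/* CVTQL rearranges a quadword into the in-register longword format:
   source bits <31:30> land in <63:62> and bits <29:0> in <58:29>,
   which is how 32-bit values are kept in floating registers.  */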
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}
FARITH2(cvtlq)

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}
/* ??? Ought to expand these inline; simple masking operations.  */
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

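/* Expand an 8-bit byte mask into the corresponding 64-bit mask, one
   byte per bit.  E.g. zapnot_mask(0x0f) == 0x00000000ffffffffull and
   zapnot_mask(0x81) == 0xff000000000000ffull.  */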
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

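/* The EXT/INS/MSK families below are the building blocks Alpha uses
   for unaligned accesses: e.g. an unaligned quadword load is two
   LDQ_Us followed by EXTQL/EXTQH and an OR of the two halves.  */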
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

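/* Integer compares: tcg_gen_setcond_i64 writes exactly 1 when the
   condition holds and 0 otherwise, which is precisely the result the
   Alpha CMPxx instructions define, so no branches are needed.  */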
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
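    /* All formats share the opcode in <31:26> and ra in <25:21>;
       operate instructions add rb <20:16>, the literal flag at bit 12
       (with an 8-bit literal in <20:13> when set) and a function code,
       while memory and branch formats reuse <15:0> / <20:0> as
       sign-extended displacements.  */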
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = 3;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
1491 case 0x1D:
1492 /* CMPULT */
1493 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1494 break;
1495 case 0x20:
1496 /* ADDQ */
1497 if (likely(rc != 31)) {
1498 if (ra != 31) {
1499 if (islit)
1500 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1501 else
1502 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1503 } else {
1504 if (islit)
1505 tcg_gen_movi_i64(cpu_ir[rc], lit);
1506 else
1507 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1508 }
1509 }
1510 break;
1511 case 0x22:
1512 /* S4ADDQ */
1513 if (likely(rc != 31)) {
1514 if (ra != 31) {
1515 TCGv tmp = tcg_temp_new();
1516 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1517 if (islit)
1518 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1519 else
1520 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1521 tcg_temp_free(tmp);
1522 } else {
1523 if (islit)
1524 tcg_gen_movi_i64(cpu_ir[rc], lit);
1525 else
1526 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1527 }
1528 }
1529 break;
1530 case 0x29:
1531 /* SUBQ */
1532 if (likely(rc != 31)) {
1533 if (ra != 31) {
1534 if (islit)
1535 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1536 else
1537 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1538 } else {
1539 if (islit)
1540 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1541 else
1542 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1543 }
1544 }
1545 break;
1546 case 0x2B:
1547 /* S4SUBQ */
1548 if (likely(rc != 31)) {
1549 if (ra != 31) {
1550 TCGv tmp = tcg_temp_new();
1551 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1552 if (islit)
1553 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1554 else
1555 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1556 tcg_temp_free(tmp);
1557 } else {
1558 if (islit)
1559 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1560 else
1561 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1562 }
1563 }
1564 break;
1565 case 0x2D:
1566 /* CMPEQ */
1567 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1568 break;
1569 case 0x32:
1570 /* S8ADDQ */
1571 if (likely(rc != 31)) {
1572 if (ra != 31) {
1573 TCGv tmp = tcg_temp_new();
1574 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1575 if (islit)
1576 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1577 else
1578 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1579 tcg_temp_free(tmp);
1580 } else {
1581 if (islit)
1582 tcg_gen_movi_i64(cpu_ir[rc], lit);
1583 else
1584 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1585 }
1586 }
1587 break;
1588 case 0x3B:
1589 /* S8SUBQ */
1590 if (likely(rc != 31)) {
1591 if (ra != 31) {
1592 TCGv tmp = tcg_temp_new();
1593 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1594 if (islit)
1595 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1596 else
1597 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1598 tcg_temp_free(tmp);
1599 } else {
1600 if (islit)
1601 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1602 else
1603 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1604 }
1605 }
1606 break;
1607 case 0x3D:
1608 /* CMPULE */
1609 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1610 break;
1611 case 0x40:
1612 /* ADDL/V */
1613 gen_addlv(ra, rb, rc, islit, lit);
1614 break;
1615 case 0x49:
1616 /* SUBL/V */
1617 gen_sublv(ra, rb, rc, islit, lit);
1618 break;
1619 case 0x4D:
1620 /* CMPLT */
1621 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1622 break;
1623 case 0x60:
1624 /* ADDQ/V */
1625 gen_addqv(ra, rb, rc, islit, lit);
1626 break;
1627 case 0x69:
1628 /* SUBQ/V */
1629 gen_subqv(ra, rb, rc, islit, lit);
1630 break;
1631 case 0x6D:
1632 /* CMPLE */
1633 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1634 break;
1635 default:
1636 goto invalid_opc;
1637 }
1638 break;
1639 case 0x11:
1640 switch (fn7) {
1641 case 0x00:
1642 /* AND */
1643 if (likely(rc != 31)) {
1644 if (ra == 31)
1645 tcg_gen_movi_i64(cpu_ir[rc], 0);
1646 else if (islit)
1647 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1648 else
1649 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1650 }
1651 break;
1652 case 0x08:
1653 /* BIC */
1654 if (likely(rc != 31)) {
1655 if (ra != 31) {
1656 if (islit)
1657 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1658 else
1659 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1660 } else
1661 tcg_gen_movi_i64(cpu_ir[rc], 0);
1662 }
1663 break;
1664 case 0x14:
1665 /* CMOVLBS */
1666 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1667 break;
1668 case 0x16:
1669 /* CMOVLBC */
1670 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1671 break;
1672 case 0x20:
1673 /* BIS */
1674 if (likely(rc != 31)) {
1675 if (ra != 31) {
1676 if (islit)
1677 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1678 else
1679 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1680 } else {
1681 if (islit)
1682 tcg_gen_movi_i64(cpu_ir[rc], lit);
1683 else
1684 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1685 }
1686 }
1687 break;
1688 case 0x24:
1689 /* CMOVEQ */
1690 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1691 break;
1692 case 0x26:
1693 /* CMOVNE */
1694 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1695 break;
1696 case 0x28:
1697 /* ORNOT */
1698 if (likely(rc != 31)) {
1699 if (ra != 31) {
1700 if (islit)
1701 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1702 else
1703 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1704 } else {
1705 if (islit)
1706 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1707 else
1708 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1709 }
1710 }
1711 break;
1712 case 0x40:
1713 /* XOR */
1714 if (likely(rc != 31)) {
1715 if (ra != 31) {
1716 if (islit)
1717 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1718 else
1719 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1720 } else {
1721 if (islit)
1722 tcg_gen_movi_i64(cpu_ir[rc], lit);
1723 else
1724 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1725 }
1726 }
1727 break;
1728 case 0x44:
1729 /* CMOVLT */
1730 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1731 break;
1732 case 0x46:
1733 /* CMOVGE */
1734 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1735 break;
1736 case 0x48:
1737 /* EQV */
1738 if (likely(rc != 31)) {
1739 if (ra != 31) {
1740 if (islit)
1741 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1742 else
1743 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1744 } else {
1745 if (islit)
1746 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1747 else
1748 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1749 }
1750 }
1751 break;
1752 case 0x61:
1753 /* AMASK */
1754 if (likely(rc != 31)) {
1755 if (islit)
1756 tcg_gen_movi_i64(cpu_ir[rc], lit);
1757 else
1758 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1759 switch (ctx->env->implver) {
1760 case IMPLVER_2106x:
1761 /* EV4, EV45, LCA, LCA45 & EV5 */
1762 break;
1763 case IMPLVER_21164:
1764 case IMPLVER_21264:
1765 case IMPLVER_21364:
1766 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1767 ~(uint64_t)ctx->amask);
1768 break;
1769 }
1770 }
1771 break;
1772 case 0x64:
1773 /* CMOVLE */
1774 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
1775 break;
1776 case 0x66:
1777 /* CMOVGT */
1778 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
1779 break;
1780 case 0x6C:
1781 /* IMPLVER */
1782 if (rc != 31)
1783 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
1784 break;
1785 default:
1786 goto invalid_opc;
1787 }
1788 break;
1789 case 0x12:
1790 switch (fn7) {
1791 case 0x02:
1792 /* MSKBL */
1793 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
1794 break;
1795 case 0x06:
1796 /* EXTBL */
1797 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
1798 break;
1799 case 0x0B:
1800 /* INSBL */
1801 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
1802 break;
1803 case 0x12:
1804 /* MSKWL */
1805 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
1806 break;
1807 case 0x16:
1808 /* EXTWL */
1809 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
1810 break;
1811 case 0x1B:
1812 /* INSWL */
1813 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
1814 break;
1815 case 0x22:
1816 /* MSKLL */
1817 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
1818 break;
1819 case 0x26:
1820 /* EXTLL */
1821 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
1822 break;
1823 case 0x2B:
1824 /* INSLL */
1825 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
1826 break;
1827 case 0x30:
1828 /* ZAP */
1829 gen_zap(ra, rb, rc, islit, lit);
1830 break;
1831 case 0x31:
1832 /* ZAPNOT */
1833 gen_zapnot(ra, rb, rc, islit, lit);
1834 break;
1835 case 0x32:
1836 /* MSKQL */
1837 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
1838 break;
1839 case 0x34:
1840 /* SRL */
1841 if (likely(rc != 31)) {
1842 if (ra != 31) {
1843 if (islit)
1844 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1845 else {
1846 TCGv shift = tcg_temp_new();
1847 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1848 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1849 tcg_temp_free(shift);
1850 }
1851 } else
1852 tcg_gen_movi_i64(cpu_ir[rc], 0);
1853 }
1854 break;
1855 case 0x36:
1856 /* EXTQL */
1857 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
1858 break;
1859 case 0x39:
1860 /* SLL */
1861 if (likely(rc != 31)) {
1862 if (ra != 31) {
1863 if (islit)
1864 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1865 else {
1866 TCGv shift = tcg_temp_new();
1867 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1868 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1869 tcg_temp_free(shift);
1870 }
1871 } else
1872 tcg_gen_movi_i64(cpu_ir[rc], 0);
1873 }
1874 break;
1875 case 0x3B:
1876 /* INSQL */
1877 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
1878 break;
1879 case 0x3C:
1880 /* SRA */
1881 if (likely(rc != 31)) {
1882 if (ra != 31) {
1883 if (islit)
1884 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1885 else {
1886 TCGv shift = tcg_temp_new();
1887 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1888 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1889 tcg_temp_free(shift);
1890 }
1891 } else
1892 tcg_gen_movi_i64(cpu_ir[rc], 0);
1893 }
1894 break;
1895 case 0x52:
1896 /* MSKWH */
1897 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
1898 break;
1899 case 0x57:
1900 /* INSWH */
1901 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
1902 break;
1903 case 0x5A:
1904 /* EXTWH */
1905 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
1906 break;
1907 case 0x62:
1908 /* MSKLH */
1909 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
1910 break;
1911 case 0x67:
1912 /* INSLH */
1913 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
1914 break;
1915 case 0x6A:
1916 /* EXTLH */
1917 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
1918 break;
1919 case 0x72:
1920 /* MSKQH */
1921 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
1922 break;
1923 case 0x77:
1924 /* INSQH */
1925 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
1926 break;
1927 case 0x7A:
1928 /* EXTQH */
1929 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
1930 break;
1931 default:
1932 goto invalid_opc;
1933 }
1934 break;
1935 case 0x13:
1936 switch (fn7) {
1937 case 0x00:
1938 /* MULL */
1939 if (likely(rc != 31)) {
1940 if (ra == 31)
1941 tcg_gen_movi_i64(cpu_ir[rc], 0);
1942 else {
1943 if (islit)
1944 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1945 else
1946 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1947 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1948 }
1949 }
1950 break;
1951 case 0x20:
1952 /* MULQ */
1953 if (likely(rc != 31)) {
1954 if (ra == 31)
1955 tcg_gen_movi_i64(cpu_ir[rc], 0);
1956 else if (islit)
1957 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1958 else
1959 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1960 }
1961 break;
1962 case 0x30:
1963 /* UMULH */
1964 gen_umulh(ra, rb, rc, islit, lit);
1965 break;
1966 case 0x40:
1967 /* MULL/V */
1968 gen_mullv(ra, rb, rc, islit, lit);
1969 break;
1970 case 0x60:
1971 /* MULQ/V */
1972 gen_mulqv(ra, rb, rc, islit, lit);
1973 break;
1974 default:
1975 goto invalid_opc;
1976 }
1977 break;
1978 case 0x14:
1979 switch (fpfn) { /* fn11 & 0x3F */
1980 case 0x04:
1981 /* ITOFS */
1982 if (!(ctx->amask & AMASK_FIX))
1983 goto invalid_opc;
1984 if (likely(rc != 31)) {
1985 if (ra != 31) {
1986 TCGv_i32 tmp = tcg_temp_new_i32();
1987 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1988 gen_helper_memory_to_s(cpu_fir[rc], tmp);
1989 tcg_temp_free_i32(tmp);
1990 } else
1991 tcg_gen_movi_i64(cpu_fir[rc], 0);
1992 }
1993 break;
1994 case 0x0A:
1995 /* SQRTF */
1996 if (!(ctx->amask & AMASK_FIX))
1997 goto invalid_opc;
1998 gen_fsqrtf(rb, rc);
1999 break;
2000 case 0x0B:
2001 /* SQRTS */
2002 if (!(ctx->amask & AMASK_FIX))
2003 goto invalid_opc;
2004 gen_fsqrts(ctx, rb, rc, fn11);
2005 break;
2006 case 0x14:
2007 /* ITOFF */
2008 if (!(ctx->amask & AMASK_FIX))
2009 goto invalid_opc;
2010 if (likely(rc != 31)) {
2011 if (ra != 31) {
2012 TCGv_i32 tmp = tcg_temp_new_i32();
2013 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2014 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2015 tcg_temp_free_i32(tmp);
2016 } else
2017 tcg_gen_movi_i64(cpu_fir[rc], 0);
2018 }
2019 break;
2020 case 0x24:
2021 /* ITOFT */
2022 if (!(ctx->amask & AMASK_FIX))
2023 goto invalid_opc;
2024 if (likely(rc != 31)) {
2025 if (ra != 31)
2026 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2027 else
2028 tcg_gen_movi_i64(cpu_fir[rc], 0);
2029 }
2030 break;
2031 case 0x2A:
2032 /* SQRTG */
2033 if (!(ctx->amask & AMASK_FIX))
2034 goto invalid_opc;
2035 gen_fsqrtg(rb, rc);
2036 break;
2037 case 0x02B:
2038 /* SQRTT */
2039 if (!(ctx->amask & AMASK_FIX))
2040 goto invalid_opc;
2041 gen_fsqrtt(ctx, rb, rc, fn11);
2042 break;
2043 default:
2044 goto invalid_opc;
2045 }
2046 break;
2047 case 0x15:
2048 /* VAX floating point */
2049 /* XXX: rounding mode and trap are ignored (!) */
2050 switch (fpfn) { /* fn11 & 0x3F */
2051 case 0x00:
2052 /* ADDF */
2053 gen_faddf(ra, rb, rc);
2054 break;
2055 case 0x01:
2056 /* SUBF */
2057 gen_fsubf(ra, rb, rc);
2058 break;
2059 case 0x02:
2060 /* MULF */
2061 gen_fmulf(ra, rb, rc);
2062 break;
2063 case 0x03:
2064 /* DIVF */
2065 gen_fdivf(ra, rb, rc);
2066 break;
2067 case 0x1E:
2068 /* CVTDG */
2069 #if 0 // TODO
2070 gen_fcvtdg(rb, rc);
2071 #else
2072 goto invalid_opc;
2073 #endif
2074 break;
2075 case 0x20:
2076 /* ADDG */
2077 gen_faddg(ra, rb, rc);
2078 break;
2079 case 0x21:
2080 /* SUBG */
2081 gen_fsubg(ra, rb, rc);
2082 break;
2083 case 0x22:
2084 /* MULG */
2085 gen_fmulg(ra, rb, rc);
2086 break;
2087 case 0x23:
2088 /* DIVG */
2089 gen_fdivg(ra, rb, rc);
2090 break;
2091 case 0x25:
2092 /* CMPGEQ */
2093 gen_fcmpgeq(ra, rb, rc);
2094 break;
2095 case 0x26:
2096 /* CMPGLT */
2097 gen_fcmpglt(ra, rb, rc);
2098 break;
2099 case 0x27:
2100 /* CMPGLE */
2101 gen_fcmpgle(ra, rb, rc);
2102 break;
2103 case 0x2C:
2104 /* CVTGF */
2105 gen_fcvtgf(rb, rc);
2106 break;
2107 case 0x2D:
2108 /* CVTGD */
2109 #if 0 // TODO
2110 gen_fcvtgd(rb, rc);
2111 #else
2112 goto invalid_opc;
2113 #endif
2114 break;
2115 case 0x2F:
2116 /* CVTGQ */
2117 gen_fcvtgq(rb, rc);
2118 break;
2119 case 0x3C:
2120 /* CVTQF */
2121 gen_fcvtqf(rb, rc);
2122 break;
2123 case 0x3E:
2124 /* CVTQG */
2125 gen_fcvtqg(rb, rc);
2126 break;
2127 default:
2128 goto invalid_opc;
2129 }
2130 break;
2131 case 0x16:
2132 /* IEEE floating-point */
2133 switch (fpfn) { /* fn11 & 0x3F */
2134 case 0x00:
2135 /* ADDS */
2136 gen_fadds(ctx, ra, rb, rc, fn11);
2137 break;
2138 case 0x01:
2139 /* SUBS */
2140 gen_fsubs(ctx, ra, rb, rc, fn11);
2141 break;
2142 case 0x02:
2143 /* MULS */
2144 gen_fmuls(ctx, ra, rb, rc, fn11);
2145 break;
2146 case 0x03:
2147 /* DIVS */
2148 gen_fdivs(ctx, ra, rb, rc, fn11);
2149 break;
2150 case 0x20:
2151 /* ADDT */
2152 gen_faddt(ctx, ra, rb, rc, fn11);
2153 break;
2154 case 0x21:
2155 /* SUBT */
2156 gen_fsubt(ctx, ra, rb, rc, fn11);
2157 break;
2158 case 0x22:
2159 /* MULT */
2160 gen_fmult(ctx, ra, rb, rc, fn11);
2161 break;
2162 case 0x23:
2163 /* DIVT */
2164 gen_fdivt(ctx, ra, rb, rc, fn11);
2165 break;
2166 case 0x24:
2167 /* CMPTUN */
2168 gen_fcmptun(ctx, ra, rb, rc, fn11);
2169 break;
2170 case 0x25:
2171 /* CMPTEQ */
2172 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2173 break;
2174 case 0x26:
2175 /* CMPTLT */
2176 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2177 break;
2178 case 0x27:
2179 /* CMPTLE */
2180 gen_fcmptle(ctx, ra, rb, rc, fn11);
2181 break;
2182 case 0x2C:
2183 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2184 /* CVTST */
2185 gen_fcvtst(ctx, rb, rc, fn11);
2186 } else {
2187 /* CVTTS */
2188 gen_fcvtts(ctx, rb, rc, fn11);
2189 }
2190 break;
2191 case 0x2F:
2192 /* CVTTQ */
2193 gen_fcvttq(ctx, rb, rc, fn11);
2194 break;
2195 case 0x3C:
2196 /* CVTQS */
2197 gen_fcvtqs(ctx, rb, rc, fn11);
2198 break;
2199 case 0x3E:
2200 /* CVTQT */
2201 gen_fcvtqt(ctx, rb, rc, fn11);
2202 break;
2203 default:
2204 goto invalid_opc;
2205 }
2206 break;
2207 case 0x17:
2208 switch (fn11) {
2209 case 0x010:
2210 /* CVTLQ */
2211 gen_fcvtlq(rb, rc);
2212 break;
2213 case 0x020:
2214 if (likely(rc != 31)) {
2215 if (ra == rb) {
2216 /* FMOV */
2217 if (ra == 31)
2218 tcg_gen_movi_i64(cpu_fir[rc], 0);
2219 else
2220 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2221 } else {
2222 /* CPYS */
2223 gen_fcpys(ra, rb, rc);
2224 }
2225 }
2226 break;
2227 case 0x021:
2228 /* CPYSN */
2229 gen_fcpysn(ra, rb, rc);
2230 break;
2231 case 0x022:
2232 /* CPYSE */
2233 gen_fcpyse(ra, rb, rc);
2234 break;
2235 case 0x024:
2236 /* MT_FPCR */
2237 if (likely(ra != 31))
2238 gen_helper_store_fpcr(cpu_fir[ra]);
2239 else {
2240 TCGv tmp = tcg_const_i64(0);
2241 gen_helper_store_fpcr(tmp);
2242 tcg_temp_free(tmp);
2243 }
2244 break;
2245 case 0x025:
2246 /* MF_FPCR */
2247 if (likely(ra != 31))
2248 gen_helper_load_fpcr(cpu_fir[ra]);
2249 break;
2250 case 0x02A:
2251 /* FCMOVEQ */
2252 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2253 break;
2254 case 0x02B:
2255 /* FCMOVNE */
2256 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2257 break;
2258 case 0x02C:
2259 /* FCMOVLT */
2260 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2261 break;
2262 case 0x02D:
2263 /* FCMOVGE */
2264 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2265 break;
2266 case 0x02E:
2267 /* FCMOVLE */
2268 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2269 break;
2270 case 0x02F:
2271 /* FCMOVGT */
2272 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2273 break;
2274 case 0x030:
2275 /* CVTQL */
2276 gen_fcvtql(rb, rc);
2277 break;
2278 case 0x130:
2279 /* CVTQL/V */
2280 case 0x530:
2281 /* CVTQL/SV */
2282 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2283 /v doesn't do.  The only thing I can think of is that /sv is a
2284 valid instruction merely for completeness in the ISA. */
2285 gen_fcvtql_v(ctx, rb, rc);
2286 break;
2287 default:
2288 goto invalid_opc;
2289 }
2290 break;
2291 case 0x18:
2292 switch ((uint16_t)disp16) {
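/* QEMU recognizes exceptions and interrupts only at TB boundaries,
   so the barrier instructions below need only end the current TB. */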
2293 case 0x0000:
2294 /* TRAPB */
2295 /* No-op. Just exit from the current tb */
2296 ret = 2;
2297 break;
2298 case 0x0400:
2299 /* EXCB */
2300 /* No-op. Just exit from the current tb */
2301 ret = 2;
2302 break;
2303 case 0x4000:
2304 /* MB */
2305 /* No-op */
2306 break;
2307 case 0x4400:
2308 /* WMB */
2309 /* No-op */
2310 break;
2311 case 0x8000:
2312 /* FETCH */
2313 /* No-op */
2314 break;
2315 case 0xA000:
2316 /* FETCH_M */
2317 /* No-op */
2318 break;
2319 case 0xC000:
2320 /* RPCC */
2321 if (ra != 31)
2322 gen_helper_load_pcc(cpu_ir[ra]);
2323 break;
2324 case 0xE000:
2325 /* RC */
2326 if (ra != 31)
2327 gen_helper_rc(cpu_ir[ra]);
2328 break;
2329 case 0xE800:
2330 /* ECB */
2331 break;
2332 case 0xF000:
2333 /* RS */
2334 if (ra != 31)
2335 gen_helper_rs(cpu_ir[ra]);
2336 break;
2337 case 0xF800:
2338 /* WH64 */
2339 /* No-op */
2340 break;
2341 default:
2342 goto invalid_opc;
2343 }
2344 break;
2345 case 0x19:
2346 /* HW_MFPR (PALcode) */
2347 #if defined (CONFIG_USER_ONLY)
2348 goto invalid_opc;
2349 #else
2350 if (!ctx->pal_mode)
2351 goto invalid_opc;
2352 if (ra != 31) {
2353 TCGv_i32 tmp = tcg_const_i32(insn & 0xFF);
2354 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
2355 tcg_temp_free_i32(tmp);
2356 }
2357 break;
2358 #endif
2359 case 0x1A:
2360 if (rb != 31)
2361 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2362 else
2363 tcg_gen_movi_i64(cpu_pc, 0);
2364 if (ra != 31)
2365 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2366 /* These four jumps differ only in the branch-prediction hint, which the translator ignores */
2367 switch (fn2) {
2368 case 0x0:
2369 /* JMP */
2370 break;
2371 case 0x1:
2372 /* JSR */
2373 break;
2374 case 0x2:
2375 /* RET */
2376 break;
2377 case 0x3:
2378 /* JSR_COROUTINE */
2379 break;
2380 }
2381 ret = 1;
2382 break;
2383 case 0x1B:
2384 /* HW_LD (PALcode) */
2385 #if defined (CONFIG_USER_ONLY)
2386 goto invalid_opc;
2387 #else
2388 if (!ctx->pal_mode)
2389 goto invalid_opc;
2390 if (ra != 31) {
2391 TCGv addr = tcg_temp_new();
2392 if (rb != 31)
2393 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2394 else
2395 tcg_gen_movi_i64(addr, disp12);
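/* Instruction bits <15:12> select the HW_LD variant. */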
2396 switch ((insn >> 12) & 0xF) {
2397 case 0x0:
2398 /* Longword physical access (hw_ldl/p) */
2399 gen_helper_ldl_raw(cpu_ir[ra], addr);
2400 break;
2401 case 0x1:
2402 /* Quadword physical access (hw_ldq/p) */
2403 gen_helper_ldq_raw(cpu_ir[ra], addr);
2404 break;
2405 case 0x2:
2406 /* Longword physical access with lock (hw_ldl_l/p) */
2407 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
2408 break;
2409 case 0x3:
2410 /* Quadword physical access with lock (hw_ldq_l/p) */
2411 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
2412 break;
2413 case 0x4:
2414 /* Longword virtual PTE fetch (hw_ldl/v) */
2415 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2416 break;
2417 case 0x5:
2418 /* Quadword virtual PTE fetch (hw_ldq/v) */
2419 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2420 break;
2421 case 0x6:
2422 /* Invalid */
2423 goto invalid_opc;
2424 case 0x7:
2425 /* Invalid */
2426 goto invalid_opc;
2427 case 0x8:
2428 /* Longword virtual access (hw_ldl) */
2429 gen_helper_st_virt_to_phys(addr, addr);
2430 gen_helper_ldl_raw(cpu_ir[ra], addr);
2431 break;
2432 case 0x9:
2433 /* Quadword virtual access (hw_ldq) */
2434 gen_helper_st_virt_to_phys(addr, addr);
2435 gen_helper_ldq_raw(cpu_ir[ra], addr);
2436 break;
2437 case 0xA:
2438 /* Longword virtual access with protection check (hw_ldl/w) */
2439 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2440 break;
2441 case 0xB:
2442 /* Quadword virtual access with protection check (hw_ldq/w) */
2443 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2444 break;
2445 case 0xC:
2446 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2447 gen_helper_set_alt_mode();
2448 gen_helper_st_virt_to_phys(addr, addr);
2449 gen_helper_ldl_raw(cpu_ir[ra], addr);
2450 gen_helper_restore_mode();
2451 break;
2452 case 0xD:
2453 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2454 gen_helper_set_alt_mode();
2455 gen_helper_st_virt_to_phys(addr, addr);
2456 gen_helper_ldq_raw(cpu_ir[ra], addr);
2457 gen_helper_restore_mode();
2458 break;
2459 case 0xE:
2460 /* Longword virtual access with alternate access mode and
2461 * protection checks (hw_ldl/wa)
2462 */
2463 gen_helper_set_alt_mode();
2464 gen_helper_ldl_data(cpu_ir[ra], addr);
2465 gen_helper_restore_mode();
2466 break;
2467 case 0xF:
2468 /* Quadword virtual access with alternate access mode and
2469 * protection checks (hw_ldq/wa)
2470 */
2471 gen_helper_set_alt_mode();
2472 gen_helper_ldq_data(cpu_ir[ra], addr);
2473 gen_helper_restore_mode();
2474 break;
2475 }
2476 tcg_temp_free(addr);
2477 }
2478 break;
2479 #endif
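/* Opcode 0x1C covers the post-EV4 extension instructions; each one is
   gated on its AMASK feature bit (BWX, MVI, CIX, FIX) and decodes as
   OPCDEC when the configured CPU lacks that feature. */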
2480 case 0x1C:
2481 switch (fn7) {
2482 case 0x00:
2483 /* SEXTB */
2484 if (!(ctx->amask & AMASK_BWX))
2485 goto invalid_opc;
2486 if (likely(rc != 31)) {
2487 if (islit)
2488 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2489 else
2490 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2491 }
2492 break;
2493 case 0x01:
2494 /* SEXTW */
2495 if (!(ctx->amask & AMASK_BWX))
2496 goto invalid_opc;
2497 if (likely(rc != 31)) {
2498 if (islit)
2499 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2500 else
2501 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2502 }
2503 break;
2504 case 0x30:
2505 /* CTPOP */
2506 if (!(ctx->amask & AMASK_CIX))
2507 goto invalid_opc;
2508 if (likely(rc != 31)) {
2509 if (islit)
2510 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2511 else
2512 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2513 }
2514 break;
2515 case 0x31:
2516 /* PERR */
2517 if (!(ctx->amask & AMASK_MVI))
2518 goto invalid_opc;
2519 gen_perr(ra, rb, rc, islit, lit);
2520 break;
2521 case 0x32:
2522 /* CTLZ */
2523 if (!(ctx->amask & AMASK_CIX))
2524 goto invalid_opc;
2525 if (likely(rc != 31)) {
2526 if (islit)
2527 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2528 else
2529 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2530 }
2531 break;
2532 case 0x33:
2533 /* CTTZ */
2534 if (!(ctx->amask & AMASK_CIX))
2535 goto invalid_opc;
2536 if (likely(rc != 31)) {
2537 if (islit)
2538 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2539 else
2540 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2541 }
2542 break;
2543 case 0x34:
2544 /* UNPKBW */
2545 if (!(ctx->amask & AMASK_MVI))
2546 goto invalid_opc;
2547 if (real_islit || ra != 31)
2548 goto invalid_opc;
2549 gen_unpkbw(rb, rc);
2550 break;
2551 case 0x35:
2552 /* UNPKBL */
2553 if (!(ctx->amask & AMASK_MVI))
2554 goto invalid_opc;
2555 if (real_islit || ra != 31)
2556 goto invalid_opc;
2557 gen_unpkbl(rb, rc);
2558 break;
2559 case 0x36:
2560 /* PKWB */
2561 if (!(ctx->amask & AMASK_MVI))
2562 goto invalid_opc;
2563 if (real_islit || ra != 31)
2564 goto invalid_opc;
2565 gen_pkwb(rb, rc);
2566 break;
2567 case 0x37:
2568 /* PKLB */
2569 if (!(ctx->amask & AMASK_MVI))
2570 goto invalid_opc;
2571 if (real_islit || ra != 31)
2572 goto invalid_opc;
2573 gen_pklb(rb, rc);
2574 break;
2575 case 0x38:
2576 /* MINSB8 */
2577 if (!(ctx->amask & AMASK_MVI))
2578 goto invalid_opc;
2579 gen_minsb8(ra, rb, rc, islit, lit);
2580 break;
2581 case 0x39:
2582 /* MINSW4 */
2583 if (!(ctx->amask & AMASK_MVI))
2584 goto invalid_opc;
2585 gen_minsw4(ra, rb, rc, islit, lit);
2586 break;
2587 case 0x3A:
2588 /* MINUB8 */
2589 if (!(ctx->amask & AMASK_MVI))
2590 goto invalid_opc;
2591 gen_minub8(ra, rb, rc, islit, lit);
2592 break;
2593 case 0x3B:
2594 /* MINUW4 */
2595 if (!(ctx->amask & AMASK_MVI))
2596 goto invalid_opc;
2597 gen_minuw4(ra, rb, rc, islit, lit);
2598 break;
2599 case 0x3C:
2600 /* MAXUB8 */
2601 if (!(ctx->amask & AMASK_MVI))
2602 goto invalid_opc;
2603 gen_maxub8(ra, rb, rc, islit, lit);
2604 break;
2605 case 0x3D:
2606 /* MAXUW4 */
2607 if (!(ctx->amask & AMASK_MVI))
2608 goto invalid_opc;
2609 gen_maxuw4(ra, rb, rc, islit, lit);
2610 break;
2611 case 0x3E:
2612 /* MAXSB8 */
2613 if (!(ctx->amask & AMASK_MVI))
2614 goto invalid_opc;
2615 gen_maxsb8(ra, rb, rc, islit, lit);
2616 break;
2617 case 0x3F:
2618 /* MAXSW4 */
2619 if (!(ctx->amask & AMASK_MVI))
2620 goto invalid_opc;
2621 gen_maxsw4(ra, rb, rc, islit, lit);
2622 break;
2623 case 0x70:
2624 /* FTOIT */
2625 if (!(ctx->amask & AMASK_FIX))
2626 goto invalid_opc;
2627 if (likely(rc != 31)) {
2628 if (ra != 31)
2629 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2630 else
2631 tcg_gen_movi_i64(cpu_ir[rc], 0);
2632 }
2633 break;
2634 case 0x78:
2635 /* FTOIS */
2636 if (!(ctx->amask & AMASK_FIX))
2637 goto invalid_opc;
2638 if (rc != 31) {
2639 TCGv_i32 tmp1 = tcg_temp_new_i32();
2640 if (ra != 31)
2641 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2642 else {
2643 TCGv tmp2 = tcg_const_i64(0);
2644 gen_helper_s_to_memory(tmp1, tmp2);
2645 tcg_temp_free(tmp2);
2646 }
2647 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2648 tcg_temp_free_i32(tmp1);
2649 }
2650 break;
2651 default:
2652 goto invalid_opc;
2653 }
2654 break;
2655 case 0x1D:
2656 /* HW_MTPR (PALcode) */
2657 #if defined (CONFIG_USER_ONLY)
2658 goto invalid_opc;
2659 #else
2660 if (!ctx->pal_mode)
2661 goto invalid_opc;
2662 else {
2663 TCGv_i32 tmp1 = tcg_const_i32(insn & 0xFF);
2664 if (ra != 31)
2665 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2666 else {
2667 TCGv tmp2 = tcg_const_i64(0);
2668 gen_helper_mtpr(tmp1, tmp2);
2669 tcg_temp_free(tmp2);
2670 }
2671 tcg_temp_free_i32(tmp1);
2672 ret = 2;
2673 }
2674 break;
2675 #endif
2676 case 0x1E:
2677 /* HW_REI (PALcode) */
2678 #if defined (CONFIG_USER_ONLY)
2679 goto invalid_opc;
2680 #else
2681 if (!ctx->pal_mode)
2682 goto invalid_opc;
2683 if (rb == 31) {
2684 /* "Old" alpha */
2685 gen_helper_hw_rei();
2686 } else {
2687 TCGv tmp;
2688
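/* rb != 31 on this path: the target is IR[rb] plus the low 13 bits
   of the insn, sign-extended by the shift pair. */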
2689 tmp = tcg_temp_new();
2690 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2694 gen_helper_hw_ret(tmp);
2695 tcg_temp_free(tmp);
2696 }
2697 ret = 2;
2698 break;
2699 #endif
2700 case 0x1F:
2701 /* HW_ST (PALcode) */
2702 #if defined (CONFIG_USER_ONLY)
2703 goto invalid_opc;
2704 #else
2705 if (!ctx->pal_mode)
2706 goto invalid_opc;
2707 else {
2708 TCGv addr, val;
2709 addr = tcg_temp_new();
2710 if (rb != 31)
2711 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2712 else
2713 tcg_gen_movi_i64(addr, disp12);
2714 if (ra != 31)
2715 val = cpu_ir[ra];
2716 else {
2717 val = tcg_temp_new();
2718 tcg_gen_movi_i64(val, 0);
2719 }
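/* Instruction bits <15:12> select the HW_ST variant. */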
2720 switch ((insn >> 12) & 0xF) {
2721 case 0x0:
2722 /* Longword physical access */
2723 gen_helper_stl_raw(val, addr);
2724 break;
2725 case 0x1:
2726 /* Quadword physical access */
2727 gen_helper_stq_raw(val, addr);
2728 break;
2729 case 0x2:
2730 /* Longword physical access with lock */
2731 gen_helper_stl_c_raw(val, val, addr);
2732 break;
2733 case 0x3:
2734 /* Quadword physical access with lock */
2735 gen_helper_stq_c_raw(val, val, addr);
2736 break;
2737 case 0x4:
2738 /* Longword virtual access */
2739 gen_helper_st_virt_to_phys(addr, addr);
2740 gen_helper_stl_raw(val, addr);
2741 break;
2742 case 0x5:
2743 /* Quadword virtual access */
2744 gen_helper_st_virt_to_phys(addr, addr);
2745 gen_helper_stq_raw(val, addr);
2746 break;
2747 case 0x6:
2748 /* Invalid */
2749 goto invalid_opc;
2750 case 0x7:
2751 /* Invalid */
2752 goto invalid_opc;
2753 case 0x8:
2754 /* Invalid */
2755 goto invalid_opc;
2756 case 0x9:
2757 /* Invalid */
2758 goto invalid_opc;
2759 case 0xA:
2760 /* Invalid */
2761 goto invalid_opc;
2762 case 0xB:
2763 /* Invalid */
2764 goto invalid_opc;
2765 case 0xC:
2766 /* Longword virtual access with alternate access mode */
2767 gen_helper_set_alt_mode();
2768 gen_helper_st_virt_to_phys(addr, addr);
2769 gen_helper_stl_raw(val, addr);
2770 gen_helper_restore_mode();
2771 break;
2772 case 0xD:
2773 /* Quadword virtual access with alternate access mode */
2774 gen_helper_set_alt_mode();
2775 gen_helper_st_virt_to_phys(addr, addr);
2776 gen_helper_stq_raw(val, addr);
2777 gen_helper_restore_mode();
2778 break;
2779 case 0xE:
2780 /* Invalid */
2781 goto invalid_opc;
2782 case 0xF:
2783 /* Invalid */
2784 goto invalid_opc;
2785 }
2786 if (ra == 31)
2787 tcg_temp_free(val);
2788 tcg_temp_free(addr);
2789 }
2790 break;
2791 #endif
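/* Memory-format loads and stores.  The trailing gen_load_mem arguments
   are (fp, clear): fp selects the FP register file and clear masks the
   low address bits (used by LDx_U earlier in the file).  gen_store_mem
   takes one extra flag, set only for the conditional stores, asking for
   temporaries that survive the branch they generate. */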
2792 case 0x20:
2793 /* LDF */
2794 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2795 break;
2796 case 0x21:
2797 /* LDG */
2798 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2799 break;
2800 case 0x22:
2801 /* LDS */
2802 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2803 break;
2804 case 0x23:
2805 /* LDT */
2806 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2807 break;
2808 case 0x24:
2809 /* STF */
2810 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2811 break;
2812 case 0x25:
2813 /* STG */
2814 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2815 break;
2816 case 0x26:
2817 /* STS */
2818 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2819 break;
2820 case 0x27:
2821 /* STT */
2822 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2823 break;
2824 case 0x28:
2825 /* LDL */
2826 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2827 break;
2828 case 0x29:
2829 /* LDQ */
2830 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2831 break;
2832 case 0x2A:
2833 /* LDL_L */
2834 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2835 break;
2836 case 0x2B:
2837 /* LDQ_L */
2838 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2839 break;
2840 case 0x2C:
2841 /* STL */
2842 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
2843 break;
2844 case 0x2D:
2845 /* STQ */
2846 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
2847 break;
2848 case 0x2E:
2849 /* STL_C */
2850 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
2851 break;
2852 case 0x2F:
2853 /* STQ_C */
2854 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
2855 break;
2856 case 0x30:
2857 /* BR */
2858 if (ra != 31)
2859 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2860 tcg_gen_movi_i64(cpu_pc, ctx->pc + ((int64_t)disp21 << 2));
2861 ret = 1;
2862 break;
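/* The floating-point conditional branches test fir[ra] against zero;
   gen_fbcond (defined earlier) emits the comparison. */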
2863 case 0x31: /* FBEQ */
2864 gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2865 ret = 1;
2866 break;
2867 case 0x32: /* FBLT */
2868 gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2869 ret = 1;
2870 break;
2871 case 0x33: /* FBLE */
2872 gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2873 ret = 1;
2874 break;
2875 case 0x34:
2876 /* BSR */
2877 if (ra != 31)
2878 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2879 tcg_gen_movi_i64(cpu_pc, ctx->pc + ((int64_t)disp21 << 2));
2880 ret = 1;
2881 break;
2882 case 0x35: /* FBNE */
2883 gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2884 ret = 1;
2885 break;
2886 case 0x36: /* FBGE */
2887 gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2888 ret = 1;
2889 break;
2890 case 0x37: /* FBGT */
2891 gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2892 ret = 1;
2893 break;
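/* For the integer branches, the final gen_bcond argument selects a
   low-bit test (BLBC/BLBS) instead of a full-register compare. */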
2894 case 0x38:
2895 /* BLBC */
2896 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2897 ret = 1;
2898 break;
2899 case 0x39:
2900 /* BEQ */
2901 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2902 ret = 1;
2903 break;
2904 case 0x3A:
2905 /* BLT */
2906 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2907 ret = 1;
2908 break;
2909 case 0x3B:
2910 /* BLE */
2911 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2912 ret = 1;
2913 break;
2914 case 0x3C:
2915 /* BLBS */
2916 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2917 ret = 1;
2918 break;
2919 case 0x3D:
2920 /* BNE */
2921 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2922 ret = 1;
2923 break;
2924 case 0x3E:
2925 /* BGE */
2926 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2927 ret = 1;
2928 break;
2929 case 0x3F:
2930 /* BGT */
2931 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2932 ret = 1;
2933 break;
2934 invalid_opc:
2935 gen_invalid(ctx);
2936 ret = 3;
2937 break;
2938 }
2939
2940 return ret;
2941 }
2942
2943 static inline void gen_intermediate_code_internal(CPUState *env,
2944 TranslationBlock *tb,
2945 int search_pc)
2946 {
2947 DisasContext ctx, *ctxp = &ctx;
2948 target_ulong pc_start;
2949 uint32_t insn;
2950 uint16_t *gen_opc_end;
2951 CPUBreakpoint *bp;
2952 int j, lj = -1;
2953 int ret;
2954 int num_insns;
2955 int max_insns;
2956
2957 pc_start = tb->pc;
2958 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2959 ctx.pc = pc_start;
2960 ctx.amask = env->amask;
2961 ctx.env = env;
2962 #if defined (CONFIG_USER_ONLY)
2963 ctx.mem_idx = 0;
2964 #else
2965 ctx.mem_idx = ((env->ps >> 3) & 3);
2966 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
2967 #endif
2968
2969 /* ??? Every TB begins with unset rounding mode, to be initialized on
2970 the first fp insn of the TB.  Alternatively we could define a proper
2971 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2972 to reset the FP_STATUS to that default at the end of any TB that
2973 changes the default.  We could even (gasp) dynamically figure out
2974 what default would be most efficient given the running program. */
2975 ctx.tb_rm = -1;
2976 /* Similarly for flush-to-zero. */
2977 ctx.tb_ftz = -1;
2978
2979 num_insns = 0;
2980 max_insns = tb->cflags & CF_COUNT_MASK;
2981 if (max_insns == 0)
2982 max_insns = CF_COUNT_MASK;
2983
2984 gen_icount_start();
2985 for (ret = 0; ret == 0;) {
2986 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2987 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2988 if (bp->pc == ctx.pc) {
2989 gen_excp(&ctx, EXCP_DEBUG, 0);
2990 break;
2991 }
2992 }
2993 }
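/* For the search_pc pass, record the guest PC and insn count of each
   generated op so that a host fault can be mapped back to a guest PC. */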
2994 if (search_pc) {
2995 j = gen_opc_ptr - gen_opc_buf;
2996 if (lj < j) {
2997 lj++;
2998 while (lj < j)
2999 gen_opc_instr_start[lj++] = 0;
3000 }
3001 gen_opc_pc[lj] = ctx.pc;
3002 gen_opc_instr_start[lj] = 1;
3003 gen_opc_icount[lj] = num_insns;
3004 }
3005 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3006 gen_io_start();
3007 insn = ldl_code(ctx.pc);
3008 num_insns++;
3009
3010 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3011 tcg_gen_debug_insn_start(ctx.pc);
3012 }
3013
3014 ctx.pc += 4;
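/* translate_one returns 0 to continue the TB, 1 if the insn wrote
   cpu_pc itself (branches and jumps), 2 to end the TB with cpu_pc
   updated below, and 3 after an invalid opcode raised OPCDEC. */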
3015 ret = translate_one(ctxp, insn);
3016 if (ret != 0)
3017 break;
3018 /* if we reach a page boundary or are single stepping, stop
3019 * generation
3020 */
3021 if (env->singlestep_enabled) {
3022 gen_excp(&ctx, EXCP_DEBUG, 0);
3023 break;
3024 }
3025
3026 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
3027 break;
3028
3029 if (gen_opc_ptr >= gen_opc_end)
3030 break;
3031
3032 if (num_insns >= max_insns)
3033 break;
3034
3035 if (singlestep) {
3036 break;
3037 }
3038 }
3039 if (ret != 1 && ret != 3) {
3040 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3041 }
3042 if (tb->cflags & CF_LAST_IO)
3043 gen_io_end();
3044 /* Generate the return instruction */
3045 tcg_gen_exit_tb(0);
3046 gen_icount_end(tb, num_insns);
3047 *gen_opc_ptr = INDEX_op_end;
3048 if (search_pc) {
3049 j = gen_opc_ptr - gen_opc_buf;
3050 lj++;
3051 while (lj <= j)
3052 gen_opc_instr_start[lj++] = 0;
3053 } else {
3054 tb->size = ctx.pc - pc_start;
3055 tb->icount = num_insns;
3056 }
3057 #ifdef DEBUG_DISAS
3058 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3059 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3060 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3061 qemu_log("\n");
3062 }
3063 #endif
3064 }
3065
3066 void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb)
3067 {
3068 gen_intermediate_code_internal(env, tb, 0);
3069 }
3070
3071 void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb)
3072 {
3073 gen_intermediate_code_internal(env, tb, 1);
3074 }
3075
3076 struct cpu_def_t {
3077 const char *name;
3078 int implver, amask;
3079 };
3080
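/* -cpu model table; an unrecognized name falls back to the ev67
   defaults chosen in cpu_alpha_init below. */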
3081 static const struct cpu_def_t cpu_defs[] = {
3082 { "ev4", IMPLVER_2106x, 0 },
3083 { "ev5", IMPLVER_21164, 0 },
3084 { "ev56", IMPLVER_21164, AMASK_BWX },
3085 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3086 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3087 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3088 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3089 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3090 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3091 { "21064", IMPLVER_2106x, 0 },
3092 { "21164", IMPLVER_21164, 0 },
3093 { "21164a", IMPLVER_21164, AMASK_BWX },
3094 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3095 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3096 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3097 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3098 };
3099
3100 CPUAlphaState *cpu_alpha_init(const char *cpu_model)
3101 {
3102 CPUAlphaState *env;
3103 int implver, amask, i, max;
3104
3105 env = qemu_mallocz(sizeof(CPUAlphaState));
3106 cpu_exec_init(env);
3107 alpha_translate_init();
3108 tlb_flush(env, 1);
3109
3110 /* Default to ev67; no reason not to emulate insns by default. */
3111 implver = IMPLVER_21264;
3112 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3113 | AMASK_TRAP | AMASK_PREFETCH);
3114
3115 max = ARRAY_SIZE(cpu_defs);
3116 for (i = 0; i < max; i++) {
3117 if (strcmp(cpu_model, cpu_defs[i].name) == 0) {
3118 implver = cpu_defs[i].implver;
3119 amask = cpu_defs[i].amask;
3120 break;
3121 }
3122 }
3123 env->implver = implver;
3124 env->amask = amask;
3125
3126 env->ps = 0x1F00;
3127 #if defined (CONFIG_USER_ONLY)
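/* Select a non-kernel current mode; (ps >> 3) & 3 becomes mem_idx
   in the translator. */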
3128 env->ps |= 1 << 3;
3129 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3130 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3131 #else
3132 pal_init(env);
3133 #endif
3134
3135 /* Initialize IPR */
3136 #if defined (CONFIG_USER_ONLY)
3137 env->ipr[IPR_EXC_ADDR] = 0;
3138 env->ipr[IPR_EXC_SUM] = 0;
3139 env->ipr[IPR_EXC_MASK] = 0;
3140 #else
3141 {
3142 uint64_t hwpcb;
3143 hwpcb = env->ipr[IPR_PCBB];
3144 env->ipr[IPR_ASN] = 0;
3145 env->ipr[IPR_ASTEN] = 0;
3146 env->ipr[IPR_ASTSR] = 0;
3147 env->ipr[IPR_DATFX] = 0;
3148 /* XXX: fix this */
3149 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3150 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3151 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3152 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3153 env->ipr[IPR_FEN] = 0;
3154 env->ipr[IPR_IPL] = 31;
3155 env->ipr[IPR_MCES] = 0;
3156 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3157 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3158 env->ipr[IPR_SISR] = 0;
3159 env->ipr[IPR_VIRBND] = -1ULL;
3160 }
3161 #endif
3162
3163 qemu_init_vcpu(env);
3164 return env;
3165 }
3166
3167 void gen_pc_load(CPUState *env, TranslationBlock *tb,
3168 unsigned long searched_pc, int pc_pos, void *puc)
3169 {
3170 env->pc = gen_opc_pc[pc_pos];
3171 }