/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
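/* Sizing note: "ir0".."ir9" need 4 bytes each including the NUL and
   "ir10".."ir30" need 5, giving 10*4 + 21*5; the "fir" names are one
   character longer, giving 10*5 + 21*6.  */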
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Various other loads are forms of
       prefetch, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
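        /* Whether the store succeeded or failed, invalidate the
           reservation so that a store-conditional without a fresh
           load-locked always fails.  */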
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return NO_EXIT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
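        /* That is, dest = (src != -0.0 ? -1 : 0) & src: the setcond
           result is negated into an all-ones or all-zeros mask, so
           only an exact -0.0 input becomes +0.0.  */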
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than to continue with the floating-point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
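    /* Unless inexact detection (QUAL_I) was requested, the inexact
       flag is silently dropped rather than raised.  */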
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
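        /* (Per the Alpha register format for longwords, value bits <31:30>
           live at register bits <63:62> and bits <29:0> at <58:29>, hence
           the shifts by 32 and 29.)  */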
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
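    /* Each set bit i of LIT selects byte i of the result mask; e.g.
       zapnot_mask(0x0f) == 0x00000000ffffffffull.  */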
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}


/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
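            /* The final "& 0x3f" folds a byte offset of 0 to a shift
               count of 0 rather than an undefined shift of 64.  */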
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

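/* Used for the RS/RC instructions: RA receives the old interrupt flag,
   and the flag is then set (RS) or cleared (RC).  */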
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
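/* cpu_pr_data returns a field offset into CPUAlphaState; since those
   offsets are small, the width of a sub-quadword field can be encoded
   in the spare high bits via PR_BYTE/PR_LONG.  */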
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            gen_helper_get_time(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia();
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !CONFIG_USER_ONLY */

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
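    /* Shifting the 21- and 12-bit displacement fields up to the sign bit
       of a 32-bit value and arithmetic-shifting back down sign-extends
       them without needing an explicit sign-extension helper.  */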
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
1936 case 0x1D:
1937 /* CMPULT */
1938 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1939 break;
1940 case 0x20:
1941 /* ADDQ */
1942 if (likely(rc != 31)) {
1943 if (ra != 31) {
1944 if (islit)
1945 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1946 else
1947 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1948 } else {
1949 if (islit)
1950 tcg_gen_movi_i64(cpu_ir[rc], lit);
1951 else
1952 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1953 }
1954 }
1955 break;
1956 case 0x22:
1957 /* S4ADDQ */
1958 if (likely(rc != 31)) {
1959 if (ra != 31) {
1960 TCGv tmp = tcg_temp_new();
1961 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1962 if (islit)
1963 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1964 else
1965 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1966 tcg_temp_free(tmp);
1967 } else {
1968 if (islit)
1969 tcg_gen_movi_i64(cpu_ir[rc], lit);
1970 else
1971 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1972 }
1973 }
1974 break;
1975 case 0x29:
1976 /* SUBQ */
1977 if (likely(rc != 31)) {
1978 if (ra != 31) {
1979 if (islit)
1980 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1981 else
1982 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1983 } else {
1984 if (islit)
1985 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1986 else
1987 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1988 }
1989 }
1990 break;
1991 case 0x2B:
1992 /* S4SUBQ */
1993 if (likely(rc != 31)) {
1994 if (ra != 31) {
1995 TCGv tmp = tcg_temp_new();
1996 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1997 if (islit)
1998 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1999 else
2000 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2001 tcg_temp_free(tmp);
2002 } else {
2003 if (islit)
2004 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2005 else
2006 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2007 }
2008 }
2009 break;
2010 case 0x2D:
2011 /* CMPEQ */
2012 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
2013 break;
2014 case 0x32:
2015 /* S8ADDQ */
2016 if (likely(rc != 31)) {
2017 if (ra != 31) {
2018 TCGv tmp = tcg_temp_new();
2019 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2020 if (islit)
2021 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2022 else
2023 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2024 tcg_temp_free(tmp);
2025 } else {
2026 if (islit)
2027 tcg_gen_movi_i64(cpu_ir[rc], lit);
2028 else
2029 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2030 }
2031 }
2032 break;
2033 case 0x3B:
2034 /* S8SUBQ */
2035 if (likely(rc != 31)) {
2036 if (ra != 31) {
2037 TCGv tmp = tcg_temp_new();
2038 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2039 if (islit)
2040 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2041 else
2042 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2043 tcg_temp_free(tmp);
2044 } else {
2045 if (islit)
2046 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2047 else
2048 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2049 }
2050 }
2051 break;
2052 case 0x3D:
2053 /* CMPULE */
2054 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2055 break;
2056 case 0x40:
2057 /* ADDL/V */
2058 gen_addlv(ra, rb, rc, islit, lit);
2059 break;
2060 case 0x49:
2061 /* SUBL/V */
2062 gen_sublv(ra, rb, rc, islit, lit);
2063 break;
2064 case 0x4D:
2065 /* CMPLT */
2066 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2067 break;
2068 case 0x60:
2069 /* ADDQ/V */
2070 gen_addqv(ra, rb, rc, islit, lit);
2071 break;
2072 case 0x69:
2073 /* SUBQ/V */
2074 gen_subqv(ra, rb, rc, islit, lit);
2075 break;
2076 case 0x6D:
2077 /* CMPLE */
2078 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2079 break;
2080 default:
2081 goto invalid_opc;
2082 }
2083 break;
2084 case 0x11:
2085 switch (fn7) {
2086 case 0x00:
2087 /* AND */
2088 if (likely(rc != 31)) {
2089 if (ra == 31)
2090 tcg_gen_movi_i64(cpu_ir[rc], 0);
2091 else if (islit)
2092 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2093 else
2094 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2095 }
2096 break;
2097 case 0x08:
2098 /* BIC */
2099 if (likely(rc != 31)) {
2100 if (ra != 31) {
2101 if (islit)
2102 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2103 else
2104 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2105 } else
2106 tcg_gen_movi_i64(cpu_ir[rc], 0);
2107 }
2108 break;
2109 case 0x14:
2110 /* CMOVLBS */
2111 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2112 break;
2113 case 0x16:
2114 /* CMOVLBC */
2115 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2116 break;
2117 case 0x20:
2118 /* BIS */
2119 if (likely(rc != 31)) {
2120 if (ra != 31) {
2121 if (islit)
2122 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2123 else
2124 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2125 } else {
2126 if (islit)
2127 tcg_gen_movi_i64(cpu_ir[rc], lit);
2128 else
2129 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2130 }
2131 }
2132 break;
2133 case 0x24:
2134 /* CMOVEQ */
2135 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2136 break;
2137 case 0x26:
2138 /* CMOVNE */
2139 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2140 break;
2141 case 0x28:
2142 /* ORNOT */
2143 if (likely(rc != 31)) {
2144 if (ra != 31) {
2145 if (islit)
2146 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2147 else
2148 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2149 } else {
2150 if (islit)
2151 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2152 else
2153 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2154 }
2155 }
2156 break;
2157 case 0x40:
2158 /* XOR */
2159 if (likely(rc != 31)) {
2160 if (ra != 31) {
2161 if (islit)
2162 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2163 else
2164 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2165 } else {
2166 if (islit)
2167 tcg_gen_movi_i64(cpu_ir[rc], lit);
2168 else
2169 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2170 }
2171 }
2172 break;
2173 case 0x44:
2174 /* CMOVLT */
2175 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2176 break;
2177 case 0x46:
2178 /* CMOVGE */
2179 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2180 break;
2181 case 0x48:
2182 /* EQV */
2183 if (likely(rc != 31)) {
2184 if (ra != 31) {
2185 if (islit)
2186 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2187 else
2188 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2189 } else {
2190 if (islit)
2191 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2192 else
2193 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2194 }
2195 }
2196 break;
2197 case 0x61:
2198 /* AMASK */
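/* AMASK clears the bits of its source that correspond to
   implemented architecture extensions; bits left set name features
   this CPU lacks. The feature mask is cached in the TB flags. */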
2199 if (likely(rc != 31)) {
2200 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2201
2202 if (islit) {
2203 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2204 } else {
2205 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2206 }
2207 }
2208 break;
2209 case 0x64:
2210 /* CMOVLE */
2211 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2212 break;
2213 case 0x66:
2214 /* CMOVGT */
2215 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2216 break;
2217 case 0x6C:
2218 /* IMPLVER */
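/* IMPLVER reports the implementation generation: 0 for the 21064
   (EV4), 1 for the 21164 (EV5), 2 for the 21264 (EV6) family. */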
2219 if (rc != 31)
2220 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2221 break;
2222 default:
2223 goto invalid_opc;
2224 }
2225 break;
2226 case 0x12:
2227 switch (fn7) {
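/* Byte-manipulation group. The constant passed to the gen_msk/
   gen_ext/gen_ins helpers below is a byte mask giving the operation
   width: 0x01 byte, 0x03 word, 0x0f longword, 0xff quadword. */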
2228 case 0x02:
2229 /* MSKBL */
2230 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2231 break;
2232 case 0x06:
2233 /* EXTBL */
2234 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2235 break;
2236 case 0x0B:
2237 /* INSBL */
2238 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2239 break;
2240 case 0x12:
2241 /* MSKWL */
2242 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2243 break;
2244 case 0x16:
2245 /* EXTWL */
2246 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2247 break;
2248 case 0x1B:
2249 /* INSWL */
2250 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2251 break;
2252 case 0x22:
2253 /* MSKLL */
2254 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2255 break;
2256 case 0x26:
2257 /* EXTLL */
2258 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2259 break;
2260 case 0x2B:
2261 /* INSLL */
2262 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2263 break;
2264 case 0x30:
2265 /* ZAP */
2266 gen_zap(ra, rb, rc, islit, lit);
2267 break;
2268 case 0x31:
2269 /* ZAPNOT */
2270 gen_zapnot(ra, rb, rc, islit, lit);
2271 break;
2272 case 0x32:
2273 /* MSKQL */
2274 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2275 break;
2276 case 0x34:
2277 /* SRL */
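/* Alpha shifts use only the low six bits of the count, hence the
   explicit "& 0x3f" on both the literal and register forms. */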
2278 if (likely(rc != 31)) {
2279 if (ra != 31) {
2280 if (islit)
2281 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2282 else {
2283 TCGv shift = tcg_temp_new();
2284 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2285 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2286 tcg_temp_free(shift);
2287 }
2288 } else
2289 tcg_gen_movi_i64(cpu_ir[rc], 0);
2290 }
2291 break;
2292 case 0x36:
2293 /* EXTQL */
2294 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2295 break;
2296 case 0x39:
2297 /* SLL */
2298 if (likely(rc != 31)) {
2299 if (ra != 31) {
2300 if (islit)
2301 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2302 else {
2303 TCGv shift = tcg_temp_new();
2304 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2305 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2306 tcg_temp_free(shift);
2307 }
2308 } else
2309 tcg_gen_movi_i64(cpu_ir[rc], 0);
2310 }
2311 break;
2312 case 0x3B:
2313 /* INSQL */
2314 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2315 break;
2316 case 0x3C:
2317 /* SRA */
2318 if (likely(rc != 31)) {
2319 if (ra != 31) {
2320 if (islit)
2321 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2322 else {
2323 TCGv shift = tcg_temp_new();
2324 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2325 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2326 tcg_temp_free(shift);
2327 }
2328 } else
2329 tcg_gen_movi_i64(cpu_ir[rc], 0);
2330 }
2331 break;
2332 case 0x52:
2333 /* MSKWH */
2334 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2335 break;
2336 case 0x57:
2337 /* INSWH */
2338 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2339 break;
2340 case 0x5A:
2341 /* EXTWH */
2342 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2343 break;
2344 case 0x62:
2345 /* MSKLH */
2346 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2347 break;
2348 case 0x67:
2349 /* INSLH */
2350 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2351 break;
2352 case 0x6A:
2353 /* EXTLH */
2354 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2355 break;
2356 case 0x72:
2357 /* MSKQH */
2358 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2359 break;
2360 case 0x77:
2361 /* INSQH */
2362 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2363 break;
2364 case 0x7A:
2365 /* EXTQH */
2366 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2367 break;
2368 default:
2369 goto invalid_opc;
2370 }
2371 break;
2372 case 0x13:
2373 switch (fn7) {
2374 case 0x00:
2375 /* MULL */
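/* MULL is a 32-bit multiply: form the product, then sign-extend
   the low 32 bits into rc. */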
2376 if (likely(rc != 31)) {
2377 if (ra == 31)
2378 tcg_gen_movi_i64(cpu_ir[rc], 0);
2379 else {
2380 if (islit)
2381 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2382 else
2383 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2384 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2385 }
2386 }
2387 break;
2388 case 0x20:
2389 /* MULQ */
2390 if (likely(rc != 31)) {
2391 if (ra == 31)
2392 tcg_gen_movi_i64(cpu_ir[rc], 0);
2393 else if (islit)
2394 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2395 else
2396 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2397 }
2398 break;
2399 case 0x30:
2400 /* UMULH */
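/* UMULH returns the high 64 bits of the unsigned 128-bit product. */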
2401 gen_umulh(ra, rb, rc, islit, lit);
2402 break;
2403 case 0x40:
2404 /* MULL/V */
2405 gen_mullv(ra, rb, rc, islit, lit);
2406 break;
2407 case 0x60:
2408 /* MULQ/V */
2409 gen_mulqv(ra, rb, rc, islit, lit);
2410 break;
2411 default:
2412 goto invalid_opc;
2413 }
2414 break;
2415 case 0x14:
2416 switch (fpfn) { /* fn11 & 0x3F */
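/* The square-root insns and the integer<->fp register moves were
   added by the FIX extension, so each case first checks the AMASK
   FIX bit from the TB flags. */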
2417 case 0x04:
2418 /* ITOFS */
2419 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2420 goto invalid_opc;
2421 }
2422 if (likely(rc != 31)) {
2423 if (ra != 31) {
2424 TCGv_i32 tmp = tcg_temp_new_i32();
2425 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2426 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2427 tcg_temp_free_i32(tmp);
2428 } else
2429 tcg_gen_movi_i64(cpu_fir[rc], 0);
2430 }
2431 break;
2432 case 0x0A:
2433 /* SQRTF */
2434 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2435 gen_fsqrtf(rb, rc);
2436 break;
2437 }
2438 goto invalid_opc;
2439 case 0x0B:
2440 /* SQRTS */
2441 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2442 gen_fsqrts(ctx, rb, rc, fn11);
2443 break;
2444 }
2445 goto invalid_opc;
2446 case 0x14:
2447 /* ITOFF */
2448 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2449 goto invalid_opc;
2450 }
2451 if (likely(rc != 31)) {
2452 if (ra != 31) {
2453 TCGv_i32 tmp = tcg_temp_new_i32();
2454 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2455 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2456 tcg_temp_free_i32(tmp);
2457 } else
2458 tcg_gen_movi_i64(cpu_fir[rc], 0);
2459 }
2460 break;
2461 case 0x24:
2462 /* ITOFT */
2463 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2464 goto invalid_opc;
2465 }
2466 if (likely(rc != 31)) {
2467 if (ra != 31)
2468 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2469 else
2470 tcg_gen_movi_i64(cpu_fir[rc], 0);
2471 }
2472 break;
2473 case 0x2A:
2474 /* SQRTG */
2475 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2476 gen_fsqrtg(rb, rc);
2477 break;
2478 }
2479 goto invalid_opc;
2480 case 0x2B:
2481 /* SQRTT */
2482 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2483 gen_fsqrtt(ctx, rb, rc, fn11);
2484 break;
2485 }
2486 goto invalid_opc;
2487 default:
2488 goto invalid_opc;
2489 }
2490 break;
2491 case 0x15:
2492 /* VAX floating point */
2493 /* XXX: rounding mode and trap are ignored (!) */
2494 switch (fpfn) { /* fn11 & 0x3F */
2495 case 0x00:
2496 /* ADDF */
2497 gen_faddf(ra, rb, rc);
2498 break;
2499 case 0x01:
2500 /* SUBF */
2501 gen_fsubf(ra, rb, rc);
2502 break;
2503 case 0x02:
2504 /* MULF */
2505 gen_fmulf(ra, rb, rc);
2506 break;
2507 case 0x03:
2508 /* DIVF */
2509 gen_fdivf(ra, rb, rc);
2510 break;
2511 case 0x1E:
2512 /* CVTDG */
2513 #if 0 /* TODO */
2514 gen_fcvtdg(rb, rc);
2515 #else
2516 goto invalid_opc;
2517 #endif
2518 break;
2519 case 0x20:
2520 /* ADDG */
2521 gen_faddg(ra, rb, rc);
2522 break;
2523 case 0x21:
2524 /* SUBG */
2525 gen_fsubg(ra, rb, rc);
2526 break;
2527 case 0x22:
2528 /* MULG */
2529 gen_fmulg(ra, rb, rc);
2530 break;
2531 case 0x23:
2532 /* DIVG */
2533 gen_fdivg(ra, rb, rc);
2534 break;
2535 case 0x25:
2536 /* CMPGEQ */
2537 gen_fcmpgeq(ra, rb, rc);
2538 break;
2539 case 0x26:
2540 /* CMPGLT */
2541 gen_fcmpglt(ra, rb, rc);
2542 break;
2543 case 0x27:
2544 /* CMPGLE */
2545 gen_fcmpgle(ra, rb, rc);
2546 break;
2547 case 0x2C:
2548 /* CVTGF */
2549 gen_fcvtgf(rb, rc);
2550 break;
2551 case 0x2D:
2552 /* CVTGD */
2553 #if 0 /* TODO */
2554 gen_fcvtgd(rb, rc);
2555 #else
2556 goto invalid_opc;
2557 #endif
2558 break;
2559 case 0x2F:
2560 /* CVTGQ */
2561 gen_fcvtgq(rb, rc);
2562 break;
2563 case 0x3C:
2564 /* CVTQF */
2565 gen_fcvtqf(rb, rc);
2566 break;
2567 case 0x3E:
2568 /* CVTQG */
2569 gen_fcvtqg(rb, rc);
2570 break;
2571 default:
2572 goto invalid_opc;
2573 }
2574 break;
2575 case 0x16:
2576 /* IEEE floating-point */
2577 switch (fpfn) { /* fn11 & 0x3F */
2578 case 0x00:
2579 /* ADDS */
2580 gen_fadds(ctx, ra, rb, rc, fn11);
2581 break;
2582 case 0x01:
2583 /* SUBS */
2584 gen_fsubs(ctx, ra, rb, rc, fn11);
2585 break;
2586 case 0x02:
2587 /* MULS */
2588 gen_fmuls(ctx, ra, rb, rc, fn11);
2589 break;
2590 case 0x03:
2591 /* DIVS */
2592 gen_fdivs(ctx, ra, rb, rc, fn11);
2593 break;
2594 case 0x20:
2595 /* ADDT */
2596 gen_faddt(ctx, ra, rb, rc, fn11);
2597 break;
2598 case 0x21:
2599 /* SUBT */
2600 gen_fsubt(ctx, ra, rb, rc, fn11);
2601 break;
2602 case 0x22:
2603 /* MULT */
2604 gen_fmult(ctx, ra, rb, rc, fn11);
2605 break;
2606 case 0x23:
2607 /* DIVT */
2608 gen_fdivt(ctx, ra, rb, rc, fn11);
2609 break;
2610 case 0x24:
2611 /* CMPTUN */
2612 gen_fcmptun(ctx, ra, rb, rc, fn11);
2613 break;
2614 case 0x25:
2615 /* CMPTEQ */
2616 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2617 break;
2618 case 0x26:
2619 /* CMPTLT */
2620 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2621 break;
2622 case 0x27:
2623 /* CMPTLE */
2624 gen_fcmptle(ctx, ra, rb, rc, fn11);
2625 break;
2626 case 0x2C:
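/* Function code 0x2C is shared: the full fn11 values 0x2AC and
   0x6AC encode CVTST; any other qualifier combination is CVTTS. */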
2627 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2628 /* CVTST */
2629 gen_fcvtst(ctx, rb, rc, fn11);
2630 } else {
2631 /* CVTTS */
2632 gen_fcvtts(ctx, rb, rc, fn11);
2633 }
2634 break;
2635 case 0x2F:
2636 /* CVTTQ */
2637 gen_fcvttq(ctx, rb, rc, fn11);
2638 break;
2639 case 0x3C:
2640 /* CVTQS */
2641 gen_fcvtqs(ctx, rb, rc, fn11);
2642 break;
2643 case 0x3E:
2644 /* CVTQT */
2645 gen_fcvtqt(ctx, rb, rc, fn11);
2646 break;
2647 default:
2648 goto invalid_opc;
2649 }
2650 break;
2651 case 0x17:
2652 switch (fn11) {
2653 case 0x010:
2654 /* CVTLQ */
2655 gen_fcvtlq(rb, rc);
2656 break;
2657 case 0x020:
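/* CPYS combines the sign of Fa with the exponent and fraction of
   Fb; with ra == rb it degenerates to a register move, which is
   the canonical FMOV idiom. */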
2658 if (likely(rc != 31)) {
2659 if (ra == rb) {
2660 /* FMOV */
2661 if (ra == 31)
2662 tcg_gen_movi_i64(cpu_fir[rc], 0);
2663 else
2664 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2665 } else {
2666 /* CPYS */
2667 gen_fcpys(ra, rb, rc);
2668 }
2669 }
2670 break;
2671 case 0x021:
2672 /* CPYSN */
2673 gen_fcpysn(ra, rb, rc);
2674 break;
2675 case 0x022:
2676 /* CPYSE */
2677 gen_fcpyse(ra, rb, rc);
2678 break;
2679 case 0x024:
2680 /* MT_FPCR */
2681 if (likely(ra != 31))
2682 gen_helper_store_fpcr(cpu_fir[ra]);
2683 else {
2684 TCGv tmp = tcg_const_i64(0);
2685 gen_helper_store_fpcr(tmp);
2686 tcg_temp_free(tmp);
2687 }
2688 break;
2689 case 0x025:
2690 /* MF_FPCR */
2691 if (likely(ra != 31))
2692 gen_helper_load_fpcr(cpu_fir[ra]);
2693 break;
2694 case 0x02A:
2695 /* FCMOVEQ */
2696 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2697 break;
2698 case 0x02B:
2699 /* FCMOVNE */
2700 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2701 break;
2702 case 0x02C:
2703 /* FCMOVLT */
2704 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2705 break;
2706 case 0x02D:
2707 /* FCMOVGE */
2708 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2709 break;
2710 case 0x02E:
2711 /* FCMOVLE */
2712 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2713 break;
2714 case 0x02F:
2715 /* FCMOVGT */
2716 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2717 break;
2718 case 0x030:
2719 /* CVTQL */
2720 gen_fcvtql(rb, rc);
2721 break;
2722 case 0x130:
2723 /* CVTQL/V */
2724 case 0x530:
2725 /* CVTQL/SV */
2726 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2727 /v doesn't do. The only thing I can think of is that /sv is a
2728 valid instruction merely for completeness in the ISA. */
2729 gen_fcvtql_v(ctx, rb, rc);
2730 break;
2731 default:
2732 goto invalid_opc;
2733 }
2734 break;
2735 case 0x18:
2736 switch ((uint16_t)disp16) {
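/* The miscellaneous opcode encodes its function in the displacement
   field. The barrier and prefetch hints below can be treated as
   no-ops, which suffices for TCG's serial execution of a TB. */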
2737 case 0x0000:
2738 /* TRAPB */
2739 /* No-op */
2740 break;
2741 case 0x0400:
2742 /* EXCB */
2743 /* No-op */
2744 break;
2745 case 0x4000:
2746 /* MB */
2747 /* No-op */
2748 break;
2749 case 0x4400:
2750 /* WMB */
2751 /* No-op */
2752 break;
2753 case 0x8000:
2754 /* FETCH */
2755 /* No-op */
2756 break;
2757 case 0xA000:
2758 /* FETCH_M */
2759 /* No-op */
2760 break;
2761 case 0xC000:
2762 /* RPCC */
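/* Reading the cycle counter is an I/O-like operation: under icount
   it must be bracketed by gen_io_start/gen_io_end, and the TB ends
   so the virtual cycle count stays exact. */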
2763 if (ra != 31) {
2764 if (use_icount) {
2765 gen_io_start();
2766 gen_helper_load_pcc(cpu_ir[ra]);
2767 gen_io_end();
2768 ret = EXIT_PC_STALE;
2769 } else {
2770 gen_helper_load_pcc(cpu_ir[ra]);
2771 }
2772 }
2773 break;
2774 case 0xE000:
2775 /* RC */
2776 gen_rx(ra, 0);
2777 break;
2778 case 0xE800:
2779 /* ECB */
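/* Evict data cache block hint -- no-op. */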
2780 break;
2781 case 0xF000:
2782 /* RS */
2783 gen_rx(ra, 1);
2784 break;
2785 case 0xF800:
2786 /* WH64 */
2787 /* No-op */
2788 break;
2789 default:
2790 goto invalid_opc;
2791 }
2792 break;
2793 case 0x19:
2794 /* HW_MFPR (PALcode) */
2795 #ifndef CONFIG_USER_ONLY
2796 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2797 return gen_mfpr(ra, insn & 0xffff);
2798 }
2799 #endif
2800 goto invalid_opc;
2801 case 0x1A:
2802 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2803 prediction stack action, which of course we don't implement. */
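/* The ~3 mask forces the target to longword alignment; ra, if
   live, receives the address of the following insn. */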
2804 if (rb != 31) {
2805 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2806 } else {
2807 tcg_gen_movi_i64(cpu_pc, 0);
2808 }
2809 if (ra != 31) {
2810 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2811 }
2812 ret = EXIT_PC_UPDATED;
2813 break;
2814 case 0x1B:
2815 /* HW_LD (PALcode) */
2816 #ifndef CONFIG_USER_ONLY
2817 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2818 TCGv addr;
2819
2820 if (ra == 31) {
2821 break;
2822 }
2823
2824 addr = tcg_temp_new();
2825 if (rb != 31)
2826 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2827 else
2828 tcg_gen_movi_i64(addr, disp12);
2829 switch ((insn >> 12) & 0xF) {
2830 case 0x0:
2831 /* Longword physical access (hw_ldl/p) */
2832 gen_helper_ldl_phys(cpu_ir[ra], addr);
2833 break;
2834 case 0x1:
2835 /* Quadword physical access (hw_ldq/p) */
2836 gen_helper_ldq_phys(cpu_ir[ra], addr);
2837 break;
2838 case 0x2:
2839 /* Longword physical access with lock (hw_ldl_l/p) */
2840 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
2841 break;
2842 case 0x3:
2843 /* Quadword physical access with lock (hw_ldq_l/p) */
2844 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
2845 break;
2846 case 0x4:
2847 /* Longword virtual PTE fetch (hw_ldl/v) */
2848 goto invalid_opc;
2849 case 0x5:
2850 /* Quadword virtual PTE fetch (hw_ldq/v) */
2851 goto invalid_opc;
2853 case 0x6:
2854 /* Invalid */
2855 goto invalid_opc;
2856 case 0x7:
2857 /* Invalid */
2858 goto invalid_opc;
2859 case 0x8:
2860 /* Longword virtual access (hw_ldl) */
2861 goto invalid_opc;
2862 case 0x9:
2863 /* Quadword virtual access (hw_ldq) */
2864 goto invalid_opc;
2865 case 0xA:
2866 /* Longword virtual access with protection check (hw_ldl/w) */
2867 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2868 break;
2869 case 0xB:
2870 /* Quadword virtual access with protection check (hw_ldq/w) */
2871 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2872 break;
2873 case 0xC:
2874 /* Longword virtual access with alt access mode (hw_ldl/a) */
2875 goto invalid_opc;
2876 case 0xD:
2877 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2878 goto invalid_opc;
2879 case 0xE:
2880 /* Longword virtual access with alternate access mode and
2881 protection checks (hw_ldl/wa) */
2882 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2883 break;
2884 case 0xF:
2885 /* Quadword virtual access with alternate access mode and
2886 protection checks (hw_ldq/wa) */
2887 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2888 break;
2889 }
2890 tcg_temp_free(addr);
2891 break;
2892 }
2893 #endif
2894 goto invalid_opc;
2895 case 0x1C:
2896 switch (fn7) {
2897 case 0x00:
2898 /* SEXTB */
2899 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2900 goto invalid_opc;
2901 }
2902 if (likely(rc != 31)) {
2903 if (islit)
2904 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2905 else
2906 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2907 }
2908 break;
2909 case 0x01:
2910 /* SEXTW */
2911 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2912 if (likely(rc != 31)) {
2913 if (islit) {
2914 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2915 } else {
2916 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2917 }
2918 }
2919 break;
2920 }
2921 goto invalid_opc;
2922 case 0x30:
2923 /* CTPOP */
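/* As with CTLZ/CTTZ below, a literal operand is folded to a
   constant at translation time using the host's ctpop64. */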
2924 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2925 if (likely(rc != 31)) {
2926 if (islit) {
2927 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2928 } else {
2929 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2930 }
2931 }
2932 break;
2933 }
2934 goto invalid_opc;
2935 case 0x31:
2936 /* PERR */
2937 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2938 gen_perr(ra, rb, rc, islit, lit);
2939 break;
2940 }
2941 goto invalid_opc;
2942 case 0x32:
2943 /* CTLZ */
2944 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2945 if (likely(rc != 31)) {
2946 if (islit) {
2947 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2948 } else {
2949 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2950 }
2951 }
2952 break;
2953 }
2954 goto invalid_opc;
2955 case 0x33:
2956 /* CTTZ */
2957 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2958 if (likely(rc != 31)) {
2959 if (islit) {
2960 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2961 } else {
2962 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2963 }
2964 }
2965 break;
2966 }
2967 goto invalid_opc;
2968 case 0x34:
2969 /* UNPKBW */
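/* The MVI pack/unpack forms encode ra as 31 and take no literal;
   any other encoding is illegal, hence the check below. */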
2970 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2971 if (real_islit || ra != 31) {
2972 goto invalid_opc;
2973 }
2974 gen_unpkbw(rb, rc);
2975 break;
2976 }
2977 goto invalid_opc;
2978 case 0x35:
2979 /* UNPKBL */
2980 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2981 if (real_islit || ra != 31) {
2982 goto invalid_opc;
2983 }
2984 gen_unpkbl(rb, rc);
2985 break;
2986 }
2987 goto invalid_opc;
2988 case 0x36:
2989 /* PKWB */
2990 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2991 if (real_islit || ra != 31) {
2992 goto invalid_opc;
2993 }
2994 gen_pkwb(rb, rc);
2995 break;
2996 }
2997 goto invalid_opc;
2998 case 0x37:
2999 /* PKLB */
3000 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3001 if (real_islit || ra != 31) {
3002 goto invalid_opc;
3003 }
3004 gen_pklb(rb, rc);
3005 break;
3006 }
3007 goto invalid_opc;
3008 case 0x38:
3009 /* MINSB8 */
3010 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3011 gen_minsb8(ra, rb, rc, islit, lit);
3012 break;
3013 }
3014 goto invalid_opc;
3015 case 0x39:
3016 /* MINSW4 */
3017 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3018 gen_minsw4(ra, rb, rc, islit, lit);
3019 break;
3020 }
3021 goto invalid_opc;
3022 case 0x3A:
3023 /* MINUB8 */
3024 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3025 gen_minub8(ra, rb, rc, islit, lit);
3026 break;
3027 }
3028 goto invalid_opc;
3029 case 0x3B:
3030 /* MINUW4 */
3031 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3032 gen_minuw4(ra, rb, rc, islit, lit);
3033 break;
3034 }
3035 goto invalid_opc;
3036 case 0x3C:
3037 /* MAXUB8 */
3038 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3039 gen_maxub8(ra, rb, rc, islit, lit);
3040 break;
3041 }
3042 goto invalid_opc;
3043 case 0x3D:
3044 /* MAXUW4 */
3045 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3046 gen_maxuw4(ra, rb, rc, islit, lit);
3047 break;
3048 }
3049 goto invalid_opc;
3050 case 0x3E:
3051 /* MAXSB8 */
3052 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3053 gen_maxsb8(ra, rb, rc, islit, lit);
3054 break;
3055 }
3056 goto invalid_opc;
3057 case 0x3F:
3058 /* MAXSW4 */
3059 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3060 gen_maxsw4(ra, rb, rc, islit, lit);
3061 break;
3062 }
3063 goto invalid_opc;
3064 case 0x70:
3065 /* FTOIT */
3066 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3067 goto invalid_opc;
3068 }
3069 if (likely(rc != 31)) {
3070 if (ra != 31)
3071 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3072 else
3073 tcg_gen_movi_i64(cpu_ir[rc], 0);
3074 }
3075 break;
3076 case 0x78:
3077 /* FTOIS */
3078 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3079 goto invalid_opc;
3080 }
3081 if (rc != 31) {
3082 TCGv_i32 tmp1 = tcg_temp_new_i32();
3083 if (ra != 31)
3084 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3085 else {
3086 TCGv tmp2 = tcg_const_i64(0);
3087 gen_helper_s_to_memory(tmp1, tmp2);
3088 tcg_temp_free(tmp2);
3089 }
3090 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3091 tcg_temp_free_i32(tmp1);
3092 }
3093 break;
3094 default:
3095 goto invalid_opc;
3096 }
3097 break;
3098 case 0x1D:
3099 /* HW_MTPR (PALcode) */
3100 #ifndef CONFIG_USER_ONLY
3101 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3102 return gen_mtpr(ctx, rb, insn & 0xffff);
3103 }
3104 #endif
3105 goto invalid_opc;
3106 case 0x1E:
3107 /* HW_RET (PALcode) */
3108 #ifndef CONFIG_USER_ONLY
3109 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3110 if (rb == 31) {
3111 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3112 address from EXC_ADDR. This turns out to be useful for our
3113 emulation PALcode, so continue to accept it. */
3114 TCGv tmp = tcg_temp_new();
3115 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
3116 gen_helper_hw_ret(tmp);
3117 tcg_temp_free(tmp);
3118 } else {
3119 gen_helper_hw_ret(cpu_ir[rb]);
3120 }
3121 ret = EXIT_PC_UPDATED;
3122 break;
3123 }
3124 #endif
3125 goto invalid_opc;
3126 case 0x1F:
3127 /* HW_ST (PALcode) */
3128 #ifndef CONFIG_USER_ONLY
3129 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3130 TCGv addr, val;
3131 addr = tcg_temp_new();
3132 if (rb != 31)
3133 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3134 else
3135 tcg_gen_movi_i64(addr, disp12);
3136 if (ra != 31)
3137 val = cpu_ir[ra];
3138 else {
3139 val = tcg_temp_new();
3140 tcg_gen_movi_i64(val, 0);
3141 }
3142 switch ((insn >> 12) & 0xF) {
3143 case 0x0:
3144 /* Longword physical access */
3145 gen_helper_stl_phys(addr, val);
3146 break;
3147 case 0x1:
3148 /* Quadword physical access */
3149 gen_helper_stq_phys(addr, val);
3150 break;
3151 case 0x2:
3152 /* Longword physical access with lock */
3153 gen_helper_stl_c_phys(val, addr, val);
3154 break;
3155 case 0x3:
3156 /* Quadword physical access with lock */
3157 gen_helper_stq_c_phys(val, addr, val);
3158 break;
3159 case 0x4:
3160 /* Longword virtual access */
3161 goto invalid_opc;
3162 case 0x5:
3163 /* Quadword virtual access */
3164 goto invalid_opc;
3165 case 0x6:
3166 /* Invalid */
3167 goto invalid_opc;
3168 case 0x7:
3169 /* Invalid */
3170 goto invalid_opc;
3171 case 0x8:
3172 /* Invalid */
3173 goto invalid_opc;
3174 case 0x9:
3175 /* Invalid */
3176 goto invalid_opc;
3177 case 0xA:
3178 /* Invalid */
3179 goto invalid_opc;
3180 case 0xB:
3181 /* Invalid */
3182 goto invalid_opc;
3183 case 0xC:
3184 /* Longword virtual access with alternate access mode */
3185 goto invalid_opc;
3186 case 0xD:
3187 /* Quadword virtual access with alternate access mode */
3188 goto invalid_opc;
3189 case 0xE:
3190 /* Invalid */
3191 goto invalid_opc;
3192 case 0xF:
3193 /* Invalid */
3194 goto invalid_opc;
3195 }
3196 if (ra == 31)
3197 tcg_temp_free(val);
3198 tcg_temp_free(addr);
3199 break;
3200 }
3201 #endif
3202 goto invalid_opc;
3203 case 0x20:
3204 /* LDF */
3205 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3206 break;
3207 case 0x21:
3208 /* LDG */
3209 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3210 break;
3211 case 0x22:
3212 /* LDS */
3213 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3214 break;
3215 case 0x23:
3216 /* LDT */
3217 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3218 break;
3219 case 0x24:
3220 /* STF */
3221 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3222 break;
3223 case 0x25:
3224 /* STG */
3225 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3226 break;
3227 case 0x26:
3228 /* STS */
3229 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3230 break;
3231 case 0x27:
3232 /* STT */
3233 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3234 break;
3235 case 0x28:
3236 /* LDL */
3237 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3238 break;
3239 case 0x29:
3240 /* LDQ */
3241 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3242 break;
3243 case 0x2A:
3244 /* LDL_L */
3245 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3246 break;
3247 case 0x2B:
3248 /* LDQ_L */
3249 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3250 break;
3251 case 0x2C:
3252 /* STL */
3253 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3254 break;
3255 case 0x2D:
3256 /* STQ */
3257 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3258 break;
3259 case 0x2E:
3260 /* STL_C */
3261 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3262 break;
3263 case 0x2F:
3264 /* STQ_C */
3265 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3266 break;
3267 case 0x30:
3268 /* BR */
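/* BR and BSR are architecturally identical apart from their
   branch-prediction hint, so both map to gen_bdirect. */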
3269 ret = gen_bdirect(ctx, ra, disp21);
3270 break;
3271 case 0x31: /* FBEQ */
3272 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3273 break;
3274 case 0x32: /* FBLT */
3275 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3276 break;
3277 case 0x33: /* FBLE */
3278 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3279 break;
3280 case 0x34:
3281 /* BSR */
3282 ret = gen_bdirect(ctx, ra, disp21);
3283 break;
3284 case 0x35: /* FBNE */
3285 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3286 break;
3287 case 0x36: /* FBGE */
3288 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3289 break;
3290 case 0x37: /* FBGT */
3291 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3292 break;
3293 case 0x38:
3294 /* BLBC */
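/* BLBC/BLBS test only the low bit of ra; the final argument to
   gen_bcond selects that single-bit test. */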
3295 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3296 break;
3297 case 0x39:
3298 /* BEQ */
3299 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3300 break;
3301 case 0x3A:
3302 /* BLT */
3303 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3304 break;
3305 case 0x3B:
3306 /* BLE */
3307 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3308 break;
3309 case 0x3C:
3310 /* BLBS */
3311 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3312 break;
3313 case 0x3D:
3314 /* BNE */
3315 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3316 break;
3317 case 0x3E:
3318 /* BGE */
3319 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3320 break;
3321 case 0x3F:
3322 /* BGT */
3323 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3324 break;
3325 invalid_opc:
3326 ret = gen_invalid(ctx);
3327 break;
3328 }
3329
3330 return ret;
3331 }
3332
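/* Translate the block of guest insns starting at tb->pc. When
   search_pc is set, also record per-insn pc/icount values in the
   gen_opc_* arrays so that a fault inside the TB can be mapped
   back to a guest PC (see restore_state_to_opc below). */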
3333 static inline void gen_intermediate_code_internal(CPUState *env,
3334 TranslationBlock *tb,
3335 int search_pc)
3336 {
3337 DisasContext ctx, *ctxp = &ctx;
3338 target_ulong pc_start;
3339 uint32_t insn;
3340 uint16_t *gen_opc_end;
3341 CPUBreakpoint *bp;
3342 int j, lj = -1;
3343 ExitStatus ret;
3344 int num_insns;
3345 int max_insns;
3346
3347 pc_start = tb->pc;
3348 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3349
3350 ctx.tb = tb;
3351 ctx.env = env;
3352 ctx.pc = pc_start;
3353 ctx.mem_idx = cpu_mmu_index(env);
3354
3355 /* ??? Every TB begins with unset rounding mode, to be initialized on
3356 the first fp insn of the TB. Alternatively we could define a proper
3357 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3358 to reset the FP_STATUS to that default at the end of any TB that
3359 changes the default. We could even (gasp) dynamically figure out
3360 what default would be most efficient given the running program. */
3361 ctx.tb_rm = -1;
3362 /* Similarly for flush-to-zero. */
3363 ctx.tb_ftz = -1;
3364
3365 num_insns = 0;
3366 max_insns = tb->cflags & CF_COUNT_MASK;
3367 if (max_insns == 0)
3368 max_insns = CF_COUNT_MASK;
3369
3370 gen_icount_start();
3371 do {
3372 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3373 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3374 if (bp->pc == ctx.pc) {
3375 gen_excp(&ctx, EXCP_DEBUG, 0);
3376 break;
3377 }
3378 }
3379 }
3380 if (search_pc) {
3381 j = gen_opc_ptr - gen_opc_buf;
3382 if (lj < j) {
3383 lj++;
3384 while (lj < j)
3385 gen_opc_instr_start[lj++] = 0;
3386 }
3387 gen_opc_pc[lj] = ctx.pc;
3388 gen_opc_instr_start[lj] = 1;
3389 gen_opc_icount[lj] = num_insns;
3390 }
3391 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3392 gen_io_start();
3393 insn = ldl_code(ctx.pc);
3394 num_insns++;
3395
3396 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3397 tcg_gen_debug_insn_start(ctx.pc);
3398 }
3399
3400 ctx.pc += 4;
3401 ret = translate_one(ctxp, insn);
3402
3403 /* If we reach a page boundary, are single stepping,
3404 or exhaust instruction count, stop generation. */
3405 if (ret == NO_EXIT
3406 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3407 || gen_opc_ptr >= gen_opc_end
3408 || num_insns >= max_insns
3409 || singlestep
3410 || env->singlestep_enabled)) {
3411 ret = EXIT_PC_STALE;
3412 }
3413 } while (ret == NO_EXIT);
3414
3415 if (tb->cflags & CF_LAST_IO) {
3416 gen_io_end();
3417 }
3418
3419 switch (ret) {
3420 case EXIT_GOTO_TB:
3421 case EXIT_NORETURN:
3422 break;
3423 case EXIT_PC_STALE:
3424 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3425 /* FALLTHRU */
3426 case EXIT_PC_UPDATED:
3427 if (env->singlestep_enabled) {
3428 gen_excp_1(EXCP_DEBUG, 0);
3429 } else {
3430 tcg_gen_exit_tb(0);
3431 }
3432 break;
3433 default:
3434 abort();
3435 }
3436
3437 gen_icount_end(tb, num_insns);
3438 *gen_opc_ptr = INDEX_op_end;
3439 if (search_pc) {
3440 j = gen_opc_ptr - gen_opc_buf;
3441 lj++;
3442 while (lj <= j)
3443 gen_opc_instr_start[lj++] = 0;
3444 } else {
3445 tb->size = ctx.pc - pc_start;
3446 tb->icount = num_insns;
3447 }
3448
3449 #ifdef DEBUG_DISAS
3450 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3451 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3452 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3453 qemu_log("\n");
3454 }
3455 #endif
3456 }
3457
3458 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3459 {
3460 gen_intermediate_code_internal(env, tb, 0);
3461 }
3462
3463 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3464 {
3465 gen_intermediate_code_internal(env, tb, 1);
3466 }
3467
3468 struct cpu_def_t {
3469 const char *name;
3470 int implver, amask;
3471 };
3472
3473 static const struct cpu_def_t cpu_defs[] = {
3474 { "ev4", IMPLVER_2106x, 0 },
3475 { "ev5", IMPLVER_21164, 0 },
3476 { "ev56", IMPLVER_21164, AMASK_BWX },
3477 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3478 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3479 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3480 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3481 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3482 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3483 { "21064", IMPLVER_2106x, 0 },
3484 { "21164", IMPLVER_21164, 0 },
3485 { "21164a", IMPLVER_21164, AMASK_BWX },
3486 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3487 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3488 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3489 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3490 };
3491
3492 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3493 {
3494 CPUAlphaState *env;
3495 int implver, amask, i, max;
3496
3497 env = g_malloc0(sizeof(CPUAlphaState));
3498 cpu_exec_init(env);
3499 alpha_translate_init();
3500 tlb_flush(env, 1);
3501
3502 /* Default to ev67; no reason not to emulate insns by default. */
3503 implver = IMPLVER_21264;
3504 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3505 | AMASK_TRAP | AMASK_PREFETCH);
3506
3507 max = ARRAY_SIZE(cpu_defs);
3508 for (i = 0; i < max; i++) {
3509 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3510 implver = cpu_defs[i].implver;
3511 amask = cpu_defs[i].amask;
3512 break;
3513 }
3514 }
3515 env->implver = implver;
3516 env->amask = amask;
3517
3518 #if defined (CONFIG_USER_ONLY)
3519 env->ps = PS_USER_MODE;
3520 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3521 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3522 #endif
3523 env->lock_addr = -1;
3524 env->fen = 1;
3525
3526 qemu_init_vcpu(env);
3527 return env;
3528 }
3529
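/* Map a position in the generated code back to a guest PC, using
   the table filled in by the search_pc translation pass above. */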
3530 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
3531 {
3532 env->pc = gen_opc_pc[pc_pos];
3533 }