/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
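/* The buffer above is sized exactly for the names generated below:
   "ir0".."ir9" take 4 bytes each including the NUL, "ir10".."ir30" take 5,
   and "fir0".."fir9" / "fir10".."fir30" take 5 and 6 respectively,
   matching the p += steps in alpha_translate_init.  */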

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
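
/* Both load-locked variants above record the effective address and the
   value just loaded; gen_store_conditional below compares against this
   pair to decide whether the matching STx_C succeeds.  */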

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Various other loads are forms of
       prefetch, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
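
/* For example, LDQ_U is dispatched with clear set, so the three low
   address bits are masked off and the load fetches the aligned quadword
   containing the addressed byte, as the architecture requires.  */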

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
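
/* The sequence above leaves 1 in ra on success and 0 on failure, the
   architected STx_C result, and writes -1 to lock_addr so that a second
   store-conditional without an intervening load-locked always fails.  */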

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
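
/* Passing the TB pointer (with a low bit selecting goto_tb slot 0 or 1)
   to tcg_gen_exit_tb is what lets the execution loop patch the goto_tb
   jump and chain this block directly to its successor once the target
   TB has been translated.  */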

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
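
/* Worked example: for EQ, masking with mzero - 1 clears bit 63, so both
   +0.0 (0x0000000000000000) and -0.0 (0x8000000000000000) become zero and
   compare equal.  For GE/LT, the setcond/neg pair produces all-ones unless
   src is exactly -0.0, in which case the AND forces the result to +0.0.  */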

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize to an integer
           comparison than to continue with the floating-point one.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
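
/* Both setters cache the qualifier bits just written in ctx->tb_rm and
   ctx->tb_ftz, so a run of FP instructions with identical qualifiers
   within one TB stores to fp_status only once.  */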

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
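
/* Both conversions implement the in-register longword layout: longword
   bits <29:0> live at register bits <58:29> and bits <31:30> at <63:62>,
   so CVTLQ gathers the two fields back together while CVTQL scatters
   them apart again.  */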

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it may still
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it may still
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it may still
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
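
/* The mask selects which field is copied from A: bit 63 alone for CPYS
   and CPYSN (sign only, with CPYSN inverting it via inv_a), and the top
   twelve bits for CPYSE (sign plus the 11-bit double-precision exponent).
   The remaining bits are always taken from B.  */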

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it may still
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);      \
    }
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it may still
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);     \
    }
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
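
/* Example: lit = 0x0f keeps bytes 0-3, giving mask 0x00000000ffffffff,
   so ZAPNOT with that literal is exactly a 32-bit zero-extension -- one
   of the special cases recognized in gen_zapnoti below.  */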

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}


/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
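
/* In gen_ext_h the left-shift count is 64 - 8 * (addr & 7), reduced
   modulo 64 by the & 0x3f so that an aligned address shifts by zero
   rather than by an undefined 64; the negate-and-mask sequence computes
   the same value for the register-operand form.  */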

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                    \
    static inline void glue(gen_, name)(int ra, int rb, int rc, int islit, \
                                        uint8_t lit)                    \
    {                                                                   \
        if (unlikely(rc == 31))                                         \
            return;                                                     \
                                                                        \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);       \
                tcg_temp_free(tmp);                                     \
            } else                                                      \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);            \
                tcg_temp_free(tmp2);                                    \
            } else                                                      \
                gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);      \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

/* Code to call arith3 helpers that also take the cpu env */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)

#define MVIOP2(name)                                            \
    static inline void glue(gen_, name)(int rb, int rc)         \
    {                                                           \
        if (unlikely(rc == 31))                                 \
            return;                                             \
        if (unlikely(rb == 31))                                 \
            tcg_gen_movi_i64(cpu_ir[rc], 0);                    \
        else                                                    \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);        \
    }
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
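
/* PR_BYTE and PR_LONG are flag bits well above any real structure offset,
   so they can be or'ed into the returned offset to encode the access width
   and masked back off with ~PR_BYTE / ~PR_LONG at the load or store site.  */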

static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            gen_helper_get_time(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
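
/* Under icount the clock read is bracketed by gen_io_start/gen_io_end
   and the TB ends immediately afterwards (EXIT_PC_STALE), since I/O-like
   operations must be the last thing executed in a deterministic TB.  */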

static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !CONFIG_USER_ONLY */

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
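
    /* The shift pairs above sign-extend the 21-bit and 12-bit displacement
       fields: shifting the field up to bit 31 and arithmetic-shifting back
       down replicates the top bit, the same trick the (int16_t) cast
       performs for disp16.  */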
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
1963 case 0x1D:
1964 /* CMPULT */
1965 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1966 break;
1967 case 0x20:
1968 /* ADDQ */
1969 if (likely(rc != 31)) {
1970 if (ra != 31) {
1971 if (islit)
1972 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1973 else
1974 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1975 } else {
1976 if (islit)
1977 tcg_gen_movi_i64(cpu_ir[rc], lit);
1978 else
1979 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1980 }
1981 }
1982 break;
1983 case 0x22:
1984 /* S4ADDQ */
1985 if (likely(rc != 31)) {
1986 if (ra != 31) {
1987 TCGv tmp = tcg_temp_new();
1988 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1989 if (islit)
1990 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1991 else
1992 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1993 tcg_temp_free(tmp);
1994 } else {
1995 if (islit)
1996 tcg_gen_movi_i64(cpu_ir[rc], lit);
1997 else
1998 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1999 }
2000 }
2001 break;
2002 case 0x29:
2003 /* SUBQ */
2004 if (likely(rc != 31)) {
2005 if (ra != 31) {
2006 if (islit)
2007 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2008 else
2009 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2010 } else {
2011 if (islit)
2012 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2013 else
2014 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2015 }
2016 }
2017 break;
2018 case 0x2B:
2019 /* S4SUBQ */
2020 if (likely(rc != 31)) {
2021 if (ra != 31) {
2022 TCGv tmp = tcg_temp_new();
2023 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2024 if (islit)
2025 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2026 else
2027 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2028 tcg_temp_free(tmp);
2029 } else {
2030 if (islit)
2031 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2032 else
2033 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2034 }
2035 }
2036 break;
2037 case 0x2D:
2038 /* CMPEQ */
2039 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
2040 break;
2041 case 0x32:
2042 /* S8ADDQ */
2043 if (likely(rc != 31)) {
2044 if (ra != 31) {
2045 TCGv tmp = tcg_temp_new();
2046 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2047 if (islit)
2048 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2049 else
2050 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2051 tcg_temp_free(tmp);
2052 } else {
2053 if (islit)
2054 tcg_gen_movi_i64(cpu_ir[rc], lit);
2055 else
2056 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2057 }
2058 }
2059 break;
2060 case 0x3B:
2061 /* S8SUBQ */
2062 if (likely(rc != 31)) {
2063 if (ra != 31) {
2064 TCGv tmp = tcg_temp_new();
2065 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2066 if (islit)
2067 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2068 else
2069 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2070 tcg_temp_free(tmp);
2071 } else {
2072 if (islit)
2073 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2074 else
2075 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2076 }
2077 }
2078 break;
2079 case 0x3D:
2080 /* CMPULE */
2081 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2082 break;
2083 case 0x40:
2084 /* ADDL/V */
2085 gen_addlv(ra, rb, rc, islit, lit);
2086 break;
2087 case 0x49:
2088 /* SUBL/V */
2089 gen_sublv(ra, rb, rc, islit, lit);
2090 break;
2091 case 0x4D:
2092 /* CMPLT */
2093 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2094 break;
2095 case 0x60:
2096 /* ADDQ/V */
2097 gen_addqv(ra, rb, rc, islit, lit);
2098 break;
2099 case 0x69:
2100 /* SUBQ/V */
2101 gen_subqv(ra, rb, rc, islit, lit);
2102 break;
2103 case 0x6D:
2104 /* CMPLE */
2105 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2106 break;
2107 default:
2108 goto invalid_opc;
2109 }
2110 break;
2111 case 0x11:
2112 switch (fn7) {
2113 case 0x00:
2114 /* AND */
2115 if (likely(rc != 31)) {
2116 if (ra == 31)
2117 tcg_gen_movi_i64(cpu_ir[rc], 0);
2118 else if (islit)
2119 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2120 else
2121 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2122 }
2123 break;
2124 case 0x08:
2125 /* BIC */
2126 if (likely(rc != 31)) {
2127 if (ra != 31) {
2128 if (islit)
2129 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2130 else
2131 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2132 } else
2133 tcg_gen_movi_i64(cpu_ir[rc], 0);
2134 }
2135 break;
2136 case 0x14:
2137 /* CMOVLBS */
2138 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2139 break;
2140 case 0x16:
2141 /* CMOVLBC */
2142 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2143 break;
2144 case 0x20:
2145 /* BIS */
2146 if (likely(rc != 31)) {
2147 if (ra != 31) {
2148 if (islit)
2149 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2150 else
2151 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2152 } else {
2153 if (islit)
2154 tcg_gen_movi_i64(cpu_ir[rc], lit);
2155 else
2156 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2157 }
2158 }
2159 break;
2160 case 0x24:
2161 /* CMOVEQ */
2162 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2163 break;
2164 case 0x26:
2165 /* CMOVNE */
2166 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2167 break;
2168 case 0x28:
2169 /* ORNOT */
2170 if (likely(rc != 31)) {
2171 if (ra != 31) {
2172 if (islit)
2173 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2174 else
2175 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2176 } else {
2177 if (islit)
2178 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2179 else
2180 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2181 }
2182 }
2183 break;
2184 case 0x40:
2185 /* XOR */
2186 if (likely(rc != 31)) {
2187 if (ra != 31) {
2188 if (islit)
2189 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2190 else
2191 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2192 } else {
2193 if (islit)
2194 tcg_gen_movi_i64(cpu_ir[rc], lit);
2195 else
2196 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2197 }
2198 }
2199 break;
2200 case 0x44:
2201 /* CMOVLT */
2202 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2203 break;
2204 case 0x46:
2205 /* CMOVGE */
2206 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2207 break;
2208 case 0x48:
2209 /* EQV */
2210 if (likely(rc != 31)) {
2211 if (ra != 31) {
2212 if (islit)
2213 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2214 else
2215 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2216 } else {
2217 if (islit)
2218 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2219 else
2220 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2221 }
2222 }
2223 break;
2224 case 0x61:
2225 /* AMASK */
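/* AMASK returns the source operand with the bits for all implemented
   architecture extensions cleared, letting software probe for BWX,
   FIX, CIX and MVI support at run time. */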
2226 if (likely(rc != 31)) {
2227 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2228
2229 if (islit) {
2230 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2231 } else {
2232 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2233 }
2234 }
2235 break;
2236 case 0x64:
2237 /* CMOVLE */
2238 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2239 break;
2240 case 0x66:
2241 /* CMOVGT */
2242 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2243 break;
2244 case 0x6C:
2245 /* IMPLVER */
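/* IMPLVER returns the implementation version of the CPU:
   0 for the EV4 family, 1 for EV5, 2 for EV6. */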
2246 if (rc != 31)
2247 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2248 break;
2249 default:
2250 goto invalid_opc;
2251 }
2252 break;
2253 case 0x12:
2254 switch (fn7) {
2255 case 0x02:
2256 /* MSKBL */
2257 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2258 break;
2259 case 0x06:
2260 /* EXTBL */
2261 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2262 break;
2263 case 0x0B:
2264 /* INSBL */
2265 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2266 break;
2267 case 0x12:
2268 /* MSKWL */
2269 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2270 break;
2271 case 0x16:
2272 /* EXTWL */
2273 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2274 break;
2275 case 0x1B:
2276 /* INSWL */
2277 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2278 break;
2279 case 0x22:
2280 /* MSKLL */
2281 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2282 break;
2283 case 0x26:
2284 /* EXTLL */
2285 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2286 break;
2287 case 0x2B:
2288 /* INSLL */
2289 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2290 break;
2291 case 0x30:
2292 /* ZAP */
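/* ZAP clears each byte of Ra whose corresponding bit in the low
   eight bits of the second operand is set; ZAPNOT clears the bytes
   whose bit is clear, e.g. "zapnot ra, 0x0f, rc" keeps only the
   low four bytes. */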
2293 gen_zap(ra, rb, rc, islit, lit);
2294 break;
2295 case 0x31:
2296 /* ZAPNOT */
2297 gen_zapnot(ra, rb, rc, islit, lit);
2298 break;
2299 case 0x32:
2300 /* MSKQL */
2301 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2302 break;
2303 case 0x34:
2304 /* SRL */
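/* Alpha shift instructions use only bits <5:0> of the count, hence
   the 0x3f masking of both the literal and the register forms. */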
2305 if (likely(rc != 31)) {
2306 if (ra != 31) {
2307 if (islit)
2308 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2309 else {
2310 TCGv shift = tcg_temp_new();
2311 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2312 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2313 tcg_temp_free(shift);
2314 }
2315 } else
2316 tcg_gen_movi_i64(cpu_ir[rc], 0);
2317 }
2318 break;
2319 case 0x36:
2320 /* EXTQL */
2321 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2322 break;
2323 case 0x39:
2324 /* SLL */
2325 if (likely(rc != 31)) {
2326 if (ra != 31) {
2327 if (islit)
2328 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2329 else {
2330 TCGv shift = tcg_temp_new();
2331 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2332 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2333 tcg_temp_free(shift);
2334 }
2335 } else
2336 tcg_gen_movi_i64(cpu_ir[rc], 0);
2337 }
2338 break;
2339 case 0x3B:
2340 /* INSQL */
2341 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2342 break;
2343 case 0x3C:
2344 /* SRA */
2345 if (likely(rc != 31)) {
2346 if (ra != 31) {
2347 if (islit)
2348 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2349 else {
2350 TCGv shift = tcg_temp_new();
2351 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2352 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2353 tcg_temp_free(shift);
2354 }
2355 } else
2356 tcg_gen_movi_i64(cpu_ir[rc], 0);
2357 }
2358 break;
2359 case 0x52:
2360 /* MSKWH */
2361 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2362 break;
2363 case 0x57:
2364 /* INSWH */
2365 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2366 break;
2367 case 0x5A:
2368 /* EXTWH */
2369 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2370 break;
2371 case 0x62:
2372 /* MSKLH */
2373 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2374 break;
2375 case 0x67:
2376 /* INSLH */
2377 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2378 break;
2379 case 0x6A:
2380 /* EXTLH */
2381 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2382 break;
2383 case 0x72:
2384 /* MSKQH */
2385 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2386 break;
2387 case 0x77:
2388 /* INSQH */
2389 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2390 break;
2391 case 0x7A:
2392 /* EXTQH */
2393 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2394 break;
2395 default:
2396 goto invalid_opc;
2397 }
2398 break;
2399 case 0x13:
2400 switch (fn7) {
2401 case 0x00:
2402 /* MULL */
2403 if (likely(rc != 31)) {
2404 if (ra == 31)
2405 tcg_gen_movi_i64(cpu_ir[rc], 0);
2406 else {
2407 if (islit)
2408 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2409 else
2410 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2411 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2412 }
2413 }
2414 break;
2415 case 0x20:
2416 /* MULQ */
2417 if (likely(rc != 31)) {
2418 if (ra == 31)
2419 tcg_gen_movi_i64(cpu_ir[rc], 0);
2420 else if (islit)
2421 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2422 else
2423 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2424 }
2425 break;
2426 case 0x30:
2427 /* UMULH */
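/* UMULH wants only the high 64 bits of the unsigned 128-bit product;
   tcg_gen_mulu2_i64 produces both halves and the low half is simply
   discarded. */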
2428 {
2429 TCGv low;
2430 if (unlikely(rc == 31)) {
2431 break;
2432 }
2433 if (ra == 31) {
2434 tcg_gen_movi_i64(cpu_ir[rc], 0);
2435 break;
2436 }
2437 low = tcg_temp_new();
2438 if (islit) {
2439 tcg_gen_movi_i64(low, lit);
2440 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2441 } else {
2442 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2443 }
2444 tcg_temp_free(low);
2445 }
2446 break;
2447 case 0x40:
2448 /* MULL/V */
2449 gen_mullv(ra, rb, rc, islit, lit);
2450 break;
2451 case 0x60:
2452 /* MULQ/V */
2453 gen_mulqv(ra, rb, rc, islit, lit);
2454 break;
2455 default:
2456 goto invalid_opc;
2457 }
2458 break;
2459 case 0x14:
2460 switch (fpfn) { /* fn11 & 0x3F */
2461 case 0x04:
2462 /* ITOFS */
2463 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2464 goto invalid_opc;
2465 }
2466 if (likely(rc != 31)) {
2467 if (ra != 31) {
2468 TCGv_i32 tmp = tcg_temp_new_i32();
2469 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2470 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2471 tcg_temp_free_i32(tmp);
2472 } else
2473 tcg_gen_movi_i64(cpu_fir[rc], 0);
2474 }
2475 break;
2476 case 0x0A:
2477 /* SQRTF */
2478 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2479 gen_fsqrtf(rb, rc);
2480 break;
2481 }
2482 goto invalid_opc;
2483 case 0x0B:
2484 /* SQRTS */
2485 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2486 gen_fsqrts(ctx, rb, rc, fn11);
2487 break;
2488 }
2489 goto invalid_opc;
2490 case 0x14:
2491 /* ITOFF */
2492 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2493 goto invalid_opc;
2494 }
2495 if (likely(rc != 31)) {
2496 if (ra != 31) {
2497 TCGv_i32 tmp = tcg_temp_new_i32();
2498 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2499 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2500 tcg_temp_free_i32(tmp);
2501 } else
2502 tcg_gen_movi_i64(cpu_fir[rc], 0);
2503 }
2504 break;
2505 case 0x24:
2506 /* ITOFT */
2507 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2508 goto invalid_opc;
2509 }
2510 if (likely(rc != 31)) {
2511 if (ra != 31)
2512 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2513 else
2514 tcg_gen_movi_i64(cpu_fir[rc], 0);
2515 }
2516 break;
2517 case 0x2A:
2518 /* SQRTG */
2519 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2520 gen_fsqrtg(rb, rc);
2521 break;
2522 }
2523 goto invalid_opc;
2524 case 0x2B:
2525 /* SQRTT */
2526 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2527 gen_fsqrtt(ctx, rb, rc, fn11);
2528 break;
2529 }
2530 goto invalid_opc;
2531 default:
2532 goto invalid_opc;
2533 }
2534 break;
2535 case 0x15:
2536 /* VAX floating point */
2537 /* XXX: the rounding mode and traps are ignored (!) */
2538 switch (fpfn) { /* fn11 & 0x3F */
2539 case 0x00:
2540 /* ADDF */
2541 gen_faddf(ra, rb, rc);
2542 break;
2543 case 0x01:
2544 /* SUBF */
2545 gen_fsubf(ra, rb, rc);
2546 break;
2547 case 0x02:
2548 /* MULF */
2549 gen_fmulf(ra, rb, rc);
2550 break;
2551 case 0x03:
2552 /* DIVF */
2553 gen_fdivf(ra, rb, rc);
2554 break;
2555 case 0x1E:
2556 /* CVTDG */
2557 #if 0 /* TODO */
2558 gen_fcvtdg(rb, rc);
2559 #else
2560 goto invalid_opc;
2561 #endif
2562 break;
2563 case 0x20:
2564 /* ADDG */
2565 gen_faddg(ra, rb, rc);
2566 break;
2567 case 0x21:
2568 /* SUBG */
2569 gen_fsubg(ra, rb, rc);
2570 break;
2571 case 0x22:
2572 /* MULG */
2573 gen_fmulg(ra, rb, rc);
2574 break;
2575 case 0x23:
2576 /* DIVG */
2577 gen_fdivg(ra, rb, rc);
2578 break;
2579 case 0x25:
2580 /* CMPGEQ */
2581 gen_fcmpgeq(ra, rb, rc);
2582 break;
2583 case 0x26:
2584 /* CMPGLT */
2585 gen_fcmpglt(ra, rb, rc);
2586 break;
2587 case 0x27:
2588 /* CMPGLE */
2589 gen_fcmpgle(ra, rb, rc);
2590 break;
2591 case 0x2C:
2592 /* CVTGF */
2593 gen_fcvtgf(rb, rc);
2594 break;
2595 case 0x2D:
2596 /* CVTGD */
2597 #if 0 /* TODO */
2598 gen_fcvtgd(rb, rc);
2599 #else
2600 goto invalid_opc;
2601 #endif
2602 break;
2603 case 0x2F:
2604 /* CVTGQ */
2605 gen_fcvtgq(rb, rc);
2606 break;
2607 case 0x3C:
2608 /* CVTQF */
2609 gen_fcvtqf(rb, rc);
2610 break;
2611 case 0x3E:
2612 /* CVTQG */
2613 gen_fcvtqg(rb, rc);
2614 break;
2615 default:
2616 goto invalid_opc;
2617 }
2618 break;
2619 case 0x16:
2620 /* IEEE floating-point */
2621 switch (fpfn) { /* fn11 & 0x3F */
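/* For IEEE operates fn11 is the full 11-bit function field: bits
   <5:0> select the operation (fpfn), while the upper bits carry the
   rounding-mode and trap qualifiers that the gen_f* helpers decode
   from fn11. */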
2622 case 0x00:
2623 /* ADDS */
2624 gen_fadds(ctx, ra, rb, rc, fn11);
2625 break;
2626 case 0x01:
2627 /* SUBS */
2628 gen_fsubs(ctx, ra, rb, rc, fn11);
2629 break;
2630 case 0x02:
2631 /* MULS */
2632 gen_fmuls(ctx, ra, rb, rc, fn11);
2633 break;
2634 case 0x03:
2635 /* DIVS */
2636 gen_fdivs(ctx, ra, rb, rc, fn11);
2637 break;
2638 case 0x20:
2639 /* ADDT */
2640 gen_faddt(ctx, ra, rb, rc, fn11);
2641 break;
2642 case 0x21:
2643 /* SUBT */
2644 gen_fsubt(ctx, ra, rb, rc, fn11);
2645 break;
2646 case 0x22:
2647 /* MULT */
2648 gen_fmult(ctx, ra, rb, rc, fn11);
2649 break;
2650 case 0x23:
2651 /* DIVT */
2652 gen_fdivt(ctx, ra, rb, rc, fn11);
2653 break;
2654 case 0x24:
2655 /* CMPTUN */
2656 gen_fcmptun(ctx, ra, rb, rc, fn11);
2657 break;
2658 case 0x25:
2659 /* CMPTEQ */
2660 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2661 break;
2662 case 0x26:
2663 /* CMPTLT */
2664 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2665 break;
2666 case 0x27:
2667 /* CMPTLE */
2668 gen_fcmptle(ctx, ra, rb, rc, fn11);
2669 break;
2670 case 0x2C:
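/* Function 0x2C is overloaded: the full fn11 values 0x2AC and 0x6AC
   denote CVTST, every other qualifier combination is CVTTS. */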
2671 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2672 /* CVTST */
2673 gen_fcvtst(ctx, rb, rc, fn11);
2674 } else {
2675 /* CVTTS */
2676 gen_fcvtts(ctx, rb, rc, fn11);
2677 }
2678 break;
2679 case 0x2F:
2680 /* CVTTQ */
2681 gen_fcvttq(ctx, rb, rc, fn11);
2682 break;
2683 case 0x3C:
2684 /* CVTQS */
2685 gen_fcvtqs(ctx, rb, rc, fn11);
2686 break;
2687 case 0x3E:
2688 /* CVTQT */
2689 gen_fcvtqt(ctx, rb, rc, fn11);
2690 break;
2691 default:
2692 goto invalid_opc;
2693 }
2694 break;
2695 case 0x17:
2696 switch (fn11) {
2697 case 0x010:
2698 /* CVTLQ */
2699 gen_fcvtlq(rb, rc);
2700 break;
2701 case 0x020:
2702 if (likely(rc != 31)) {
2703 if (ra == rb) {
2704 /* FMOV */
2705 if (ra == 31)
2706 tcg_gen_movi_i64(cpu_fir[rc], 0);
2707 else
2708 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2709 } else {
2710 /* CPYS */
2711 gen_fcpys(ra, rb, rc);
2712 }
2713 }
2714 break;
2715 case 0x021:
2716 /* CPYSN */
2717 gen_fcpysn(ra, rb, rc);
2718 break;
2719 case 0x022:
2720 /* CPYSE */
2721 gen_fcpyse(ra, rb, rc);
2722 break;
2723 case 0x024:
2724 /* MT_FPCR */
2725 if (likely(ra != 31))
2726 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
2727 else {
2728 TCGv tmp = tcg_const_i64(0);
2729 gen_helper_store_fpcr(cpu_env, tmp);
2730 tcg_temp_free(tmp);
2731 }
2732 break;
2733 case 0x025:
2734 /* MF_FPCR */
2735 if (likely(ra != 31))
2736 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
2737 break;
2738 case 0x02A:
2739 /* FCMOVEQ */
2740 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2741 break;
2742 case 0x02B:
2743 /* FCMOVNE */
2744 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2745 break;
2746 case 0x02C:
2747 /* FCMOVLT */
2748 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2749 break;
2750 case 0x02D:
2751 /* FCMOVGE */
2752 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2753 break;
2754 case 0x02E:
2755 /* FCMOVLE */
2756 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2757 break;
2758 case 0x02F:
2759 /* FCMOVGT */
2760 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2761 break;
2762 case 0x030:
2763 /* CVTQL */
2764 gen_fcvtql(rb, rc);
2765 break;
2766 case 0x130:
2767 /* CVTQL/V */
2768 case 0x530:
2769 /* CVTQL/SV */
2770 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2771 /v doesn't do. The only thing I can think of is that /sv is a
2772 valid instruction merely for completeness in the ISA. */
2773 gen_fcvtql_v(ctx, rb, rc);
2774 break;
2775 default:
2776 goto invalid_opc;
2777 }
2778 break;
2779 case 0x18:
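/* Opcode 0x18 is the miscellaneous-instruction format: the operation
   is encoded in the 16-bit displacement field instead of a separate
   function field. */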
2780 switch ((uint16_t)disp16) {
2781 case 0x0000:
2782 /* TRAPB */
2783 /* No-op. */
2784 break;
2785 case 0x0400:
2786 /* EXCB */
2787 /* No-op. */
2788 break;
2789 case 0x4000:
2790 /* MB */
2791 /* No-op */
2792 break;
2793 case 0x4400:
2794 /* WMB */
2795 /* No-op */
2796 break;
2797 case 0x8000:
2798 /* FETCH */
2799 /* No-op */
2800 break;
2801 case 0xA000:
2802 /* FETCH_M */
2803 /* No-op */
2804 break;
2805 case 0xC000:
2806 /* RPCC */
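/* With icount enabled, reading the cycle counter counts as I/O:
   bracket the helper with gen_io_start/gen_io_end and end the TB
   (EXIT_PC_STALE) so the instruction count stays exact. */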
2807 if (ra != 31) {
2808 if (use_icount) {
2809 gen_io_start();
2810 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2811 gen_io_end();
2812 ret = EXIT_PC_STALE;
2813 } else {
2814 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2815 }
2816 }
2817 break;
2818 case 0xE000:
2819 /* RC */
2820 gen_rx(ra, 0);
2821 break;
2822 case 0xE800:
2823 /* ECB */
2824 break;
2825 case 0xF000:
2826 /* RS */
2827 gen_rx(ra, 1);
2828 break;
2829 case 0xF800:
2830 /* WH64 */
2831 /* No-op */
2832 break;
2833 default:
2834 goto invalid_opc;
2835 }
2836 break;
2837 case 0x19:
2838 /* HW_MFPR (PALcode) */
2839 #ifndef CONFIG_USER_ONLY
2840 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2841 return gen_mfpr(ra, insn & 0xffff);
2842 }
2843 #endif
2844 goto invalid_opc;
2845 case 0x1A:
2846 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2847 prediction stack action, which of course we don't implement. */
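/* The target comes from Rb with the two low (hint) bits cleared,
   and Ra receives the return address, i.e. the updated PC. */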
2848 if (rb != 31) {
2849 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2850 } else {
2851 tcg_gen_movi_i64(cpu_pc, 0);
2852 }
2853 if (ra != 31) {
2854 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2855 }
2856 ret = EXIT_PC_UPDATED;
2857 break;
2858 case 0x1B:
2859 /* HW_LD (PALcode) */
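/* The access type is encoded in insn<15:12> and selects among the
   physical, locked and alternate-mode variants decoded below. */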
2860 #ifndef CONFIG_USER_ONLY
2861 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2862 TCGv addr;
2863
2864 if (ra == 31) {
2865 break;
2866 }
2867
2868 addr = tcg_temp_new();
2869 if (rb != 31)
2870 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2871 else
2872 tcg_gen_movi_i64(addr, disp12);
2873 switch ((insn >> 12) & 0xF) {
2874 case 0x0:
2875 /* Longword physical access (hw_ldl/p) */
2876 gen_helper_ldl_phys(cpu_ir[ra], addr);
2877 break;
2878 case 0x1:
2879 /* Quadword physical access (hw_ldq/p) */
2880 gen_helper_ldq_phys(cpu_ir[ra], addr);
2881 break;
2882 case 0x2:
2883 /* Longword physical access with lock (hw_ldl_l/p) */
2884 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
2885 break;
2886 case 0x3:
2887 /* Quadword physical access with lock (hw_ldq_l/p) */
2888 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
2889 break;
2890 case 0x4:
2891 /* Longword virtual PTE fetch (hw_ldl/v) */
2892 goto invalid_opc;
2893 case 0x5:
2894 /* Quadword virtual PTE fetch (hw_ldq/v) */
2895 goto invalid_opc;
2897 case 0x6:
2898 /* Invalid */
2899 goto invalid_opc;
2900 case 0x7:
2901 /* Invalid */
2902 goto invalid_opc;
2903 case 0x8:
2904 /* Longword virtual access (hw_ldl) */
2905 goto invalid_opc;
2906 case 0x9:
2907 /* Quadword virtual access (hw_ldq) */
2908 goto invalid_opc;
2909 case 0xA:
2910 /* Longword virtual access with protection check (hw_ldl/w) */
2911 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2912 break;
2913 case 0xB:
2914 /* Quadword virtual access with protection check (hw_ldq/w) */
2915 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2916 break;
2917 case 0xC:
2918 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2919 goto invalid_opc;
2920 case 0xD:
2921 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2922 goto invalid_opc;
2923 case 0xE:
2924 /* Longword virtual access with alternate access mode and
2925 protection checks (hw_ldl/wa) */
2926 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2927 break;
2928 case 0xF:
2929 /* Quadword virtual access with alternate access mode and
2930 protection checks (hw_ldq/wa) */
2931 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2932 break;
2933 }
2934 tcg_temp_free(addr);
2935 break;
2936 }
2937 #endif
2938 goto invalid_opc;
2939 case 0x1C:
2940 switch (fn7) {
2941 case 0x00:
2942 /* SEXTB */
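/* SEXTB and SEXTW belong to the BWX extension and are valid only
   when the TB flags advertise BWX support. */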
2943 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2944 goto invalid_opc;
2945 }
2946 if (likely(rc != 31)) {
2947 if (islit)
2948 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2949 else
2950 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2951 }
2952 break;
2953 case 0x01:
2954 /* SEXTW */
2955 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2956 if (likely(rc != 31)) {
2957 if (islit) {
2958 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2959 } else {
2960 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2961 }
2962 }
2963 break;
2964 }
2965 goto invalid_opc;
2966 case 0x30:
2967 /* CTPOP */
2968 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2969 if (likely(rc != 31)) {
2970 if (islit) {
2971 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2972 } else {
2973 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2974 }
2975 }
2976 break;
2977 }
2978 goto invalid_opc;
2979 case 0x31:
2980 /* PERR */
2981 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2982 gen_perr(ra, rb, rc, islit, lit);
2983 break;
2984 }
2985 goto invalid_opc;
2986 case 0x32:
2987 /* CTLZ */
2988 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2989 if (likely(rc != 31)) {
2990 if (islit) {
2991 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2992 } else {
2993 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2994 }
2995 }
2996 break;
2997 }
2998 goto invalid_opc;
2999 case 0x33:
3000 /* CTTZ */
3001 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3002 if (likely(rc != 31)) {
3003 if (islit) {
3004 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3005 } else {
3006 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3007 }
3008 }
3009 break;
3010 }
3011 goto invalid_opc;
3012 case 0x34:
3013 /* UNPKBW */
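/* The MVI pack/unpack operations take their single operand in Rb;
   Ra must be R31 and a literal is not allowed, so any other
   encoding is invalid. */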
3014 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3015 if (real_islit || ra != 31) {
3016 goto invalid_opc;
3017 }
3018 gen_unpkbw(rb, rc);
3019 break;
3020 }
3021 goto invalid_opc;
3022 case 0x35:
3023 /* UNPKBL */
3024 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3025 if (real_islit || ra != 31) {
3026 goto invalid_opc;
3027 }
3028 gen_unpkbl(rb, rc);
3029 break;
3030 }
3031 goto invalid_opc;
3032 case 0x36:
3033 /* PKWB */
3034 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3035 if (real_islit || ra != 31) {
3036 goto invalid_opc;
3037 }
3038 gen_pkwb(rb, rc);
3039 break;
3040 }
3041 goto invalid_opc;
3042 case 0x37:
3043 /* PKLB */
3044 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3045 if (real_islit || ra != 31) {
3046 goto invalid_opc;
3047 }
3048 gen_pklb(rb, rc);
3049 break;
3050 }
3051 goto invalid_opc;
3052 case 0x38:
3053 /* MINSB8 */
3054 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3055 gen_minsb8(ra, rb, rc, islit, lit);
3056 break;
3057 }
3058 goto invalid_opc;
3059 case 0x39:
3060 /* MINSW4 */
3061 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3062 gen_minsw4(ra, rb, rc, islit, lit);
3063 break;
3064 }
3065 goto invalid_opc;
3066 case 0x3A:
3067 /* MINUB8 */
3068 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3069 gen_minub8(ra, rb, rc, islit, lit);
3070 break;
3071 }
3072 goto invalid_opc;
3073 case 0x3B:
3074 /* MINUW4 */
3075 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3076 gen_minuw4(ra, rb, rc, islit, lit);
3077 break;
3078 }
3079 goto invalid_opc;
3080 case 0x3C:
3081 /* MAXUB8 */
3082 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3083 gen_maxub8(ra, rb, rc, islit, lit);
3084 break;
3085 }
3086 goto invalid_opc;
3087 case 0x3D:
3088 /* MAXUW4 */
3089 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3090 gen_maxuw4(ra, rb, rc, islit, lit);
3091 break;
3092 }
3093 goto invalid_opc;
3094 case 0x3E:
3095 /* MAXSB8 */
3096 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3097 gen_maxsb8(ra, rb, rc, islit, lit);
3098 break;
3099 }
3100 goto invalid_opc;
3101 case 0x3F:
3102 /* MAXSW4 */
3103 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3104 gen_maxsw4(ra, rb, rc, islit, lit);
3105 break;
3106 }
3107 goto invalid_opc;
3108 case 0x70:
3109 /* FTOIT */
3110 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3111 goto invalid_opc;
3112 }
3113 if (likely(rc != 31)) {
3114 if (ra != 31)
3115 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3116 else
3117 tcg_gen_movi_i64(cpu_ir[rc], 0);
3118 }
3119 break;
3120 case 0x78:
3121 /* FTOIS */
3122 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3123 goto invalid_opc;
3124 }
3125 if (rc != 31) {
3126 TCGv_i32 tmp1 = tcg_temp_new_i32();
3127 if (ra != 31)
3128 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3129 else {
3130 TCGv tmp2 = tcg_const_i64(0);
3131 gen_helper_s_to_memory(tmp1, tmp2);
3132 tcg_temp_free(tmp2);
3133 }
3134 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3135 tcg_temp_free_i32(tmp1);
3136 }
3137 break;
3138 default:
3139 goto invalid_opc;
3140 }
3141 break;
3142 case 0x1D:
3143 /* HW_MTPR (PALcode) */
3144 #ifndef CONFIG_USER_ONLY
3145 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3146 return gen_mtpr(ctx, rb, insn & 0xffff);
3147 }
3148 #endif
3149 goto invalid_opc;
3150 case 0x1E:
3151 /* HW_RET (PALcode) */
3152 #ifndef CONFIG_USER_ONLY
3153 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3154 if (rb == 31) {
3155 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3156 address from EXC_ADDR. This turns out to be useful for our
3157 emulation PALcode, so continue to accept it. */
3158 TCGv tmp = tcg_temp_new();
3159 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3160 gen_helper_hw_ret(cpu_env, tmp);
3161 tcg_temp_free(tmp);
3162 } else {
3163 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
3164 }
3165 ret = EXIT_PC_UPDATED;
3166 break;
3167 }
3168 #endif
3169 goto invalid_opc;
3170 case 0x1F:
3171 /* HW_ST (PALcode) */
3172 #ifndef CONFIG_USER_ONLY
3173 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3174 TCGv addr, val;
3175 addr = tcg_temp_new();
3176 if (rb != 31)
3177 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3178 else
3179 tcg_gen_movi_i64(addr, disp12);
3180 if (ra != 31)
3181 val = cpu_ir[ra];
3182 else {
3183 val = tcg_temp_new();
3184 tcg_gen_movi_i64(val, 0);
3185 }
3186 switch ((insn >> 12) & 0xF) {
3187 case 0x0:
3188 /* Longword physical access */
3189 gen_helper_stl_phys(addr, val);
3190 break;
3191 case 0x1:
3192 /* Quadword physical access */
3193 gen_helper_stq_phys(addr, val);
3194 break;
3195 case 0x2:
3196 /* Longword physical access with lock */
3197 gen_helper_stl_c_phys(val, cpu_env, addr, val);
3198 break;
3199 case 0x3:
3200 /* Quadword physical access with lock */
3201 gen_helper_stq_c_phys(val, cpu_env, addr, val);
3202 break;
3203 case 0x4:
3204 /* Longword virtual access */
3205 goto invalid_opc;
3206 case 0x5:
3207 /* Quadword virtual access */
3208 goto invalid_opc;
3209 case 0x6:
3210 /* Invalid */
3211 goto invalid_opc;
3212 case 0x7:
3213 /* Invalid */
3214 goto invalid_opc;
3215 case 0x8:
3216 /* Invalid */
3217 goto invalid_opc;
3218 case 0x9:
3219 /* Invalid */
3220 goto invalid_opc;
3221 case 0xA:
3222 /* Invalid */
3223 goto invalid_opc;
3224 case 0xB:
3225 /* Invalid */
3226 goto invalid_opc;
3227 case 0xC:
3228 /* Longword virtual access with alternate access mode */
3229 goto invalid_opc;
3230 case 0xD:
3231 /* Quadword virtual access with alternate access mode */
3232 goto invalid_opc;
3233 case 0xE:
3234 /* Invalid */
3235 goto invalid_opc;
3236 case 0xF:
3237 /* Invalid */
3238 goto invalid_opc;
3239 }
3240 if (ra == 31)
3241 tcg_temp_free(val);
3242 tcg_temp_free(addr);
3243 break;
3244 }
3245 #endif
3246 goto invalid_opc;
3247 case 0x20:
3248 /* LDF */
3249 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3250 break;
3251 case 0x21:
3252 /* LDG */
3253 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3254 break;
3255 case 0x22:
3256 /* LDS */
3257 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3258 break;
3259 case 0x23:
3260 /* LDT */
3261 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3262 break;
3263 case 0x24:
3264 /* STF */
3265 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3266 break;
3267 case 0x25:
3268 /* STG */
3269 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3270 break;
3271 case 0x26:
3272 /* STS */
3273 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3274 break;
3275 case 0x27:
3276 /* STT */
3277 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3278 break;
3279 case 0x28:
3280 /* LDL */
3281 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3282 break;
3283 case 0x29:
3284 /* LDQ */
3285 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3286 break;
3287 case 0x2A:
3288 /* LDL_L */
3289 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3290 break;
3291 case 0x2B:
3292 /* LDQ_L */
3293 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3294 break;
3295 case 0x2C:
3296 /* STL */
3297 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3298 break;
3299 case 0x2D:
3300 /* STQ */
3301 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3302 break;
3303 case 0x2E:
3304 /* STL_C */
3305 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3306 break;
3307 case 0x2F:
3308 /* STQ_C */
3309 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3310 break;
3311 case 0x30:
3312 /* BR */
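/* Branch displacements count 32-bit instructions: the target is
   PC + 4*disp21, formed in gen_bdirect and gen_bcond. */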
3313 ret = gen_bdirect(ctx, ra, disp21);
3314 break;
3315 case 0x31: /* FBEQ */
3316 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3317 break;
3318 case 0x32: /* FBLT */
3319 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3320 break;
3321 case 0x33: /* FBLE */
3322 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3323 break;
3324 case 0x34:
3325 /* BSR */
3326 ret = gen_bdirect(ctx, ra, disp21);
3327 break;
3328 case 0x35: /* FBNE */
3329 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3330 break;
3331 case 0x36: /* FBGE */
3332 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3333 break;
3334 case 0x37: /* FBGT */
3335 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3336 break;
3337 case 0x38:
3338 /* BLBC */
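/* BLBC/BLBS branch on the low bit of Ra; the trailing argument of
   gen_bcond requests that single-bit test. */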
3339 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3340 break;
3341 case 0x39:
3342 /* BEQ */
3343 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3344 break;
3345 case 0x3A:
3346 /* BLT */
3347 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3348 break;
3349 case 0x3B:
3350 /* BLE */
3351 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3352 break;
3353 case 0x3C:
3354 /* BLBS */
3355 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3356 break;
3357 case 0x3D:
3358 /* BNE */
3359 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3360 break;
3361 case 0x3E:
3362 /* BGE */
3363 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3364 break;
3365 case 0x3F:
3366 /* BGT */
3367 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3368 break;
3369 invalid_opc:
3370 ret = gen_invalid(ctx);
3371 break;
3372 }
3373
3374 return ret;
3375 }
3376
3377 static inline void gen_intermediate_code_internal(CPUAlphaState *env,
3378 TranslationBlock *tb,
3379 int search_pc)
3380 {
3381 DisasContext ctx, *ctxp = &ctx;
3382 target_ulong pc_start;
3383 uint32_t insn;
3384 uint16_t *gen_opc_end;
3385 CPUBreakpoint *bp;
3386 int j, lj = -1;
3387 ExitStatus ret;
3388 int num_insns;
3389 int max_insns;
3390
3391 pc_start = tb->pc;
3392 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3393
3394 ctx.tb = tb;
3395 ctx.env = env;
3396 ctx.pc = pc_start;
3397 ctx.mem_idx = cpu_mmu_index(env);
3398
3399 /* ??? Every TB begins with unset rounding mode, to be initialized on
3400 the first fp insn of the TB. Alternatively we could define a proper
3401 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3402 to reset the FP_STATUS to that default at the end of any TB that
3403 changes the default. We could even (gasp) dynamically figure out
3404 what default would be most efficient given the running program. */
3405 ctx.tb_rm = -1;
3406 /* Similarly for flush-to-zero. */
3407 ctx.tb_ftz = -1;
3408
3409 num_insns = 0;
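/* tb->cflags carries the requested instruction limit; a count of
   zero means "no limit", so substitute the maximum. */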
3410 max_insns = tb->cflags & CF_COUNT_MASK;
3411 if (max_insns == 0)
3412 max_insns = CF_COUNT_MASK;
3413
3414 gen_tb_start();
3415 do {
3416 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3417 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3418 if (bp->pc == ctx.pc) {
3419 gen_excp(&ctx, EXCP_DEBUG, 0);
3420 break;
3421 }
3422 }
3423 }
3424 if (search_pc) {
3425 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3426 if (lj < j) {
3427 lj++;
3428 while (lj < j)
3429 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3430 }
3431 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
3432 tcg_ctx.gen_opc_instr_start[lj] = 1;
3433 tcg_ctx.gen_opc_icount[lj] = num_insns;
3434 }
3435 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3436 gen_io_start();
3437 insn = cpu_ldl_code(env, ctx.pc);
3438 num_insns++;
3439
3440 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3441 tcg_gen_debug_insn_start(ctx.pc);
3442 }
3443
3444 ctx.pc += 4;
3445 ret = translate_one(ctxp, insn);
3446
3447 /* If we reach a page boundary, are single-stepping,
3448 or exhaust the instruction count, stop generation. */
3449 if (ret == NO_EXIT
3450 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3451 || tcg_ctx.gen_opc_ptr >= gen_opc_end
3452 || num_insns >= max_insns
3453 || singlestep
3454 || env->singlestep_enabled)) {
3455 ret = EXIT_PC_STALE;
3456 }
3457 } while (ret == NO_EXIT);
3458
3459 if (tb->cflags & CF_LAST_IO) {
3460 gen_io_end();
3461 }
3462
3463 switch (ret) {
3464 case EXIT_GOTO_TB:
3465 case EXIT_NORETURN:
3466 break;
3467 case EXIT_PC_STALE:
3468 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3469 /* FALLTHRU */
3470 case EXIT_PC_UPDATED:
3471 if (env->singlestep_enabled) {
3472 gen_excp_1(EXCP_DEBUG, 0);
3473 } else {
3474 tcg_gen_exit_tb(0);
3475 }
3476 break;
3477 default:
3478 abort();
3479 }
3480
3481 gen_tb_end(tb, num_insns);
3482 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3483 if (search_pc) {
3484 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3485 lj++;
3486 while (lj <= j)
3487 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3488 } else {
3489 tb->size = ctx.pc - pc_start;
3490 tb->icount = num_insns;
3491 }
3492
3493 #ifdef DEBUG_DISAS
3494 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3495 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3496 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
3497 qemu_log("\n");
3498 }
3499 #endif
3500 }
3501
3502 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
3503 {
3504 gen_intermediate_code_internal(env, tb, 0);
3505 }
3506
3507 void gen_intermediate_code_pc(CPUAlphaState *env, struct TranslationBlock *tb)
3508 {
3509 gen_intermediate_code_internal(env, tb, 1);
3510 }
3511
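/* Restore the guest PC recorded for opcode index pc_pos during the
   search_pc pass (gen_intermediate_code_pc). */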
3512 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
3513 {
3514 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
3515 }