1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "cpu.h"
21 #include "disas.h"
22 #include "host-utils.h"
23 #include "tcg-op.h"
24
25 #include "helper.h"
26 #define GEN_HELPER 1
27 #include "helper.h"
28
29 #undef ALPHA_DEBUG_DISAS
30 #define CONFIG_SOFTFLOAT_INLINE
31
32 #ifdef ALPHA_DEBUG_DISAS
33 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
34 #else
35 # define LOG_DISAS(...) do { } while (0)
36 #endif
37
38 typedef struct DisasContext DisasContext;
39 struct DisasContext {
40 struct TranslationBlock *tb;
41 CPUAlphaState *env;
42 uint64_t pc;
43 int mem_idx;
44
45 /* Current rounding mode for this TB. */
46 int tb_rm;
47 /* Current flush-to-zero setting for this TB. */
48 int tb_ftz;
49 };
50
51 /* Return values from translate_one, indicating the state of the TB.
52 Note that zero indicates that we are not exiting the TB. */
53
54 typedef enum {
55 NO_EXIT,
56
57 /* We have emitted one or more goto_tb. No fixup required. */
58 EXIT_GOTO_TB,
59
60 /* We are not using a goto_tb (for whatever reason), but have updated
61 the PC (for whatever reason), so there's no need to do it again on
62 exiting the TB. */
63 EXIT_PC_UPDATED,
64
65 /* We are exiting the TB, but have neither emitted a goto_tb, nor
66 updated the PC for the next instruction to be executed. */
67 EXIT_PC_STALE,
68
69 /* We are ending the TB with a noreturn function call, e.g. longjmp.
70 No following code will be executed. */
71 EXIT_NORETURN,
72 } ExitStatus;
73
74 /* global register indexes */
75 static TCGv_ptr cpu_env;
76 static TCGv cpu_ir[31];
77 static TCGv cpu_fir[31];
78 static TCGv cpu_pc;
79 static TCGv cpu_lock_addr;
80 static TCGv cpu_lock_st_addr;
81 static TCGv cpu_lock_value;
82 static TCGv cpu_unique;
83 #ifndef CONFIG_USER_ONLY
84 static TCGv cpu_sysval;
85 static TCGv cpu_usp;
86 #endif
87
88 /* register names */
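/* Space for 10 names "ir0".."ir9" (4 bytes each incl. NUL) plus 21 names
   "ir10".."ir30" (5 bytes each), and the same again for the "fir" names,
   which are one byte longer. */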
89 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
90
91 #include "gen-icount.h"
92
93 static void alpha_translate_init(void)
94 {
95 int i;
96 char *p;
97 static int done_init = 0;
98
99 if (done_init)
100 return;
101
102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
103
104 p = cpu_reg_names;
105 for (i = 0; i < 31; i++) {
106 sprintf(p, "ir%d", i);
107 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
108 offsetof(CPUAlphaState, ir[i]), p);
109 p += (i < 10) ? 4 : 5;
110
111 sprintf(p, "fir%d", i);
112 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
113 offsetof(CPUAlphaState, fir[i]), p);
114 p += (i < 10) ? 5 : 6;
115 }
116
117 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
118 offsetof(CPUAlphaState, pc), "pc");
119
120 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
121 offsetof(CPUAlphaState, lock_addr),
122 "lock_addr");
123 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
124 offsetof(CPUAlphaState, lock_st_addr),
125 "lock_st_addr");
126 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
127 offsetof(CPUAlphaState, lock_value),
128 "lock_value");
129
130 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
131 offsetof(CPUAlphaState, unique), "unique");
132 #ifndef CONFIG_USER_ONLY
133 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
134 offsetof(CPUAlphaState, sysval), "sysval");
135 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
136 offsetof(CPUAlphaState, usp), "usp");
137 #endif
138
139 /* register helpers */
140 #define GEN_HELPER 2
141 #include "helper.h"
142
143 done_init = 1;
144 }
145
146 static void gen_excp_1(int exception, int error_code)
147 {
148 TCGv_i32 tmp1, tmp2;
149
150 tmp1 = tcg_const_i32(exception);
151 tmp2 = tcg_const_i32(error_code);
152 gen_helper_excp(cpu_env, tmp1, tmp2);
153 tcg_temp_free_i32(tmp2);
154 tcg_temp_free_i32(tmp1);
155 }
156
157 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
158 {
159 tcg_gen_movi_i64(cpu_pc, ctx->pc);
160 gen_excp_1(exception, error_code);
161 return EXIT_NORETURN;
162 }
163
164 static inline ExitStatus gen_invalid(DisasContext *ctx)
165 {
166 return gen_excp(ctx, EXCP_OPCDEC, 0);
167 }
168
169 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
170 {
171 TCGv tmp = tcg_temp_new();
172 TCGv_i32 tmp32 = tcg_temp_new_i32();
173 tcg_gen_qemu_ld32u(tmp, t1, flags);
174 tcg_gen_trunc_i64_i32(tmp32, tmp);
175 gen_helper_memory_to_f(t0, tmp32);
176 tcg_temp_free_i32(tmp32);
177 tcg_temp_free(tmp);
178 }
179
180 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
181 {
182 TCGv tmp = tcg_temp_new();
183 tcg_gen_qemu_ld64(tmp, t1, flags);
184 gen_helper_memory_to_g(t0, tmp);
185 tcg_temp_free(tmp);
186 }
187
188 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
189 {
190 TCGv tmp = tcg_temp_new();
191 TCGv_i32 tmp32 = tcg_temp_new_i32();
192 tcg_gen_qemu_ld32u(tmp, t1, flags);
193 tcg_gen_trunc_i64_i32(tmp32, tmp);
194 gen_helper_memory_to_s(t0, tmp32);
195 tcg_temp_free_i32(tmp32);
196 tcg_temp_free(tmp);
197 }
198
199 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
200 {
201 tcg_gen_qemu_ld32s(t0, t1, flags);
202 tcg_gen_mov_i64(cpu_lock_addr, t1);
203 tcg_gen_mov_i64(cpu_lock_value, t0);
204 }
205
206 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
207 {
208 tcg_gen_qemu_ld64(t0, t1, flags);
209 tcg_gen_mov_i64(cpu_lock_addr, t1);
210 tcg_gen_mov_i64(cpu_lock_value, t0);
211 }
212
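/* Generate a load of the form Ra = mem[Rb + disp16].  fp selects the
   floating-point register file; clear zaps the low three address bits,
   as LDQ_U requires. */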
213 static inline void gen_load_mem(DisasContext *ctx,
214 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
215 int flags),
216 int ra, int rb, int32_t disp16, int fp,
217 int clear)
218 {
219 TCGv addr, va;
220
221 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
222 prefetches, which we can treat as nops. No worries about
223 missed exceptions here. */
224 if (unlikely(ra == 31)) {
225 return;
226 }
227
228 addr = tcg_temp_new();
229 if (rb != 31) {
230 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
231 if (clear) {
232 tcg_gen_andi_i64(addr, addr, ~0x7);
233 }
234 } else {
235 if (clear) {
236 disp16 &= ~0x7;
237 }
238 tcg_gen_movi_i64(addr, disp16);
239 }
240
241 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
242 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
243
244 tcg_temp_free(addr);
245 }
246
247 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
248 {
249 TCGv_i32 tmp32 = tcg_temp_new_i32();
250 TCGv tmp = tcg_temp_new();
251 gen_helper_f_to_memory(tmp32, t0);
252 tcg_gen_extu_i32_i64(tmp, tmp32);
253 tcg_gen_qemu_st32(tmp, t1, flags);
254 tcg_temp_free(tmp);
255 tcg_temp_free_i32(tmp32);
256 }
257
258 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
259 {
260 TCGv tmp = tcg_temp_new();
261 gen_helper_g_to_memory(tmp, t0);
262 tcg_gen_qemu_st64(tmp, t1, flags);
263 tcg_temp_free(tmp);
264 }
265
266 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
267 {
268 TCGv_i32 tmp32 = tcg_temp_new_i32();
269 TCGv tmp = tcg_temp_new();
270 gen_helper_s_to_memory(tmp32, t0);
271 tcg_gen_extu_i32_i64(tmp, tmp32);
272 tcg_gen_qemu_st32(tmp, t1, flags);
273 tcg_temp_free(tmp);
274 tcg_temp_free_i32(tmp32);
275 }
276
277 static inline void gen_store_mem(DisasContext *ctx,
278 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
279 int flags),
280 int ra, int rb, int32_t disp16, int fp,
281 int clear)
282 {
283 TCGv addr, va;
284
285 addr = tcg_temp_new();
286 if (rb != 31) {
287 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
288 if (clear) {
289 tcg_gen_andi_i64(addr, addr, ~0x7);
290 }
291 } else {
292 if (clear) {
293 disp16 &= ~0x7;
294 }
295 tcg_gen_movi_i64(addr, disp16);
296 }
297
298 if (ra == 31) {
299 va = tcg_const_i64(0);
300 } else {
301 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
302 }
303 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
304
305 tcg_temp_free(addr);
306 if (ra == 31) {
307 tcg_temp_free(va);
308 }
309 }
310
311 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
312 int32_t disp16, int quad)
313 {
314 TCGv addr;
315
316 if (ra == 31) {
317 /* ??? Don't bother storing anything. The user can't tell
318 the difference, since the zero register always reads zero. */
319 return NO_EXIT;
320 }
321
322 #if defined(CONFIG_USER_ONLY)
323 addr = cpu_lock_st_addr;
324 #else
325 addr = tcg_temp_local_new();
326 #endif
327
328 if (rb != 31) {
329 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
330 } else {
331 tcg_gen_movi_i64(addr, disp16);
332 }
333
334 #if defined(CONFIG_USER_ONLY)
335 /* ??? This is handled via a complicated version of compare-and-swap
336 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
337 in TCG so that this isn't necessary. */
338 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
339 #else
340 /* ??? In system mode we are never multi-threaded, so CAS can be
341 implemented via a non-atomic load-compare-store sequence. */
342 {
343 int lab_fail, lab_done;
344 TCGv val;
345
346 lab_fail = gen_new_label();
347 lab_done = gen_new_label();
348 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
349
350 val = tcg_temp_new();
351 if (quad) {
352 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
353 } else {
354 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
355 }
356 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
357
358 if (quad) {
359 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
360 } else {
361 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
362 }
363 tcg_gen_movi_i64(cpu_ir[ra], 1);
364 tcg_gen_br(lab_done);
365
366 gen_set_label(lab_fail);
367 tcg_gen_movi_i64(cpu_ir[ra], 0);
368
369 gen_set_label(lab_done);
370 tcg_gen_movi_i64(cpu_lock_addr, -1);
371
372 tcg_temp_free(addr);
373 return NO_EXIT;
374 }
375 #endif
376 }
377
378 static int use_goto_tb(DisasContext *ctx, uint64_t dest)
379 {
380 /* Check for the dest on the same page as the start of the TB. We
381 also want to suppress goto_tb in the case of single-stepping and IO. */
382 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
383 && !ctx->env->singlestep_enabled
384 && !(ctx->tb->cflags & CF_LAST_IO));
385 }
386
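/* Direct branch: the 21-bit displacement is in instruction units and is
   relative to the updated PC (ctx->pc already points past the branch),
   which is also the value written to RA. */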
387 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
388 {
389 uint64_t dest = ctx->pc + (disp << 2);
390
391 if (ra != 31) {
392 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
393 }
394
395 /* Notice branch-to-next; used to initialize RA with the PC. */
396 if (disp == 0) {
397 return 0;
398 } else if (use_goto_tb(ctx, dest)) {
399 tcg_gen_goto_tb(0);
400 tcg_gen_movi_i64(cpu_pc, dest);
401 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
402 return EXIT_GOTO_TB;
403 } else {
404 tcg_gen_movi_i64(cpu_pc, dest);
405 return EXIT_PC_UPDATED;
406 }
407 }
408
409 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
410 TCGv cmp, int32_t disp)
411 {
412 uint64_t dest = ctx->pc + (disp << 2);
413 int lab_true = gen_new_label();
414
415 if (use_goto_tb(ctx, dest)) {
416 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
417
418 tcg_gen_goto_tb(0);
419 tcg_gen_movi_i64(cpu_pc, ctx->pc);
420 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
421
422 gen_set_label(lab_true);
423 tcg_gen_goto_tb(1);
424 tcg_gen_movi_i64(cpu_pc, dest);
425 tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);
426
427 return EXIT_GOTO_TB;
428 } else {
429 int lab_over = gen_new_label();
430
431 /* ??? Consider using either
432 movi pc, next
433 addi tmp, pc, disp
434 movcond pc, cond, 0, tmp, pc
435 or
436 setcond tmp, cond, 0
437 movi pc, next
438 neg tmp, tmp
439 andi tmp, tmp, disp
440 add pc, pc, tmp
441 The current diamond subgraph surely isn't efficient. */
442
443 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
444 tcg_gen_movi_i64(cpu_pc, ctx->pc);
445 tcg_gen_br(lab_over);
446 gen_set_label(lab_true);
447 tcg_gen_movi_i64(cpu_pc, dest);
448 gen_set_label(lab_over);
449
450 return EXIT_PC_UPDATED;
451 }
452 }
453
454 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
455 int32_t disp, int mask)
456 {
457 TCGv cmp_tmp;
458
459 if (unlikely(ra == 31)) {
460 cmp_tmp = tcg_const_i64(0);
461 } else {
462 cmp_tmp = tcg_temp_new();
463 if (mask) {
464 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
465 } else {
466 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
467 }
468 }
469
470 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
471 }
472
473 /* Fold -0.0 for comparison with COND. */
474
475 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
476 {
477 uint64_t mzero = 1ull << 63;
478
479 switch (cond) {
480 case TCG_COND_LE:
481 case TCG_COND_GT:
482 /* For <= or >, the -0.0 value directly compares the way we want. */
483 tcg_gen_mov_i64(dest, src);
484 break;
485
486 case TCG_COND_EQ:
487 case TCG_COND_NE:
488 /* For == or !=, we can simply mask off the sign bit and compare. */
489 tcg_gen_andi_i64(dest, src, mzero - 1);
490 break;
491
492 case TCG_COND_GE:
493 case TCG_COND_LT:
494 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
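/* That is: dest = (src != -0.0) ? ~0 : 0, then dest &= src, so only
   the exact -0.0 bit pattern collapses to +0.0. */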
495 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
496 tcg_gen_neg_i64(dest, dest);
497 tcg_gen_and_i64(dest, dest, src);
498 break;
499
500 default:
501 abort();
502 }
503 }
504
505 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
506 int32_t disp)
507 {
508 TCGv cmp_tmp;
509
510 if (unlikely(ra == 31)) {
511 /* Very uncommon case, but easier to optimize it to an integer
512 comparison than continuing with the floating point comparison. */
513 return gen_bcond(ctx, cond, ra, disp, 0);
514 }
515
516 cmp_tmp = tcg_temp_new();
517 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
518 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
519 }
520
521 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
522 int islit, uint8_t lit, int mask)
523 {
524 TCGCond inv_cond = tcg_invert_cond(cond);
525 int l1;
526
527 if (unlikely(rc == 31))
528 return;
529
530 l1 = gen_new_label();
531
532 if (ra != 31) {
533 if (mask) {
534 TCGv tmp = tcg_temp_new();
535 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
536 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
537 tcg_temp_free(tmp);
538 } else
539 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
540 } else {
541 /* Very uncommon case - Do not bother to optimize. */
542 TCGv tmp = tcg_const_i64(0);
543 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
544 tcg_temp_free(tmp);
545 }
546
547 if (islit)
548 tcg_gen_movi_i64(cpu_ir[rc], lit);
549 else
550 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
551 gen_set_label(l1);
552 }
553
554 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
555 {
556 TCGv cmp_tmp;
557 int l1;
558
559 if (unlikely(rc == 31)) {
560 return;
561 }
562
563 cmp_tmp = tcg_temp_new();
564 if (unlikely(ra == 31)) {
565 tcg_gen_movi_i64(cmp_tmp, 0);
566 } else {
567 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
568 }
569
570 l1 = gen_new_label();
571 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
572 tcg_temp_free(cmp_tmp);
573
574 if (rb != 31)
575 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
576 else
577 tcg_gen_movi_i64(cpu_fir[rc], 0);
578 gen_set_label(l1);
579 }
580
581 #define QUAL_RM_N 0x080 /* Round mode nearest even */
582 #define QUAL_RM_C 0x000 /* Round mode chopped */
583 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
584 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
585 #define QUAL_RM_MASK 0x0c0
586
587 #define QUAL_U 0x100 /* Underflow enable (fp output) */
588 #define QUAL_V 0x100 /* Overflow enable (int output) */
589 #define QUAL_S 0x400 /* Software completion enable */
590 #define QUAL_I 0x200 /* Inexact detection enable */
591
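/* Install the softfloat rounding mode demanded by the fn11 qualifier.
   The mode last stored by this TB is cached in ctx->tb_rm so redundant
   stores within the TB are skipped. */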
592 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
593 {
594 TCGv_i32 tmp;
595
596 fn11 &= QUAL_RM_MASK;
597 if (fn11 == ctx->tb_rm) {
598 return;
599 }
600 ctx->tb_rm = fn11;
601
602 tmp = tcg_temp_new_i32();
603 switch (fn11) {
604 case QUAL_RM_N:
605 tcg_gen_movi_i32(tmp, float_round_nearest_even);
606 break;
607 case QUAL_RM_C:
608 tcg_gen_movi_i32(tmp, float_round_to_zero);
609 break;
610 case QUAL_RM_M:
611 tcg_gen_movi_i32(tmp, float_round_down);
612 break;
613 case QUAL_RM_D:
614 tcg_gen_ld8u_i32(tmp, cpu_env,
615 offsetof(CPUAlphaState, fpcr_dyn_round));
616 break;
617 }
618
619 #if defined(CONFIG_SOFTFLOAT_INLINE)
620 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
621 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
622 sets the one field. */
623 tcg_gen_st8_i32(tmp, cpu_env,
624 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
625 #else
626 gen_helper_setroundmode(tmp);
627 #endif
628
629 tcg_temp_free_i32(tmp);
630 }
631
632 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
633 {
634 TCGv_i32 tmp;
635
636 fn11 &= QUAL_U;
637 if (fn11 == ctx->tb_ftz) {
638 return;
639 }
640 ctx->tb_ftz = fn11;
641
642 tmp = tcg_temp_new_i32();
643 if (fn11) {
644 /* Underflow is enabled, use the FPCR setting. */
645 tcg_gen_ld8u_i32(tmp, cpu_env,
646 offsetof(CPUAlphaState, fpcr_flush_to_zero));
647 } else {
648 /* Underflow is disabled, force flush-to-zero. */
649 tcg_gen_movi_i32(tmp, 1);
650 }
651
652 #if defined(CONFIG_SOFTFLOAT_INLINE)
653 tcg_gen_st8_i32(tmp, cpu_env,
654 offsetof(CPUAlphaState, fp_status.flush_to_zero));
655 #else
656 gen_helper_setflushzero(tmp);
657 #endif
658
659 tcg_temp_free_i32(tmp);
660 }
661
662 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
663 {
664 TCGv val = tcg_temp_new();
665 if (reg == 31) {
666 tcg_gen_movi_i64(val, 0);
667 } else if (fn11 & QUAL_S) {
668 gen_helper_ieee_input_s(val, cpu_env, cpu_fir[reg]);
669 } else if (is_cmp) {
670 gen_helper_ieee_input_cmp(val, cpu_env, cpu_fir[reg]);
671 } else {
672 gen_helper_ieee_input(val, cpu_env, cpu_fir[reg]);
673 }
674 return val;
675 }
676
677 static void gen_fp_exc_clear(void)
678 {
679 #if defined(CONFIG_SOFTFLOAT_INLINE)
680 TCGv_i32 zero = tcg_const_i32(0);
681 tcg_gen_st8_i32(zero, cpu_env,
682 offsetof(CPUAlphaState, fp_status.float_exception_flags));
683 tcg_temp_free_i32(zero);
684 #else
685 gen_helper_fp_exc_clear(cpu_env);
686 #endif
687 }
688
689 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
690 {
691 /* ??? We ought to be able to do something with imprecise exceptions.
692 E.g. notice we're still in the trap shadow of something within the
693 TB and do not generate the code to signal the exception; end the TB
694 when an exception is forced to arrive, either by consumption of a
695 register value or TRAPB or EXCB. */
696 TCGv_i32 exc = tcg_temp_new_i32();
697 TCGv_i32 reg;
698
699 #if defined(CONFIG_SOFTFLOAT_INLINE)
700 tcg_gen_ld8u_i32(exc, cpu_env,
701 offsetof(CPUAlphaState, fp_status.float_exception_flags));
702 #else
703 gen_helper_fp_exc_get(exc, cpu_env);
704 #endif
705
706 if (ignore) {
707 tcg_gen_andi_i32(exc, exc, ~ignore);
708 }
709
710 /* ??? Pass in the regno of the destination so that the helper can
711 set EXC_MASK, which contains a bitmask of destination registers
712 that have caused arithmetic traps. A simple userspace emulation
713 does not require this. We do need it for a guest kernel's entArith,
714 or if we were to do something clever with imprecise exceptions. */
715 reg = tcg_const_i32(rc + 32);
716
717 if (fn11 & QUAL_S) {
718 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
719 } else {
720 gen_helper_fp_exc_raise(cpu_env, exc, reg);
721 }
722
723 tcg_temp_free_i32(reg);
724 tcg_temp_free_i32(exc);
725 }
726
727 static inline void gen_fp_exc_raise(int rc, int fn11)
728 {
729 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
730 }
731
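/* CVTLQ: a longword held in an FP register keeps bits <31:30> in <63:62>
   and bits <29:0> in <58:29>.  Reassemble and sign-extend it into a
   canonical quadword. */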
732 static void gen_fcvtlq(int rb, int rc)
733 {
734 if (unlikely(rc == 31)) {
735 return;
736 }
737 if (unlikely(rb == 31)) {
738 tcg_gen_movi_i64(cpu_fir[rc], 0);
739 } else {
740 TCGv tmp = tcg_temp_new();
741
742 /* The arithmetic right shift here, plus the sign-extended mask below
743 yields a sign-extended result without an explicit ext32s_i64. */
744 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
745 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
746 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
747 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
748 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
749
750 tcg_temp_free(tmp);
751 }
752 }
753
754 static void gen_fcvtql(int rb, int rc)
755 {
756 if (unlikely(rc == 31)) {
757 return;
758 }
759 if (unlikely(rb == 31)) {
760 tcg_gen_movi_i64(cpu_fir[rc], 0);
761 } else {
762 TCGv tmp = tcg_temp_new();
763
764 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
765 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
766 tcg_gen_shli_i64(tmp, tmp, 32);
767 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
768 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
769
770 tcg_temp_free(tmp);
771 }
772 }
773
774 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
775 {
776 if (rb != 31) {
777 int lab = gen_new_label();
778 TCGv tmp = tcg_temp_new();
779
780 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
781 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
782 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
783
784 gen_set_label(lab);
785 }
786 gen_fcvtql(rb, rc);
787 }
788
789 #define FARITH2(name) \
790 static inline void glue(gen_f, name)(int rb, int rc) \
791 { \
792 if (unlikely(rc == 31)) { \
793 return; \
794 } \
795 if (rb != 31) { \
796 gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
797 } else { \
798 TCGv tmp = tcg_const_i64(0); \
799 gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
800 tcg_temp_free(tmp); \
801 } \
802 }
803
804 /* ??? VAX instruction qualifiers ignored. */
805 FARITH2(sqrtf)
806 FARITH2(sqrtg)
807 FARITH2(cvtgf)
808 FARITH2(cvtgq)
809 FARITH2(cvtqf)
810 FARITH2(cvtqg)
811
812 static void gen_ieee_arith2(DisasContext *ctx,
813 void (*helper)(TCGv, TCGv_ptr, TCGv),
814 int rb, int rc, int fn11)
815 {
816 TCGv vb;
817
818 /* ??? This is wrong: the instruction is not a nop, it still may
819 raise exceptions. */
820 if (unlikely(rc == 31)) {
821 return;
822 }
823
824 gen_qual_roundmode(ctx, fn11);
825 gen_qual_flushzero(ctx, fn11);
826 gen_fp_exc_clear();
827
828 vb = gen_ieee_input(rb, fn11, 0);
829 helper(cpu_fir[rc], cpu_env, vb);
830 tcg_temp_free(vb);
831
832 gen_fp_exc_raise(rc, fn11);
833 }
834
835 #define IEEE_ARITH2(name) \
836 static inline void glue(gen_f, name)(DisasContext *ctx, \
837 int rb, int rc, int fn11) \
838 { \
839 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
840 }
841 IEEE_ARITH2(sqrts)
842 IEEE_ARITH2(sqrtt)
843 IEEE_ARITH2(cvtst)
844 IEEE_ARITH2(cvtts)
845
846 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
847 {
848 TCGv vb;
849 int ignore = 0;
850
851 /* ??? This is wrong: the instruction is not a nop, it still may
852 raise exceptions. */
853 if (unlikely(rc == 31)) {
854 return;
855 }
856
857 /* No need to set flushzero, since we have an integer output. */
858 gen_fp_exc_clear();
859 vb = gen_ieee_input(rb, fn11, 0);
860
861 /* Almost all integer conversions use chopped rounding, and most
862 also do not have integer overflow enabled. Special case that. */
863 switch (fn11) {
864 case QUAL_RM_C:
865 gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
866 break;
867 case QUAL_V | QUAL_RM_C:
868 case QUAL_S | QUAL_V | QUAL_RM_C:
869 ignore = float_flag_inexact;
870 /* FALLTHRU */
871 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
872 gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
873 break;
874 default:
875 gen_qual_roundmode(ctx, fn11);
876 gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
877 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
878 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
879 break;
880 }
881 tcg_temp_free(vb);
882
883 gen_fp_exc_raise_ignore(rc, fn11, ignore);
884 }
885
886 static void gen_ieee_intcvt(DisasContext *ctx,
887 void (*helper)(TCGv, TCGv_ptr, TCGv),
888 int rb, int rc, int fn11)
889 {
890 TCGv vb;
891
892 /* ??? This is wrong: the instruction is not a nop, it still may
893 raise exceptions. */
894 if (unlikely(rc == 31)) {
895 return;
896 }
897
898 gen_qual_roundmode(ctx, fn11);
899
900 if (rb == 31) {
901 vb = tcg_const_i64(0);
902 } else {
903 vb = cpu_fir[rb];
904 }
905
906 /* The only exception that can be raised by integer conversion
907 is inexact. Thus we only need to worry about exceptions when
908 inexact handling is requested. */
909 if (fn11 & QUAL_I) {
910 gen_fp_exc_clear();
911 helper(cpu_fir[rc], cpu_env, vb);
912 gen_fp_exc_raise(rc, fn11);
913 } else {
914 helper(cpu_fir[rc], cpu_env, vb);
915 }
916
917 if (rb == 31) {
918 tcg_temp_free(vb);
919 }
920 }
921
922 #define IEEE_INTCVT(name) \
923 static inline void glue(gen_f, name)(DisasContext *ctx, \
924 int rb, int rc, int fn11) \
925 { \
926 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
927 }
928 IEEE_INTCVT(cvtqs)
929 IEEE_INTCVT(cvtqt)
930
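/* CPYS and friends: combine the bits of A selected by MASK (complemented
   for CPYSN) with the remaining bits of B.  za/zb record inputs that are
   statically zero so the final OR can be omitted. */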
931 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
932 {
933 TCGv va, vb, vmask;
934 int za = 0, zb = 0;
935
936 if (unlikely(rc == 31)) {
937 return;
938 }
939
940 vmask = tcg_const_i64(mask);
941
942 TCGV_UNUSED_I64(va);
943 if (ra == 31) {
944 if (inv_a) {
945 va = vmask;
946 } else {
947 za = 1;
948 }
949 } else {
950 va = tcg_temp_new_i64();
951 tcg_gen_mov_i64(va, cpu_fir[ra]);
952 if (inv_a) {
953 tcg_gen_andc_i64(va, vmask, va);
954 } else {
955 tcg_gen_and_i64(va, va, vmask);
956 }
957 }
958
959 TCGV_UNUSED_I64(vb);
960 if (rb == 31) {
961 zb = 1;
962 } else {
963 vb = tcg_temp_new_i64();
964 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
965 }
966
967 switch (za << 1 | zb) {
968 case 0 | 0:
969 tcg_gen_or_i64(cpu_fir[rc], va, vb);
970 break;
971 case 0 | 1:
972 tcg_gen_mov_i64(cpu_fir[rc], va);
973 break;
974 case 2 | 0:
975 tcg_gen_mov_i64(cpu_fir[rc], vb);
976 break;
977 case 2 | 1:
978 tcg_gen_movi_i64(cpu_fir[rc], 0);
979 break;
980 }
981
982 tcg_temp_free(vmask);
983 if (ra != 31) {
984 tcg_temp_free(va);
985 }
986 if (rb != 31) {
987 tcg_temp_free(vb);
988 }
989 }
990
991 static inline void gen_fcpys(int ra, int rb, int rc)
992 {
993 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
994 }
995
996 static inline void gen_fcpysn(int ra, int rb, int rc)
997 {
998 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
999 }
1000
1001 static inline void gen_fcpyse(int ra, int rb, int rc)
1002 {
1003 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
1004 }
1005
1006 #define FARITH3(name) \
1007 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1008 { \
1009 TCGv va, vb; \
1010 \
1011 if (unlikely(rc == 31)) { \
1012 return; \
1013 } \
1014 if (ra == 31) { \
1015 va = tcg_const_i64(0); \
1016 } else { \
1017 va = cpu_fir[ra]; \
1018 } \
1019 if (rb == 31) { \
1020 vb = tcg_const_i64(0); \
1021 } else { \
1022 vb = cpu_fir[rb]; \
1023 } \
1024 \
1025 gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
1026 \
1027 if (ra == 31) { \
1028 tcg_temp_free(va); \
1029 } \
1030 if (rb == 31) { \
1031 tcg_temp_free(vb); \
1032 } \
1033 }
1034
1035 /* ??? VAX instruction qualifiers ignored. */
1036 FARITH3(addf)
1037 FARITH3(subf)
1038 FARITH3(mulf)
1039 FARITH3(divf)
1040 FARITH3(addg)
1041 FARITH3(subg)
1042 FARITH3(mulg)
1043 FARITH3(divg)
1044 FARITH3(cmpgeq)
1045 FARITH3(cmpglt)
1046 FARITH3(cmpgle)
1047
1048 static void gen_ieee_arith3(DisasContext *ctx,
1049 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
1050 int ra, int rb, int rc, int fn11)
1051 {
1052 TCGv va, vb;
1053
1054 /* ??? This is wrong: the instruction is not a nop, it still may
1055 raise exceptions. */
1056 if (unlikely(rc == 31)) {
1057 return;
1058 }
1059
1060 gen_qual_roundmode(ctx, fn11);
1061 gen_qual_flushzero(ctx, fn11);
1062 gen_fp_exc_clear();
1063
1064 va = gen_ieee_input(ra, fn11, 0);
1065 vb = gen_ieee_input(rb, fn11, 0);
1066 helper(cpu_fir[rc], cpu_env, va, vb);
1067 tcg_temp_free(va);
1068 tcg_temp_free(vb);
1069
1070 gen_fp_exc_raise(rc, fn11);
1071 }
1072
1073 #define IEEE_ARITH3(name) \
1074 static inline void glue(gen_f, name)(DisasContext *ctx, \
1075 int ra, int rb, int rc, int fn11) \
1076 { \
1077 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1078 }
1079 IEEE_ARITH3(adds)
1080 IEEE_ARITH3(subs)
1081 IEEE_ARITH3(muls)
1082 IEEE_ARITH3(divs)
1083 IEEE_ARITH3(addt)
1084 IEEE_ARITH3(subt)
1085 IEEE_ARITH3(mult)
1086 IEEE_ARITH3(divt)
1087
1088 static void gen_ieee_compare(DisasContext *ctx,
1089 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
1090 int ra, int rb, int rc, int fn11)
1091 {
1092 TCGv va, vb;
1093
1094 /* ??? This is wrong: the instruction is not a nop, it still may
1095 raise exceptions. */
1096 if (unlikely(rc == 31)) {
1097 return;
1098 }
1099
1100 gen_fp_exc_clear();
1101
1102 va = gen_ieee_input(ra, fn11, 1);
1103 vb = gen_ieee_input(rb, fn11, 1);
1104 helper(cpu_fir[rc], cpu_env, va, vb);
1105 tcg_temp_free(va);
1106 tcg_temp_free(vb);
1107
1108 gen_fp_exc_raise(rc, fn11);
1109 }
1110
1111 #define IEEE_CMP3(name) \
1112 static inline void glue(gen_f, name)(DisasContext *ctx, \
1113 int ra, int rb, int rc, int fn11) \
1114 { \
1115 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1116 }
1117 IEEE_CMP3(cmptun)
1118 IEEE_CMP3(cmpteq)
1119 IEEE_CMP3(cmptlt)
1120 IEEE_CMP3(cmptle)
1121
1122 static inline uint64_t zapnot_mask(uint8_t lit)
1123 {
1124 uint64_t mask = 0;
1125 int i;
1126
1127 for (i = 0; i < 8; ++i) {
1128 if ((lit >> i) & 1)
1129 mask |= 0xffull << (i * 8);
1130 }
1131 return mask;
1132 }
1133
1134 /* Implement zapnot with an immediate operand, which expands to some
1135 form of immediate AND. This is a basic building block in the
1136 definition of many of the other byte manipulation instructions. */
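/* E.g. lit == 0x0f keeps bytes 0..3 (zapnot_mask(0x0f) == 0xffffffffull),
   which is why it maps to ext32u below. */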
1137 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1138 {
1139 switch (lit) {
1140 case 0x00:
1141 tcg_gen_movi_i64(dest, 0);
1142 break;
1143 case 0x01:
1144 tcg_gen_ext8u_i64(dest, src);
1145 break;
1146 case 0x03:
1147 tcg_gen_ext16u_i64(dest, src);
1148 break;
1149 case 0x0f:
1150 tcg_gen_ext32u_i64(dest, src);
1151 break;
1152 case 0xff:
1153 tcg_gen_mov_i64(dest, src);
1154 break;
1155 default:
1156 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1157 break;
1158 }
1159 }
1160
1161 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1162 {
1163 if (unlikely(rc == 31))
1164 return;
1165 else if (unlikely(ra == 31))
1166 tcg_gen_movi_i64(cpu_ir[rc], 0);
1167 else if (islit)
1168 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1169 else
1170 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1171 }
1172
1173 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1174 {
1175 if (unlikely(rc == 31))
1176 return;
1177 else if (unlikely(ra == 31))
1178 tcg_gen_movi_i64(cpu_ir[rc], 0);
1179 else if (islit)
1180 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1181 else
1182 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1183 }
1184
1185
1186 /* EXTWH, EXTLH, EXTQH */
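/* These shift Ra left by (64 - 8 * (Rb & 7)) & 63 bits and then apply the
   byte mask; the final "& 63" turns a byte offset of zero into a shift of
   zero rather than an undefined shift by 64. */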
1187 static void gen_ext_h(int ra, int rb, int rc, int islit,
1188 uint8_t lit, uint8_t byte_mask)
1189 {
1190 if (unlikely(rc == 31))
1191 return;
1192 else if (unlikely(ra == 31))
1193 tcg_gen_movi_i64(cpu_ir[rc], 0);
1194 else {
1195 if (islit) {
1196 lit = (64 - (lit & 7) * 8) & 0x3f;
1197 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1198 } else {
1199 TCGv tmp1 = tcg_temp_new();
1200 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1201 tcg_gen_shli_i64(tmp1, tmp1, 3);
1202 tcg_gen_neg_i64(tmp1, tmp1);
1203 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1204 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1205 tcg_temp_free(tmp1);
1206 }
1207 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1208 }
1209 }
1210
1211 /* EXTBL, EXTWL, EXTLL, EXTQL */
1212 static void gen_ext_l(int ra, int rb, int rc, int islit,
1213 uint8_t lit, uint8_t byte_mask)
1214 {
1215 if (unlikely(rc == 31))
1216 return;
1217 else if (unlikely(ra == 31))
1218 tcg_gen_movi_i64(cpu_ir[rc], 0);
1219 else {
1220 if (islit) {
1221 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1222 } else {
1223 TCGv tmp = tcg_temp_new();
1224 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1225 tcg_gen_shli_i64(tmp, tmp, 3);
1226 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1227 tcg_temp_free(tmp);
1228 }
1229 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1230 }
1231 }
1232
1233 /* INSWH, INSLH, INSQH */
1234 static void gen_ins_h(int ra, int rb, int rc, int islit,
1235 uint8_t lit, uint8_t byte_mask)
1236 {
1237 if (unlikely(rc == 31))
1238 return;
1239 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1240 tcg_gen_movi_i64(cpu_ir[rc], 0);
1241 else {
1242 TCGv tmp = tcg_temp_new();
1243
1244 /* The instruction description has us left-shift the byte mask
1245 and extract bits <15:8> and apply that zap at the end. This
1246 is equivalent to simply performing the zap first and shifting
1247 afterward. */
1248 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1249
1250 if (islit) {
1251 /* Note that we have handled the lit==0 case above. */
1252 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1253 } else {
1254 TCGv shift = tcg_temp_new();
1255
1256 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1257 Do this portably by splitting the shift into two parts:
1258 shift_count-1 and 1. Arrange for the -1 by using
1259 ones-complement instead of twos-complement in the negation:
1260 ~((B & 7) * 8) & 63. */
1261
1262 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1263 tcg_gen_shli_i64(shift, shift, 3);
1264 tcg_gen_not_i64(shift, shift);
1265 tcg_gen_andi_i64(shift, shift, 0x3f);
1266
1267 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1268 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1269 tcg_temp_free(shift);
1270 }
1271 tcg_temp_free(tmp);
1272 }
1273 }
1274
1275 /* INSBL, INSWL, INSLL, INSQL */
1276 static void gen_ins_l(int ra, int rb, int rc, int islit,
1277 uint8_t lit, uint8_t byte_mask)
1278 {
1279 if (unlikely(rc == 31))
1280 return;
1281 else if (unlikely(ra == 31))
1282 tcg_gen_movi_i64(cpu_ir[rc], 0);
1283 else {
1284 TCGv tmp = tcg_temp_new();
1285
1286 /* The instruction description has us left-shift the byte mask
1287 the same number of byte slots as the data and apply the zap
1288 at the end. This is equivalent to simply performing the zap
1289 first and shifting afterward. */
1290 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1291
1292 if (islit) {
1293 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1294 } else {
1295 TCGv shift = tcg_temp_new();
1296 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1297 tcg_gen_shli_i64(shift, shift, 3);
1298 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1299 tcg_temp_free(shift);
1300 }
1301 tcg_temp_free(tmp);
1302 }
1303 }
1304
1305 /* MSKWH, MSKLH, MSKQH */
1306 static void gen_msk_h(int ra, int rb, int rc, int islit,
1307 uint8_t lit, uint8_t byte_mask)
1308 {
1309 if (unlikely(rc == 31))
1310 return;
1311 else if (unlikely(ra == 31))
1312 tcg_gen_movi_i64(cpu_ir[rc], 0);
1313 else if (islit) {
1314 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1315 } else {
1316 TCGv shift = tcg_temp_new();
1317 TCGv mask = tcg_temp_new();
1318
1319 /* The instruction description is as above, where the byte_mask
1320 is shifted left, and then we extract bits <15:8>. This can be
1321 emulated with a right-shift on the expanded byte mask. This
1322 requires extra care because for an input <2:0> == 0 we need a
1323 shift of 64 bits in order to generate a zero. This is done by
1324 splitting the shift into two parts, the variable shift - 1
1325 followed by a constant 1 shift. The code we expand below is
1326 equivalent to ~((B & 7) * 8) & 63. */
1327
1328 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1329 tcg_gen_shli_i64(shift, shift, 3);
1330 tcg_gen_not_i64(shift, shift);
1331 tcg_gen_andi_i64(shift, shift, 0x3f);
1332 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1333 tcg_gen_shr_i64(mask, mask, shift);
1334 tcg_gen_shri_i64(mask, mask, 1);
1335
1336 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1337
1338 tcg_temp_free(mask);
1339 tcg_temp_free(shift);
1340 }
1341 }
1342
1343 /* MSKBL, MSKWL, MSKLL, MSKQL */
1344 static void gen_msk_l(int ra, int rb, int rc, int islit,
1345 uint8_t lit, uint8_t byte_mask)
1346 {
1347 if (unlikely(rc == 31))
1348 return;
1349 else if (unlikely(ra == 31))
1350 tcg_gen_movi_i64(cpu_ir[rc], 0);
1351 else if (islit) {
1352 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1353 } else {
1354 TCGv shift = tcg_temp_new();
1355 TCGv mask = tcg_temp_new();
1356
1357 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1358 tcg_gen_shli_i64(shift, shift, 3);
1359 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1360 tcg_gen_shl_i64(mask, mask, shift);
1361
1362 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1363
1364 tcg_temp_free(mask);
1365 tcg_temp_free(shift);
1366 }
1367 }
1368
1369 /* Code to call arith3 helpers */
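/* Each expansion defines gen_<name>(ra, rb, rc, islit, lit): $31 inputs
   read as zero, a literal operand is materialized in a temporary, and the
   whole operation is dropped when rc is $31. */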
1370 #define ARITH3(name) \
1371 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1372 uint8_t lit) \
1373 { \
1374 if (unlikely(rc == 31)) \
1375 return; \
1376 \
1377 if (ra != 31) { \
1378 if (islit) { \
1379 TCGv tmp = tcg_const_i64(lit); \
1380 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1381 tcg_temp_free(tmp); \
1382 } else \
1383 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1384 } else { \
1385 TCGv tmp1 = tcg_const_i64(0); \
1386 if (islit) { \
1387 TCGv tmp2 = tcg_const_i64(lit); \
1388 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1389 tcg_temp_free(tmp2); \
1390 } else \
1391 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1392 tcg_temp_free(tmp1); \
1393 } \
1394 }
1395 ARITH3(cmpbge)
1396 ARITH3(addlv)
1397 ARITH3(sublv)
1398 ARITH3(addqv)
1399 ARITH3(subqv)
1400 ARITH3(umulh)
1401 ARITH3(mullv)
1402 ARITH3(mulqv)
1403 ARITH3(minub8)
1404 ARITH3(minsb8)
1405 ARITH3(minuw4)
1406 ARITH3(minsw4)
1407 ARITH3(maxub8)
1408 ARITH3(maxsb8)
1409 ARITH3(maxuw4)
1410 ARITH3(maxsw4)
1411 ARITH3(perr)
1412
1413 #define MVIOP2(name) \
1414 static inline void glue(gen_, name)(int rb, int rc) \
1415 { \
1416 if (unlikely(rc == 31)) \
1417 return; \
1418 if (unlikely(rb == 31)) \
1419 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1420 else \
1421 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1422 }
1423 MVIOP2(pklb)
1424 MVIOP2(pkwb)
1425 MVIOP2(unpkbl)
1426 MVIOP2(unpkbw)
1427
1428 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1429 int islit, uint8_t lit)
1430 {
1431 TCGv va, vb;
1432
1433 if (unlikely(rc == 31)) {
1434 return;
1435 }
1436
1437 if (ra == 31) {
1438 va = tcg_const_i64(0);
1439 } else {
1440 va = cpu_ir[ra];
1441 }
1442 if (islit) {
1443 vb = tcg_const_i64(lit);
1444 } else {
1445 vb = cpu_ir[rb];
1446 }
1447
1448 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1449
1450 if (ra == 31) {
1451 tcg_temp_free(va);
1452 }
1453 if (islit) {
1454 tcg_temp_free(vb);
1455 }
1456 }
1457
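/* RS/RC: return the current interrupt flag in Ra, then set or clear it. */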
1458 static void gen_rx(int ra, int set)
1459 {
1460 TCGv_i32 tmp;
1461
1462 if (ra != 31) {
1463 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
1464 }
1465
1466 tmp = tcg_const_i32(set);
1467 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
1468 tcg_temp_free_i32(tmp);
1469 }
1470
1471 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1472 {
1473 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1474 to internal cpu registers. */
1475
1476 /* Unprivileged PAL call */
1477 if (palcode >= 0x80 && palcode < 0xC0) {
1478 switch (palcode) {
1479 case 0x86:
1480 /* IMB */
1481 /* No-op inside QEMU. */
1482 break;
1483 case 0x9E:
1484 /* RDUNIQUE */
1485 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1486 break;
1487 case 0x9F:
1488 /* WRUNIQUE */
1489 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1490 break;
1491 default:
1492 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
1493 }
1494 return NO_EXIT;
1495 }
1496
1497 #ifndef CONFIG_USER_ONLY
1498 /* Privileged PAL code */
1499 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1500 switch (palcode) {
1501 case 0x01:
1502 /* CFLUSH */
1503 /* No-op inside QEMU. */
1504 break;
1505 case 0x02:
1506 /* DRAINA */
1507 /* No-op inside QEMU. */
1508 break;
1509 case 0x2D:
1510 /* WRVPTPTR */
1511 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
1512 break;
1513 case 0x31:
1514 /* WRVAL */
1515 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1516 break;
1517 case 0x32:
1518 /* RDVAL */
1519 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1520 break;
1521
1522 case 0x35: {
1523 /* SWPIPL */
1524 TCGv tmp;
1525
1526 /* Note that we already know we're in kernel mode, so we know
1527 that PS only contains the 3 IPL bits. */
1528 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
1529
1530 /* But make sure to store only the 3 IPL bits from the user. */
1531 tmp = tcg_temp_new();
1532 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
1533 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
1534 tcg_temp_free(tmp);
1535 break;
1536 }
1537
1538 case 0x36:
1539 /* RDPS */
1540 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
1541 break;
1542 case 0x38:
1543 /* WRUSP */
1544 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1545 break;
1546 case 0x3A:
1547 /* RDUSP */
1548 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1549 break;
1550 case 0x3C:
1551 /* WHAMI */
1552 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
1553 offsetof(CPUAlphaState, cpu_index));
1554 break;
1555
1556 default:
1557 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
1558 }
1559 return NO_EXIT;
1560 }
1561 #endif
1562
1563 return gen_invalid(ctx);
1564 }
1565
1566 #ifndef CONFIG_USER_ONLY
1567
1568 #define PR_BYTE 0x100000
1569 #define PR_LONG 0x200000
1570
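/* Map an MFPR/MTPR register number to its offset within CPUAlphaState.
   PR_BYTE or PR_LONG is OR'd in when the field is narrower than 64 bits;
   a return of 0 marks an unknown register (read-zero, write-ignore). */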
1571 static int cpu_pr_data(int pr)
1572 {
1573 switch (pr) {
1574 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1575 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1576 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1577 case 3: return offsetof(CPUAlphaState, trap_arg0);
1578 case 4: return offsetof(CPUAlphaState, trap_arg1);
1579 case 5: return offsetof(CPUAlphaState, trap_arg2);
1580 case 6: return offsetof(CPUAlphaState, exc_addr);
1581 case 7: return offsetof(CPUAlphaState, palbr);
1582 case 8: return offsetof(CPUAlphaState, ptbr);
1583 case 9: return offsetof(CPUAlphaState, vptptr);
1584 case 10: return offsetof(CPUAlphaState, unique);
1585 case 11: return offsetof(CPUAlphaState, sysval);
1586 case 12: return offsetof(CPUAlphaState, usp);
1587
1588 case 32 ... 39:
1589 return offsetof(CPUAlphaState, shadow[pr - 32]);
1590 case 40 ... 63:
1591 return offsetof(CPUAlphaState, scratch[pr - 40]);
1592
1593 case 251:
1594 return offsetof(CPUAlphaState, alarm_expire);
1595 }
1596 return 0;
1597 }
1598
1599 static ExitStatus gen_mfpr(int ra, int regno)
1600 {
1601 int data = cpu_pr_data(regno);
1602
1603 /* In our emulated PALcode, these processor registers have no
1604 side effects from reading. */
1605 if (ra == 31) {
1606 return NO_EXIT;
1607 }
1608
1609 if (regno == 250) {
1610 /* WALL_TIME */
1611 if (use_icount) {
1612 gen_io_start();
1613 gen_helper_get_time(cpu_ir[ra]);
1614 gen_io_end();
1615 return EXIT_PC_STALE;
1616 } else {
1617 gen_helper_get_time(cpu_ir[ra]);
1618 return NO_EXIT;
1619 }
1620 }
1621
1622 /* The basic registers are data only, and unknown registers
1623 are read-zero, write-ignore. */
1624 if (data == 0) {
1625 tcg_gen_movi_i64(cpu_ir[ra], 0);
1626 } else if (data & PR_BYTE) {
1627 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1628 } else if (data & PR_LONG) {
1629 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1630 } else {
1631 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1632 }
1633 return NO_EXIT;
1634 }
1635
1636 static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
1637 {
1638 TCGv tmp;
1639 int data;
1640
1641 if (rb == 31) {
1642 tmp = tcg_const_i64(0);
1643 } else {
1644 tmp = cpu_ir[rb];
1645 }
1646
1647 switch (regno) {
1648 case 255:
1649 /* TBIA */
1650 gen_helper_tbia();
1651 break;
1652
1653 case 254:
1654 /* TBIS */
1655 gen_helper_tbis(tmp);
1656 break;
1657
1658 case 253:
1659 /* WAIT */
1660 tmp = tcg_const_i64(1);
1661 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
1662 return gen_excp(ctx, EXCP_HLT, 0);
1663
1664 case 252:
1665 /* HALT */
1666 gen_helper_halt(tmp);
1667 return EXIT_PC_STALE;
1668
1669 case 251:
1670 /* ALARM */
1671 gen_helper_set_alarm(tmp);
1672 break;
1673
1674 default:
1675 /* The basic registers are data only, and unknown registers
1676 are read-zero, write-ignore. */
1677 data = cpu_pr_data(regno);
1678 if (data != 0) {
1679 if (data & PR_BYTE) {
1680 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1681 } else if (data & PR_LONG) {
1682 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1683 } else {
1684 tcg_gen_st_i64(tmp, cpu_env, data);
1685 }
1686 }
1687 break;
1688 }
1689
1690 if (rb == 31) {
1691 tcg_temp_free(tmp);
1692 }
1693
1694 return NO_EXIT;
1695 }
1696 #endif /* !USER_ONLY */
1697
1698 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1699 {
1700 uint32_t palcode;
1701 int32_t disp21, disp16;
1702 #ifndef CONFIG_USER_ONLY
1703 int32_t disp12;
1704 #endif
1705 uint16_t fn11;
1706 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
1707 uint8_t lit;
1708 ExitStatus ret;
1709
1710 /* Decode all instruction fields */
1711 opc = insn >> 26;
1712 ra = (insn >> 21) & 0x1F;
1713 rb = (insn >> 16) & 0x1F;
1714 rc = insn & 0x1F;
1715 real_islit = islit = (insn >> 12) & 1;
1716 if (rb == 31 && !islit) {
1717 islit = 1;
1718 lit = 0;
1719 } else
1720 lit = (insn >> 13) & 0xFF;
1721 palcode = insn & 0x03FFFFFF;
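/* The 21-bit and (system-mode) 12-bit displacements are sign-extended by
   shifting them up to bit 31 and arithmetic-shifting back down. */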
1722 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1723 disp16 = (int16_t)(insn & 0x0000FFFF);
1724 #ifndef CONFIG_USER_ONLY
1725 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1726 #endif
1727 fn11 = (insn >> 5) & 0x000007FF;
1728 fpfn = fn11 & 0x3F;
1729 fn7 = (insn >> 5) & 0x0000007F;
1730 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1731 opc, ra, rb, rc, disp16);
1732
1733 ret = NO_EXIT;
1734 switch (opc) {
1735 case 0x00:
1736 /* CALL_PAL */
1737 ret = gen_call_pal(ctx, palcode);
1738 break;
1739 case 0x01:
1740 /* OPC01 */
1741 goto invalid_opc;
1742 case 0x02:
1743 /* OPC02 */
1744 goto invalid_opc;
1745 case 0x03:
1746 /* OPC03 */
1747 goto invalid_opc;
1748 case 0x04:
1749 /* OPC04 */
1750 goto invalid_opc;
1751 case 0x05:
1752 /* OPC05 */
1753 goto invalid_opc;
1754 case 0x06:
1755 /* OPC06 */
1756 goto invalid_opc;
1757 case 0x07:
1758 /* OPC07 */
1759 goto invalid_opc;
1760 case 0x08:
1761 /* LDA */
1762 if (likely(ra != 31)) {
1763 if (rb != 31)
1764 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1765 else
1766 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1767 }
1768 break;
1769 case 0x09:
1770 /* LDAH */
1771 if (likely(ra != 31)) {
1772 if (rb != 31)
1773 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1774 else
1775 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1776 }
1777 break;
1778 case 0x0A:
1779 /* LDBU */
1780 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1781 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1782 break;
1783 }
1784 goto invalid_opc;
1785 case 0x0B:
1786 /* LDQ_U */
1787 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1788 break;
1789 case 0x0C:
1790 /* LDWU */
1791 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1792 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1793 break;
1794 }
1795 goto invalid_opc;
1796 case 0x0D:
1797 /* STW */
1798 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1799 break;
1800 case 0x0E:
1801 /* STB */
1802 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1803 break;
1804 case 0x0F:
1805 /* STQ_U */
1806 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1807 break;
1808 case 0x10:
1809 switch (fn7) {
1810 case 0x00:
1811 /* ADDL */
1812 if (likely(rc != 31)) {
1813 if (ra != 31) {
1814 if (islit) {
1815 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1816 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1817 } else {
1818 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1819 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1820 }
1821 } else {
1822 if (islit)
1823 tcg_gen_movi_i64(cpu_ir[rc], lit);
1824 else
1825 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1826 }
1827 }
1828 break;
1829 case 0x02:
1830 /* S4ADDL */
1831 if (likely(rc != 31)) {
1832 if (ra != 31) {
1833 TCGv tmp = tcg_temp_new();
1834 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1835 if (islit)
1836 tcg_gen_addi_i64(tmp, tmp, lit);
1837 else
1838 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1839 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1840 tcg_temp_free(tmp);
1841 } else {
1842 if (islit)
1843 tcg_gen_movi_i64(cpu_ir[rc], lit);
1844 else
1845 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1846 }
1847 }
1848 break;
1849 case 0x09:
1850 /* SUBL */
1851 if (likely(rc != 31)) {
1852 if (ra != 31) {
1853 if (islit)
1854 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1855 else
1856 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1857 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1858 } else {
1859 if (islit)
1860 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1861 else {
1862 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1863 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1864 }
1865 }
1866 }
1866 break;
1867 case 0x0B:
1868 /* S4SUBL */
1869 if (likely(rc != 31)) {
1870 if (ra != 31) {
1871 TCGv tmp = tcg_temp_new();
1872 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1873 if (islit)
1874 tcg_gen_subi_i64(tmp, tmp, lit);
1875 else
1876 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1877 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1878 tcg_temp_free(tmp);
1879 } else {
1880 if (islit)
1881 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1882 else {
1883 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1884 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1885 }
1886 }
1887 }
1888 break;
1889 case 0x0F:
1890 /* CMPBGE */
1891 gen_cmpbge(ra, rb, rc, islit, lit);
1892 break;
1893 case 0x12:
1894 /* S8ADDL */
1895 if (likely(rc != 31)) {
1896 if (ra != 31) {
1897 TCGv tmp = tcg_temp_new();
1898 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1899 if (islit)
1900 tcg_gen_addi_i64(tmp, tmp, lit);
1901 else
1902 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1903 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1904 tcg_temp_free(tmp);
1905 } else {
1906 if (islit)
1907 tcg_gen_movi_i64(cpu_ir[rc], lit);
1908 else
1909 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1910 }
1911 }
1912 break;
1913 case 0x1B:
1914 /* S8SUBL */
1915 if (likely(rc != 31)) {
1916 if (ra != 31) {
1917 TCGv tmp = tcg_temp_new();
1918 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1919 if (islit)
1920 tcg_gen_subi_i64(tmp, tmp, lit);
1921 else
1922 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1923 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1924 tcg_temp_free(tmp);
1925 } else {
1926 if (islit)
1927 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1928 else
1929 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1930 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1931 }
1932 }
1934 break;
1935 case 0x1D:
1936 /* CMPULT */
1937 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1938 break;
1939 case 0x20:
1940 /* ADDQ */
1941 if (likely(rc != 31)) {
1942 if (ra != 31) {
1943 if (islit)
1944 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1945 else
1946 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1947 } else {
1948 if (islit)
1949 tcg_gen_movi_i64(cpu_ir[rc], lit);
1950 else
1951 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1952 }
1953 }
1954 break;
1955 case 0x22:
1956 /* S4ADDQ */
1957 if (likely(rc != 31)) {
1958 if (ra != 31) {
1959 TCGv tmp = tcg_temp_new();
1960 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1961 if (islit)
1962 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1963 else
1964 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1965 tcg_temp_free(tmp);
1966 } else {
1967 if (islit)
1968 tcg_gen_movi_i64(cpu_ir[rc], lit);
1969 else
1970 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1971 }
1972 }
1973 break;
1974 case 0x29:
1975 /* SUBQ */
1976 if (likely(rc != 31)) {
1977 if (ra != 31) {
1978 if (islit)
1979 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1980 else
1981 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1982 } else {
1983 if (islit)
1984 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1985 else
1986 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1987 }
1988 }
1989 break;
1990 case 0x2B:
1991 /* S4SUBQ */
1992 if (likely(rc != 31)) {
1993 if (ra != 31) {
1994 TCGv tmp = tcg_temp_new();
1995 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1996 if (islit)
1997 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1998 else
1999 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2000 tcg_temp_free(tmp);
2001 } else {
2002 if (islit)
2003 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2004 else
2005 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2006 }
2007 }
2008 break;
2009 case 0x2D:
2010 /* CMPEQ */
2011 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
2012 break;
2013 case 0x32:
2014 /* S8ADDQ */
2015 if (likely(rc != 31)) {
2016 if (ra != 31) {
2017 TCGv tmp = tcg_temp_new();
2018 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2019 if (islit)
2020 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2021 else
2022 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2023 tcg_temp_free(tmp);
2024 } else {
2025 if (islit)
2026 tcg_gen_movi_i64(cpu_ir[rc], lit);
2027 else
2028 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2029 }
2030 }
2031 break;
2032 case 0x3B:
2033 /* S8SUBQ */
2034 if (likely(rc != 31)) {
2035 if (ra != 31) {
2036 TCGv tmp = tcg_temp_new();
2037 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2038 if (islit)
2039 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2040 else
2041 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2042 tcg_temp_free(tmp);
2043 } else {
2044 if (islit)
2045 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2046 else
2047 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2048 }
2049 }
2050 break;
2051 case 0x3D:
2052 /* CMPULE */
2053 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2054 break;
2055 case 0x40:
2056 /* ADDL/V */
2057 gen_addlv(ra, rb, rc, islit, lit);
2058 break;
2059 case 0x49:
2060 /* SUBL/V */
2061 gen_sublv(ra, rb, rc, islit, lit);
2062 break;
2063 case 0x4D:
2064 /* CMPLT */
2065 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2066 break;
2067 case 0x60:
2068 /* ADDQ/V */
2069 gen_addqv(ra, rb, rc, islit, lit);
2070 break;
2071 case 0x69:
2072 /* SUBQ/V */
2073 gen_subqv(ra, rb, rc, islit, lit);
2074 break;
2075 case 0x6D:
2076 /* CMPLE */
2077 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2078 break;
2079 default:
2080 goto invalid_opc;
2081 }
2082 break;
2083 case 0x11:
2084 switch (fn7) {
2085 case 0x00:
2086 /* AND */
2087 if (likely(rc != 31)) {
2088 if (ra == 31)
2089 tcg_gen_movi_i64(cpu_ir[rc], 0);
2090 else if (islit)
2091 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2092 else
2093 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2094 }
2095 break;
2096 case 0x08:
2097 /* BIC */
2098 if (likely(rc != 31)) {
2099 if (ra != 31) {
2100 if (islit)
2101 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2102 else
2103 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2104 } else
2105 tcg_gen_movi_i64(cpu_ir[rc], 0);
2106 }
2107 break;
2108 case 0x14:
2109 /* CMOVLBS */
2110 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2111 break;
2112 case 0x16:
2113 /* CMOVLBC */
2114 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2115 break;
2116 case 0x20:
2117 /* BIS */
2118 if (likely(rc != 31)) {
2119 if (ra != 31) {
2120 if (islit)
2121 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2122 else
2123 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2124 } else {
2125 if (islit)
2126 tcg_gen_movi_i64(cpu_ir[rc], lit);
2127 else
2128 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2129 }
2130 }
2131 break;
2132 case 0x24:
2133 /* CMOVEQ */
2134 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2135 break;
2136 case 0x26:
2137 /* CMOVNE */
2138 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2139 break;
2140 case 0x28:
2141 /* ORNOT */
2142 if (likely(rc != 31)) {
2143 if (ra != 31) {
2144 if (islit)
2145 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2146 else
2147 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2148 } else {
2149 if (islit)
2150 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2151 else
2152 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2153 }
2154 }
2155 break;
2156 case 0x40:
2157 /* XOR */
2158 if (likely(rc != 31)) {
2159 if (ra != 31) {
2160 if (islit)
2161 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2162 else
2163 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2164 } else {
2165 if (islit)
2166 tcg_gen_movi_i64(cpu_ir[rc], lit);
2167 else
2168 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2169 }
2170 }
2171 break;
2172 case 0x44:
2173 /* CMOVLT */
2174 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2175 break;
2176 case 0x46:
2177 /* CMOVGE */
2178 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2179 break;
2180 case 0x48:
2181 /* EQV */
2182 if (likely(rc != 31)) {
2183 if (ra != 31) {
2184 if (islit)
2185 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2186 else
2187 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2188 } else {
2189 if (islit)
2190 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2191 else
2192 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2193 }
2194 }
2195 break;
2196 case 0x61:
2197 /* AMASK */
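/* The feature bits configured for this cpu (see cpu_defs below) arrive
   via the TB flags; AMASK clears those bits in the operand, so a zero
   bit in the result tells software the extension is implemented. */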
2198 if (likely(rc != 31)) {
2199 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2200
2201 if (islit) {
2202 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2203 } else {
2204 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2205 }
2206 }
2207 break;
2208 case 0x64:
2209 /* CMOVLE */
2210 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2211 break;
2212 case 0x66:
2213 /* CMOVGT */
2214 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2215 break;
2216 case 0x6C:
2217 /* IMPLVER */
2218 if (rc != 31)
2219 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2220 break;
2221 default:
2222 goto invalid_opc;
2223 }
2224 break;
2225 case 0x12:
2226 switch (fn7) {
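/* The final argument to the gen_msk/ext/ins helpers below is a
   byte-enable mask selecting the low bytes the operation touches:
   0x01 = byte, 0x03 = word, 0x0f = longword, 0xff = quadword. */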
2227 case 0x02:
2228 /* MSKBL */
2229 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2230 break;
2231 case 0x06:
2232 /* EXTBL */
2233 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2234 break;
2235 case 0x0B:
2236 /* INSBL */
2237 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2238 break;
2239 case 0x12:
2240 /* MSKWL */
2241 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2242 break;
2243 case 0x16:
2244 /* EXTWL */
2245 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2246 break;
2247 case 0x1B:
2248 /* INSWL */
2249 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2250 break;
2251 case 0x22:
2252 /* MSKLL */
2253 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2254 break;
2255 case 0x26:
2256 /* EXTLL */
2257 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2258 break;
2259 case 0x2B:
2260 /* INSLL */
2261 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2262 break;
2263 case 0x30:
2264 /* ZAP */
2265 gen_zap(ra, rb, rc, islit, lit);
2266 break;
2267 case 0x31:
2268 /* ZAPNOT */
2269 gen_zapnot(ra, rb, rc, islit, lit);
2270 break;
2271 case 0x32:
2272 /* MSKQL */
2273 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2274 break;
2275 case 0x34:
2276 /* SRL */
2277 if (likely(rc != 31)) {
2278 if (ra != 31) {
2279 if (islit)
2280 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2281 else {
2282 TCGv shift = tcg_temp_new();
2283 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2284 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2285 tcg_temp_free(shift);
2286 }
2287 } else
2288 tcg_gen_movi_i64(cpu_ir[rc], 0);
2289 }
2290 break;
2291 case 0x36:
2292 /* EXTQL */
2293 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2294 break;
2295 case 0x39:
2296 /* SLL */
2297 if (likely(rc != 31)) {
2298 if (ra != 31) {
2299 if (islit)
2300 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2301 else {
2302 TCGv shift = tcg_temp_new();
2303 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2304 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2305 tcg_temp_free(shift);
2306 }
2307 } else
2308 tcg_gen_movi_i64(cpu_ir[rc], 0);
2309 }
2310 break;
2311 case 0x3B:
2312 /* INSQL */
2313 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2314 break;
2315 case 0x3C:
2316 /* SRA */
2317 if (likely(rc != 31)) {
2318 if (ra != 31) {
2319 if (islit)
2320 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2321 else {
2322 TCGv shift = tcg_temp_new();
2323 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2324 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2325 tcg_temp_free(shift);
2326 }
2327 } else
2328 tcg_gen_movi_i64(cpu_ir[rc], 0);
2329 }
2330 break;
2331 case 0x52:
2332 /* MSKWH */
2333 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2334 break;
2335 case 0x57:
2336 /* INSWH */
2337 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2338 break;
2339 case 0x5A:
2340 /* EXTWH */
2341 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2342 break;
2343 case 0x62:
2344 /* MSKLH */
2345 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2346 break;
2347 case 0x67:
2348 /* INSLH */
2349 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2350 break;
2351 case 0x6A:
2352 /* EXTLH */
2353 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2354 break;
2355 case 0x72:
2356 /* MSKQH */
2357 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2358 break;
2359 case 0x77:
2360 /* INSQH */
2361 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2362 break;
2363 case 0x7A:
2364 /* EXTQH */
2365 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2366 break;
2367 default:
2368 goto invalid_opc;
2369 }
2370 break;
2371 case 0x13:
2372 switch (fn7) {
2373 case 0x00:
2374 /* MULL */
2375 if (likely(rc != 31)) {
2376 if (ra == 31)
2377 tcg_gen_movi_i64(cpu_ir[rc], 0);
2378 else {
2379 if (islit)
2380 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2381 else
2382 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2383 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2384 }
2385 }
2386 break;
2387 case 0x20:
2388 /* MULQ */
2389 if (likely(rc != 31)) {
2390 if (ra == 31)
2391 tcg_gen_movi_i64(cpu_ir[rc], 0);
2392 else if (islit)
2393 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2394 else
2395 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2396 }
2397 break;
2398 case 0x30:
2399 /* UMULH */
2400 gen_umulh(ra, rb, rc, islit, lit);
2401 break;
2402 case 0x40:
2403 /* MULL/V */
2404 gen_mullv(ra, rb, rc, islit, lit);
2405 break;
2406 case 0x60:
2407 /* MULQ/V */
2408 gen_mulqv(ra, rb, rc, islit, lit);
2409 break;
2410 default:
2411 goto invalid_opc;
2412 }
2413 break;
2414 case 0x14:
2415 switch (fpfn) { /* fn11 & 0x3F */
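/* The ITOF and square-root group: every case below is gated on the
   FIX extension via TB_FLAGS_AMASK_FIX. */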
2416 case 0x04:
2417 /* ITOFS */
2418 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2419 goto invalid_opc;
2420 }
2421 if (likely(rc != 31)) {
2422 if (ra != 31) {
2423 TCGv_i32 tmp = tcg_temp_new_i32();
2424 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2425 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2426 tcg_temp_free_i32(tmp);
2427 } else
2428 tcg_gen_movi_i64(cpu_fir[rc], 0);
2429 }
2430 break;
2431 case 0x0A:
2432 /* SQRTF */
2433 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2434 gen_fsqrtf(rb, rc);
2435 break;
2436 }
2437 goto invalid_opc;
2438 case 0x0B:
2439 /* SQRTS */
2440 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2441 gen_fsqrts(ctx, rb, rc, fn11);
2442 break;
2443 }
2444 goto invalid_opc;
2445 case 0x14:
2446 /* ITOFF */
2447 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2448 goto invalid_opc;
2449 }
2450 if (likely(rc != 31)) {
2451 if (ra != 31) {
2452 TCGv_i32 tmp = tcg_temp_new_i32();
2453 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2454 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2455 tcg_temp_free_i32(tmp);
2456 } else
2457 tcg_gen_movi_i64(cpu_fir[rc], 0);
2458 }
2459 break;
2460 case 0x24:
2461 /* ITOFT */
2462 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2463 goto invalid_opc;
2464 }
2465 if (likely(rc != 31)) {
2466 if (ra != 31)
2467 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2468 else
2469 tcg_gen_movi_i64(cpu_fir[rc], 0);
2470 }
2471 break;
2472 case 0x2A:
2473 /* SQRTG */
2474 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2475 gen_fsqrtg(rb, rc);
2476 break;
2477 }
2478 goto invalid_opc;
2479 case 0x2B:
2480 /* SQRTT */
2481 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2482 gen_fsqrtt(ctx, rb, rc, fn11);
2483 break;
2484 }
2485 goto invalid_opc;
2486 default:
2487 goto invalid_opc;
2488 }
2489 break;
2490 case 0x15:
2491 /* VAX floating point */
2492 /* XXX: rounding mode and trap are ignored (!) */
2493 switch (fpfn) { /* fn11 & 0x3F */
2494 case 0x00:
2495 /* ADDF */
2496 gen_faddf(ra, rb, rc);
2497 break;
2498 case 0x01:
2499 /* SUBF */
2500 gen_fsubf(ra, rb, rc);
2501 break;
2502 case 0x02:
2503 /* MULF */
2504 gen_fmulf(ra, rb, rc);
2505 break;
2506 case 0x03:
2507 /* DIVF */
2508 gen_fdivf(ra, rb, rc);
2509 break;
2510 case 0x1E:
2511 /* CVTDG */
2512 #if 0 // TODO
2513 gen_fcvtdg(rb, rc);
2514 #else
2515 goto invalid_opc;
2516 #endif
2517 break;
2518 case 0x20:
2519 /* ADDG */
2520 gen_faddg(ra, rb, rc);
2521 break;
2522 case 0x21:
2523 /* SUBG */
2524 gen_fsubg(ra, rb, rc);
2525 break;
2526 case 0x22:
2527 /* MULG */
2528 gen_fmulg(ra, rb, rc);
2529 break;
2530 case 0x23:
2531 /* DIVG */
2532 gen_fdivg(ra, rb, rc);
2533 break;
2534 case 0x25:
2535 /* CMPGEQ */
2536 gen_fcmpgeq(ra, rb, rc);
2537 break;
2538 case 0x26:
2539 /* CMPGLT */
2540 gen_fcmpglt(ra, rb, rc);
2541 break;
2542 case 0x27:
2543 /* CMPGLE */
2544 gen_fcmpgle(ra, rb, rc);
2545 break;
2546 case 0x2C:
2547 /* CVTGF */
2548 gen_fcvtgf(rb, rc);
2549 break;
2550 case 0x2D:
2551 /* CVTGD */
2552 #if 0 // TODO
2553 gen_fcvtgd(rb, rc);
2554 #else
2555 goto invalid_opc;
2556 #endif
2557 break;
2558 case 0x2F:
2559 /* CVTGQ */
2560 gen_fcvtgq(rb, rc);
2561 break;
2562 case 0x3C:
2563 /* CVTQF */
2564 gen_fcvtqf(rb, rc);
2565 break;
2566 case 0x3E:
2567 /* CVTQG */
2568 gen_fcvtqg(rb, rc);
2569 break;
2570 default:
2571 goto invalid_opc;
2572 }
2573 break;
2574 case 0x16:
2575 /* IEEE floating-point */
2576 switch (fpfn) { /* fn11 & 0x3F */
2577 case 0x00:
2578 /* ADDS */
2579 gen_fadds(ctx, ra, rb, rc, fn11);
2580 break;
2581 case 0x01:
2582 /* SUBS */
2583 gen_fsubs(ctx, ra, rb, rc, fn11);
2584 break;
2585 case 0x02:
2586 /* MULS */
2587 gen_fmuls(ctx, ra, rb, rc, fn11);
2588 break;
2589 case 0x03:
2590 /* DIVS */
2591 gen_fdivs(ctx, ra, rb, rc, fn11);
2592 break;
2593 case 0x20:
2594 /* ADDT */
2595 gen_faddt(ctx, ra, rb, rc, fn11);
2596 break;
2597 case 0x21:
2598 /* SUBT */
2599 gen_fsubt(ctx, ra, rb, rc, fn11);
2600 break;
2601 case 0x22:
2602 /* MULT */
2603 gen_fmult(ctx, ra, rb, rc, fn11);
2604 break;
2605 case 0x23:
2606 /* DIVT */
2607 gen_fdivt(ctx, ra, rb, rc, fn11);
2608 break;
2609 case 0x24:
2610 /* CMPTUN */
2611 gen_fcmptun(ctx, ra, rb, rc, fn11);
2612 break;
2613 case 0x25:
2614 /* CMPTEQ */
2615 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2616 break;
2617 case 0x26:
2618 /* CMPTLT */
2619 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2620 break;
2621 case 0x27:
2622 /* CMPTLE */
2623 gen_fcmptle(ctx, ra, rb, rc, fn11);
2624 break;
2625 case 0x2C:
2626 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2627 /* CVTST */
2628 gen_fcvtst(ctx, rb, rc, fn11);
2629 } else {
2630 /* CVTTS */
2631 gen_fcvtts(ctx, rb, rc, fn11);
2632 }
2633 break;
2634 case 0x2F:
2635 /* CVTTQ */
2636 gen_fcvttq(ctx, rb, rc, fn11);
2637 break;
2638 case 0x3C:
2639 /* CVTQS */
2640 gen_fcvtqs(ctx, rb, rc, fn11);
2641 break;
2642 case 0x3E:
2643 /* CVTQT */
2644 gen_fcvtqt(ctx, rb, rc, fn11);
2645 break;
2646 default:
2647 goto invalid_opc;
2648 }
2649 break;
2650 case 0x17:
2651 switch (fn11) {
2652 case 0x010:
2653 /* CVTLQ */
2654 gen_fcvtlq(rb, rc);
2655 break;
2656 case 0x020:
2657 if (likely(rc != 31)) {
2658 if (ra == rb) {
2659 /* FMOV */
2660 if (ra == 31)
2661 tcg_gen_movi_i64(cpu_fir[rc], 0);
2662 else
2663 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2664 } else {
2665 /* CPYS */
2666 gen_fcpys(ra, rb, rc);
2667 }
2668 }
2669 break;
2670 case 0x021:
2671 /* CPYSN */
2672 gen_fcpysn(ra, rb, rc);
2673 break;
2674 case 0x022:
2675 /* CPYSE */
2676 gen_fcpyse(ra, rb, rc);
2677 break;
2678 case 0x024:
2679 /* MT_FPCR */
2680 if (likely(ra != 31))
2681 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
2682 else {
2683 TCGv tmp = tcg_const_i64(0);
2684 gen_helper_store_fpcr(cpu_env, tmp);
2685 tcg_temp_free(tmp);
2686 }
2687 break;
2688 case 0x025:
2689 /* MF_FPCR */
2690 if (likely(ra != 31))
2691 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
2692 break;
2693 case 0x02A:
2694 /* FCMOVEQ */
2695 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2696 break;
2697 case 0x02B:
2698 /* FCMOVNE */
2699 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2700 break;
2701 case 0x02C:
2702 /* FCMOVLT */
2703 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2704 break;
2705 case 0x02D:
2706 /* FCMOVGE */
2707 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2708 break;
2709 case 0x02E:
2710 /* FCMOVLE */
2711 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2712 break;
2713 case 0x02F:
2714 /* FCMOVGT */
2715 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2716 break;
2717 case 0x030:
2718 /* CVTQL */
2719 gen_fcvtql(rb, rc);
2720 break;
2721 case 0x130:
2722 /* CVTQL/V */
2723 case 0x530:
2724 /* CVTQL/SV */
2725 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2726 /v doesn't do. The only thing I can think of is that /sv is a
2727 valid instruction merely for completeness in the ISA. */
2728 gen_fcvtql_v(ctx, rb, rc);
2729 break;
2730 default:
2731 goto invalid_opc;
2732 }
2733 break;
2734 case 0x18:
2735 switch ((uint16_t)disp16) {
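/* For this opcode the function is encoded in the 16-bit displacement
   field rather than in fn7/fn11. */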
2736 case 0x0000:
2737 /* TRAPB */
2738 /* No-op. */
2739 break;
2740 case 0x0400:
2741 /* EXCB */
2742 /* No-op. */
2743 break;
2744 case 0x4000:
2745 /* MB */
2746 /* No-op */
2747 break;
2748 case 0x4400:
2749 /* WMB */
2750 /* No-op */
2751 break;
2752 case 0x8000:
2753 /* FETCH */
2754 /* No-op */
2755 break;
2756 case 0xA000:
2757 /* FETCH_M */
2758 /* No-op */
2759 break;
2760 case 0xC000:
2761 /* RPCC */
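/* When icount is enabled, reading the cycle counter counts as an I/O
   operation: bracket it with gen_io_start/gen_io_end and force the TB
   to end afterwards (EXIT_PC_STALE writes the next PC on exit). */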
2762 if (ra != 31) {
2763 if (use_icount) {
2764 gen_io_start();
2765 gen_helper_load_pcc(cpu_ir[ra]);
2766 gen_io_end();
2767 ret = EXIT_PC_STALE;
2768 } else {
2769 gen_helper_load_pcc(cpu_ir[ra]);
2770 }
2771 }
2772 break;
2773 case 0xE000:
2774 /* RC */
2775 gen_rx(ra, 0);
2776 break;
2777 case 0xE800:
2778 /* ECB */
2779 break;
2780 case 0xF000:
2781 /* RS */
2782 gen_rx(ra, 1);
2783 break;
2784 case 0xF800:
2785 /* WH64 */
2786 /* No-op */
2787 break;
2788 default:
2789 goto invalid_opc;
2790 }
2791 break;
2792 case 0x19:
2793 /* HW_MFPR (PALcode) */
2794 #ifndef CONFIG_USER_ONLY
2795 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2796 return gen_mfpr(ra, insn & 0xffff);
2797 }
2798 #endif
2799 goto invalid_opc;
2800 case 0x1A:
2801 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2802 prediction stack action, which of course we don't implement. */
2803 if (rb != 31) {
2804 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2805 } else {
2806 tcg_gen_movi_i64(cpu_pc, 0);
2807 }
2808 if (ra != 31) {
2809 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2810 }
2811 ret = EXIT_PC_UPDATED;
2812 break;
2813 case 0x1B:
2814 /* HW_LD (PALcode) */
2815 #ifndef CONFIG_USER_ONLY
2816 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2817 TCGv addr;
2818
2819 if (ra == 31) {
2820 break;
2821 }
2822
2823 addr = tcg_temp_new();
2824 if (rb != 31)
2825 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2826 else
2827 tcg_gen_movi_i64(addr, disp12);
2828 switch ((insn >> 12) & 0xF) {
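/* insn<15:12> selects the HW_LD variant; only the physical and the
   protection-checked virtual forms are implemented here. */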
2829 case 0x0:
2830 /* Longword physical access (hw_ldl/p) */
2831 gen_helper_ldl_phys(cpu_ir[ra], addr);
2832 break;
2833 case 0x1:
2834 /* Quadword physical access (hw_ldq/p) */
2835 gen_helper_ldq_phys(cpu_ir[ra], addr);
2836 break;
2837 case 0x2:
2838 /* Longword physical access with lock (hw_ldl_l/p) */
2839 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
2840 break;
2841 case 0x3:
2842 /* Quadword physical access with lock (hw_ldq_l/p) */
2843 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
2844 break;
2845 case 0x4:
2846 /* Longword virtual PTE fetch (hw_ldl/v) */
2847 goto invalid_opc;
2848 case 0x5:
2849 /* Quadword virtual PTE fetch (hw_ldq/v) */
2850 goto invalid_opc;
2852 case 0x6:
2853 /* Invalid */
2854 goto invalid_opc;
2855 case 0x7:
2856 /* Invalid */
2857 goto invalid_opc;
2858 case 0x8:
2859 /* Longword virtual access (hw_ldl) */
2860 goto invalid_opc;
2861 case 0x9:
2862 /* Quadword virtual access (hw_ldq) */
2863 goto invalid_opc;
2864 case 0xA:
2865 /* Longword virtual access with protection check (hw_ldl/w) */
2866 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2867 break;
2868 case 0xB:
2869 /* Quadword virtual access with protection check (hw_ldq/w) */
2870 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2871 break;
2872 case 0xC:
2873 /* Longword virtual access with alt access mode (hw_ldl/a) */
2874 goto invalid_opc;
2875 case 0xD:
2876 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2877 goto invalid_opc;
2878 case 0xE:
2879 /* Longword virtual access with alternate access mode and
2880 protection checks (hw_ldl/wa) */
2881 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2882 break;
2883 case 0xF:
2884 /* Quadword virtual access with alternate access mode and
2885 protection checks (hw_ldq/wa) */
2886 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2887 break;
2888 }
2889 tcg_temp_free(addr);
2890 break;
2891 }
2892 #endif
2893 goto invalid_opc;
2894 case 0x1C:
2895 switch (fn7) {
2896 case 0x00:
2897 /* SEXTB */
2898 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2899 goto invalid_opc;
2900 }
2901 if (likely(rc != 31)) {
2902 if (islit)
2903 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2904 else
2905 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2906 }
2907 break;
2908 case 0x01:
2909 /* SEXTW */
2910 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2911 if (likely(rc != 31)) {
2912 if (islit) {
2913 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2914 } else {
2915 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2916 }
2917 }
2918 break;
2919 }
2920 goto invalid_opc;
2921 case 0x30:
2922 /* CTPOP */
2923 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2924 if (likely(rc != 31)) {
2925 if (islit) {
2926 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2927 } else {
2928 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2929 }
2930 }
2931 break;
2932 }
2933 goto invalid_opc;
2934 case 0x31:
2935 /* PERR */
2936 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2937 gen_perr(ra, rb, rc, islit, lit);
2938 break;
2939 }
2940 goto invalid_opc;
2941 case 0x32:
2942 /* CTLZ */
2943 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2944 if (likely(rc != 31)) {
2945 if (islit) {
2946 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2947 } else {
2948 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2949 }
2950 }
2951 break;
2952 }
2953 goto invalid_opc;
2954 case 0x33:
2955 /* CTTZ */
2956 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2957 if (likely(rc != 31)) {
2958 if (islit) {
2959 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2960 } else {
2961 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2962 }
2963 }
2964 break;
2965 }
2966 goto invalid_opc;
2967 case 0x34:
2968 /* UNPKBW */
2969 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2970 if (real_islit || ra != 31) {
2971 goto invalid_opc;
2972 }
2973 gen_unpkbw(rb, rc);
2974 break;
2975 }
2976 goto invalid_opc;
2977 case 0x35:
2978 /* UNPKBL */
2979 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2980 if (real_islit || ra != 31) {
2981 goto invalid_opc;
2982 }
2983 gen_unpkbl(rb, rc);
2984 break;
2985 }
2986 goto invalid_opc;
2987 case 0x36:
2988 /* PKWB */
2989 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2990 if (real_islit || ra != 31) {
2991 goto invalid_opc;
2992 }
2993 gen_pkwb(rb, rc);
2994 break;
2995 }
2996 goto invalid_opc;
2997 case 0x37:
2998 /* PKLB */
2999 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3000 if (real_islit || ra != 31) {
3001 goto invalid_opc;
3002 }
3003 gen_pklb(rb, rc);
3004 break;
3005 }
3006 goto invalid_opc;
3007 case 0x38:
3008 /* MINSB8 */
3009 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3010 gen_minsb8(ra, rb, rc, islit, lit);
3011 break;
3012 }
3013 goto invalid_opc;
3014 case 0x39:
3015 /* MINSW4 */
3016 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3017 gen_minsw4(ra, rb, rc, islit, lit);
3018 break;
3019 }
3020 goto invalid_opc;
3021 case 0x3A:
3022 /* MINUB8 */
3023 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3024 gen_minub8(ra, rb, rc, islit, lit);
3025 break;
3026 }
3027 goto invalid_opc;
3028 case 0x3B:
3029 /* MINUW4 */
3030 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3031 gen_minuw4(ra, rb, rc, islit, lit);
3032 break;
3033 }
3034 goto invalid_opc;
3035 case 0x3C:
3036 /* MAXUB8 */
3037 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3038 gen_maxub8(ra, rb, rc, islit, lit);
3039 break;
3040 }
3041 goto invalid_opc;
3042 case 0x3D:
3043 /* MAXUW4 */
3044 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3045 gen_maxuw4(ra, rb, rc, islit, lit);
3046 break;
3047 }
3048 goto invalid_opc;
3049 case 0x3E:
3050 /* MAXSB8 */
3051 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3052 gen_maxsb8(ra, rb, rc, islit, lit);
3053 break;
3054 }
3055 goto invalid_opc;
3056 case 0x3F:
3057 /* MAXSW4 */
3058 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3059 gen_maxsw4(ra, rb, rc, islit, lit);
3060 break;
3061 }
3062 goto invalid_opc;
3063 case 0x70:
3064 /* FTOIT */
3065 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3066 goto invalid_opc;
3067 }
3068 if (likely(rc != 31)) {
3069 if (ra != 31)
3070 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3071 else
3072 tcg_gen_movi_i64(cpu_ir[rc], 0);
3073 }
3074 break;
3075 case 0x78:
3076 /* FTOIS */
3077 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3078 goto invalid_opc;
3079 }
3080 if (rc != 31) {
3081 TCGv_i32 tmp1 = tcg_temp_new_i32();
3082 if (ra != 31)
3083 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3084 else {
3085 TCGv tmp2 = tcg_const_i64(0);
3086 gen_helper_s_to_memory(tmp1, tmp2);
3087 tcg_temp_free(tmp2);
3088 }
3089 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3090 tcg_temp_free_i32(tmp1);
3091 }
3092 break;
3093 default:
3094 goto invalid_opc;
3095 }
3096 break;
3097 case 0x1D:
3098 /* HW_MTPR (PALcode) */
3099 #ifndef CONFIG_USER_ONLY
3100 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3101 return gen_mtpr(ctx, rb, insn & 0xffff);
3102 }
3103 #endif
3104 goto invalid_opc;
3105 case 0x1E:
3106 /* HW_RET (PALcode) */
3107 #ifndef CONFIG_USER_ONLY
3108 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3109 if (rb == 31) {
3110 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3111 address from EXC_ADDR. This turns out to be useful for our
3112 emulation PALcode, so continue to accept it. */
3113 TCGv tmp = tcg_temp_new();
3114 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3115 gen_helper_hw_ret(tmp);
3116 tcg_temp_free(tmp);
3117 } else {
3118 gen_helper_hw_ret(cpu_ir[rb]);
3119 }
3120 ret = EXIT_PC_UPDATED;
3121 break;
3122 }
3123 #endif
3124 goto invalid_opc;
3125 case 0x1F:
3126 /* HW_ST (PALcode) */
3127 #ifndef CONFIG_USER_ONLY
3128 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3129 TCGv addr, val;
3130 addr = tcg_temp_new();
3131 if (rb != 31)
3132 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3133 else
3134 tcg_gen_movi_i64(addr, disp12);
3135 if (ra != 31)
3136 val = cpu_ir[ra];
3137 else {
3138 val = tcg_temp_new();
3139 tcg_gen_movi_i64(val, 0);
3140 }
3141 switch ((insn >> 12) & 0xF) {
3142 case 0x0:
3143 /* Longword physical access */
3144 gen_helper_stl_phys(addr, val);
3145 break;
3146 case 0x1:
3147 /* Quadword physical access */
3148 gen_helper_stq_phys(addr, val);
3149 break;
3150 case 0x2:
3151 /* Longword physical access with lock */
3152 gen_helper_stl_c_phys(val, addr, val);
3153 break;
3154 case 0x3:
3155 /* Quadword physical access with lock */
3156 gen_helper_stq_c_phys(val, addr, val);
3157 break;
3158 case 0x4:
3159 /* Longword virtual access */
3160 goto invalid_opc;
3161 case 0x5:
3162 /* Quadword virtual access */
3163 goto invalid_opc;
3164 case 0x6:
3165 /* Invalid */
3166 goto invalid_opc;
3167 case 0x7:
3168 /* Invalid */
3169 goto invalid_opc;
3170 case 0x8:
3171 /* Invalid */
3172 goto invalid_opc;
3173 case 0x9:
3174 /* Invalid */
3175 goto invalid_opc;
3176 case 0xA:
3177 /* Invalid */
3178 goto invalid_opc;
3179 case 0xB:
3180 /* Invalid */
3181 goto invalid_opc;
3182 case 0xC:
3183 /* Longword virtual access with alternate access mode */
3184 goto invalid_opc;
3185 case 0xD:
3186 /* Quadword virtual access with alternate access mode */
3187 goto invalid_opc;
3188 case 0xE:
3189 /* Invalid */
3190 goto invalid_opc;
3191 case 0xF:
3192 /* Invalid */
3193 goto invalid_opc;
3194 }
3195 if (ra == 31)
3196 tcg_temp_free(val);
3197 tcg_temp_free(addr);
3198 break;
3199 }
3200 #endif
3201 goto invalid_opc;
3202 case 0x20:
3203 /* LDF */
3204 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3205 break;
3206 case 0x21:
3207 /* LDG */
3208 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3209 break;
3210 case 0x22:
3211 /* LDS */
3212 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3213 break;
3214 case 0x23:
3215 /* LDT */
3216 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3217 break;
3218 case 0x24:
3219 /* STF */
3220 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3221 break;
3222 case 0x25:
3223 /* STG */
3224 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3225 break;
3226 case 0x26:
3227 /* STS */
3228 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3229 break;
3230 case 0x27:
3231 /* STT */
3232 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3233 break;
3234 case 0x28:
3235 /* LDL */
3236 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3237 break;
3238 case 0x29:
3239 /* LDQ */
3240 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3241 break;
3242 case 0x2A:
3243 /* LDL_L */
3244 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3245 break;
3246 case 0x2B:
3247 /* LDQ_L */
3248 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3249 break;
3250 case 0x2C:
3251 /* STL */
3252 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3253 break;
3254 case 0x2D:
3255 /* STQ */
3256 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3257 break;
3258 case 0x2E:
3259 /* STL_C */
3260 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3261 break;
3262 case 0x2F:
3263 /* STQ_C */
3264 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3265 break;
3266 case 0x30:
3267 /* BR */
3268 ret = gen_bdirect(ctx, ra, disp21);
3269 break;
3270 case 0x31: /* FBEQ */
3271 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3272 break;
3273 case 0x32: /* FBLT */
3274 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3275 break;
3276 case 0x33: /* FBLE */
3277 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3278 break;
3279 case 0x34:
3280 /* BSR */
3281 ret = gen_bdirect(ctx, ra, disp21);
3282 break;
3283 case 0x35: /* FBNE */
3284 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3285 break;
3286 case 0x36: /* FBGE */
3287 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3288 break;
3289 case 0x37: /* FBGT */
3290 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3291 break;
3292 case 0x38:
3293 /* BLBC */
3294 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3295 break;
3296 case 0x39:
3297 /* BEQ */
3298 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3299 break;
3300 case 0x3A:
3301 /* BLT */
3302 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3303 break;
3304 case 0x3B:
3305 /* BLE */
3306 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3307 break;
3308 case 0x3C:
3309 /* BLBS */
3310 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3311 break;
3312 case 0x3D:
3313 /* BNE */
3314 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3315 break;
3316 case 0x3E:
3317 /* BGE */
3318 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3319 break;
3320 case 0x3F:
3321 /* BGT */
3322 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3323 break;
3324 invalid_opc:
3325 ret = gen_invalid(ctx);
3326 break;
3327 }
3328
3329 return ret;
3330 }
3331
3332 static inline void gen_intermediate_code_internal(CPUAlphaState *env,
3333 TranslationBlock *tb,
3334 int search_pc)
3335 {
3336 DisasContext ctx, *ctxp = &ctx;
3337 target_ulong pc_start;
3338 uint32_t insn;
3339 uint16_t *gen_opc_end;
3340 CPUBreakpoint *bp;
3341 int j, lj = -1;
3342 ExitStatus ret;
3343 int num_insns;
3344 int max_insns;
3345
3346 pc_start = tb->pc;
3347 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3348
3349 ctx.tb = tb;
3350 ctx.env = env;
3351 ctx.pc = pc_start;
3352 ctx.mem_idx = cpu_mmu_index(env);
3353
3354 /* ??? Every TB begins with unset rounding mode, to be initialized on
3355 the first fp insn of the TB. Alternatively we could define a proper
3356 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3357 to reset the FP_STATUS to that default at the end of any TB that
3358 changes the default. We could even (gasp) dynamically figure out
3359 what default would be most efficient given the running program. */
3360 ctx.tb_rm = -1;
3361 /* Similarly for flush-to-zero. */
3362 ctx.tb_ftz = -1;
3363
3364 num_insns = 0;
3365 max_insns = tb->cflags & CF_COUNT_MASK;
3366 if (max_insns == 0)
3367 max_insns = CF_COUNT_MASK;
3368
3369 gen_icount_start();
3370 do {
3371 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3372 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3373 if (bp->pc == ctx.pc) {
3374 gen_excp(&ctx, EXCP_DEBUG, 0);
3375 break;
3376 }
3377 }
3378 }
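/* In search_pc mode, record the guest PC and instruction count for
   every generated op so that restore_state_to_opc() below can recover
   env->pc from an op index after a fault. */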
3379 if (search_pc) {
3380 j = gen_opc_ptr - gen_opc_buf;
3381 if (lj < j) {
3382 lj++;
3383 while (lj < j)
3384 gen_opc_instr_start[lj++] = 0;
3385 }
3386 gen_opc_pc[lj] = ctx.pc;
3387 gen_opc_instr_start[lj] = 1;
3388 gen_opc_icount[lj] = num_insns;
3389 }
3390 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3391 gen_io_start();
3392 insn = ldl_code(ctx.pc);
3393 num_insns++;
3394
3395 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3396 tcg_gen_debug_insn_start(ctx.pc);
3397 }
3398
3399 ctx.pc += 4;
3400 ret = translate_one(ctxp, insn);
3401
3402 /* If we reach a page boundary, are single stepping,
3403 or exhaust instruction count, stop generation. */
3404 if (ret == NO_EXIT
3405 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3406 || gen_opc_ptr >= gen_opc_end
3407 || num_insns >= max_insns
3408 || singlestep
3409 || env->singlestep_enabled)) {
3410 ret = EXIT_PC_STALE;
3411 }
3412 } while (ret == NO_EXIT);
3413
3414 if (tb->cflags & CF_LAST_IO) {
3415 gen_io_end();
3416 }
3417
3418 switch (ret) {
3419 case EXIT_GOTO_TB:
3420 case EXIT_NORETURN:
3421 break;
3422 case EXIT_PC_STALE:
3423 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3424 /* FALLTHRU */
3425 case EXIT_PC_UPDATED:
3426 if (env->singlestep_enabled) {
3427 gen_excp_1(EXCP_DEBUG, 0);
3428 } else {
3429 tcg_gen_exit_tb(0);
3430 }
3431 break;
3432 default:
3433 abort();
3434 }
3435
3436 gen_icount_end(tb, num_insns);
3437 *gen_opc_ptr = INDEX_op_end;
3438 if (search_pc) {
3439 j = gen_opc_ptr - gen_opc_buf;
3440 lj++;
3441 while (lj <= j)
3442 gen_opc_instr_start[lj++] = 0;
3443 } else {
3444 tb->size = ctx.pc - pc_start;
3445 tb->icount = num_insns;
3446 }
3447
3448 #ifdef DEBUG_DISAS
3449 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3450 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3451 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3452 qemu_log("\n");
3453 }
3454 #endif
3455 }
3456
3457 void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
3458 {
3459 gen_intermediate_code_internal(env, tb, 0);
3460 }
3461
3462 void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
3463 {
3464 gen_intermediate_code_internal(env, tb, 1);
3465 }
3466
3467 struct cpu_def_t {
3468 const char *name;
3469 int implver, amask;
3470 };
3471
3472 static const struct cpu_def_t cpu_defs[] = {
3473 { "ev4", IMPLVER_2106x, 0 },
3474 { "ev5", IMPLVER_21164, 0 },
3475 { "ev56", IMPLVER_21164, AMASK_BWX },
3476 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3477 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3478 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3479 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3480 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3481 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3482 { "21064", IMPLVER_2106x, 0 },
3483 { "21164", IMPLVER_21164, 0 },
3484 { "21164a", IMPLVER_21164, AMASK_BWX },
3485 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3486 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3487 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3488 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3489 };
3490
3491 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3492 {
3493 CPUAlphaState *env;
3494 int implver, amask, i, max;
3495
3496 env = g_malloc0(sizeof(CPUAlphaState));
3497 cpu_exec_init(env);
3498 alpha_translate_init();
3499 tlb_flush(env, 1);
3500
3501 /* Default to ev67; no reason not to emulate insns by default. */
3502 implver = IMPLVER_21264;
3503 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3504 | AMASK_TRAP | AMASK_PREFETCH);
3505
3506 max = ARRAY_SIZE(cpu_defs);
3507 for (i = 0; i < max; i++) {
3508 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3509 implver = cpu_defs[i].implver;
3510 amask = cpu_defs[i].amask;
3511 break;
3512 }
3513 }
3514 env->implver = implver;
3515 env->amask = amask;
3516
3517 #if defined (CONFIG_USER_ONLY)
3518 env->ps = PS_USER_MODE;
3519 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3520 | FPCR_UNFD | FPCR_INED | FPCR_DNOD
3521 | FPCR_DYN_NORMAL));
3522 #endif
3523 env->lock_addr = -1;
3524 env->fen = 1;
3525
3526 qemu_init_vcpu(env);
3527 return env;
3528 }
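/* A minimal usage sketch (names as in this file): cpu_alpha_init("ev67")
   returns an initialized CPUAlphaState; unrecognized model names fall
   back to the EV67 defaults chosen above. */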
3529
3530 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
3531 {
3532 env->pc = gen_opc_pc[pc_pos];
3533 }