1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "cpu.h"
21 #include "disas/disas.h"
22 #include "qemu/host-utils.h"
23 #include "tcg-op.h"
24
25 #include "helper.h"
26 #define GEN_HELPER 1
27 #include "helper.h"
28
29 #undef ALPHA_DEBUG_DISAS
30 #define CONFIG_SOFTFLOAT_INLINE
31
32 #ifdef ALPHA_DEBUG_DISAS
33 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
34 #else
35 # define LOG_DISAS(...) do { } while (0)
36 #endif
37
38 typedef struct DisasContext DisasContext;
39 struct DisasContext {
40 struct TranslationBlock *tb;
41 uint64_t pc;
42 int mem_idx;
43
44 /* Current rounding mode for this TB. */
45 int tb_rm;
46 /* Current flush-to-zero setting for this TB. */
47 int tb_ftz;
48
49 /* implver value for this CPU. */
50 int implver;
51
52 bool singlestep_enabled;
53 };
54
55 /* Return values from translate_one, indicating the state of the TB.
56 Note that zero indicates that we are not exiting the TB. */
57
58 typedef enum {
59 NO_EXIT,
60
61 /* We have emitted one or more goto_tb. No fixup required. */
62 EXIT_GOTO_TB,
63
64 /* We are not using a goto_tb (for whatever reason), but have updated
65 the PC (for whatever reason), so there's no need to do it again on
66 exiting the TB. */
67 EXIT_PC_UPDATED,
68
69 /* We are exiting the TB, but have neither emitted a goto_tb, nor
70 updated the PC for the next instruction to be executed. */
71 EXIT_PC_STALE,
72
73 /* We are ending the TB with a noreturn function call, e.g. longjmp.
74 No following code will be executed. */
75 EXIT_NORETURN,
76 } ExitStatus;
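/* A minimal sketch of how a caller is expected to act on these values,
   assuming a dispatch loop shaped like the one at the end of this file:

       switch (ret) {
       case EXIT_GOTO_TB:
       case EXIT_NORETURN:
           break;                              (TB already ended)
       case EXIT_PC_STALE:
           tcg_gen_movi_i64(cpu_pc, ctx.pc);
           (fall through)
       case EXIT_PC_UPDATED:
           tcg_gen_exit_tb(0);                 (cpu_pc now holds the next PC)
           break;
       }
*/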
77
78 /* global register indexes */
79 static TCGv_ptr cpu_env;
80 static TCGv cpu_ir[31];
81 static TCGv cpu_fir[31];
82 static TCGv cpu_pc;
83 static TCGv cpu_lock_addr;
84 static TCGv cpu_lock_st_addr;
85 static TCGv cpu_lock_value;
86 static TCGv cpu_unique;
87 #ifndef CONFIG_USER_ONLY
88 static TCGv cpu_sysval;
89 static TCGv cpu_usp;
90 #endif
91
92 /* register names */
93 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
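/* The size above is exact: "ir0".."ir9" are 10 names of 4 bytes each
   (including the NUL), "ir10".."ir30" are 21 names of 5 bytes, and the
   "fir" variants are one byte longer each; this matches the pointer
   increments in alpha_translate_init below. */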
94
95 #include "exec/gen-icount.h"
96
97 void alpha_translate_init(void)
98 {
99 int i;
100 char *p;
101 static int done_init = 0;
102
103 if (done_init)
104 return;
105
106 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
107
108 p = cpu_reg_names;
109 for (i = 0; i < 31; i++) {
110 sprintf(p, "ir%d", i);
111 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
112 offsetof(CPUAlphaState, ir[i]), p);
113 p += (i < 10) ? 4 : 5;
114
115 sprintf(p, "fir%d", i);
116 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
117 offsetof(CPUAlphaState, fir[i]), p);
118 p += (i < 10) ? 5 : 6;
119 }
120
121 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
122 offsetof(CPUAlphaState, pc), "pc");
123
124 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
125 offsetof(CPUAlphaState, lock_addr),
126 "lock_addr");
127 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
128 offsetof(CPUAlphaState, lock_st_addr),
129 "lock_st_addr");
130 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
131 offsetof(CPUAlphaState, lock_value),
132 "lock_value");
133
134 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
135 offsetof(CPUAlphaState, unique), "unique");
136 #ifndef CONFIG_USER_ONLY
137 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
138 offsetof(CPUAlphaState, sysval), "sysval");
139 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
140 offsetof(CPUAlphaState, usp), "usp");
141 #endif
142
143 done_init = 1;
144 }
145
146 static void gen_excp_1(int exception, int error_code)
147 {
148 TCGv_i32 tmp1, tmp2;
149
150 tmp1 = tcg_const_i32(exception);
151 tmp2 = tcg_const_i32(error_code);
152 gen_helper_excp(cpu_env, tmp1, tmp2);
153 tcg_temp_free_i32(tmp2);
154 tcg_temp_free_i32(tmp1);
155 }
156
157 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
158 {
159 tcg_gen_movi_i64(cpu_pc, ctx->pc);
160 gen_excp_1(exception, error_code);
161 return EXIT_NORETURN;
162 }
163
164 static inline ExitStatus gen_invalid(DisasContext *ctx)
165 {
166 return gen_excp(ctx, EXCP_OPCDEC, 0);
167 }
168
169 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
170 {
171 TCGv tmp = tcg_temp_new();
172 TCGv_i32 tmp32 = tcg_temp_new_i32();
173 tcg_gen_qemu_ld32u(tmp, t1, flags);
174 tcg_gen_trunc_i64_i32(tmp32, tmp);
175 gen_helper_memory_to_f(t0, tmp32);
176 tcg_temp_free_i32(tmp32);
177 tcg_temp_free(tmp);
178 }
179
180 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
181 {
182 TCGv tmp = tcg_temp_new();
183 tcg_gen_qemu_ld64(tmp, t1, flags);
184 gen_helper_memory_to_g(t0, tmp);
185 tcg_temp_free(tmp);
186 }
187
188 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
189 {
190 TCGv tmp = tcg_temp_new();
191 TCGv_i32 tmp32 = tcg_temp_new_i32();
192 tcg_gen_qemu_ld32u(tmp, t1, flags);
193 tcg_gen_trunc_i64_i32(tmp32, tmp);
194 gen_helper_memory_to_s(t0, tmp32);
195 tcg_temp_free_i32(tmp32);
196 tcg_temp_free(tmp);
197 }
198
199 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
200 {
201 tcg_gen_qemu_ld32s(t0, t1, flags);
202 tcg_gen_mov_i64(cpu_lock_addr, t1);
203 tcg_gen_mov_i64(cpu_lock_value, t0);
204 }
205
206 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
207 {
208 tcg_gen_qemu_ld64(t0, t1, flags);
209 tcg_gen_mov_i64(cpu_lock_addr, t1);
210 tcg_gen_mov_i64(cpu_lock_value, t0);
211 }
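/* Both ld_l variants above record the reservation in cpu_lock_addr and
   cpu_lock_value; gen_store_conditional below checks against both and
   resets cpu_lock_addr to -1 once the store-conditional has resolved,
   whether it succeeded or failed. */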
212
213 static inline void gen_load_mem(DisasContext *ctx,
214 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
215 int flags),
216 int ra, int rb, int32_t disp16, int fp,
217 int clear)
218 {
219 TCGv addr, va;
220
221 /* LDQ_U with ra $31 is UNOP. The various other loads are forms of
222 prefetch, which we can treat as nops. No worries about
223 missed exceptions here. */
224 if (unlikely(ra == 31)) {
225 return;
226 }
227
228 addr = tcg_temp_new();
229 if (rb != 31) {
230 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
231 if (clear) {
232 tcg_gen_andi_i64(addr, addr, ~0x7);
233 }
234 } else {
235 if (clear) {
236 disp16 &= ~0x7;
237 }
238 tcg_gen_movi_i64(addr, disp16);
239 }
240
241 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
242 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
243
244 tcg_temp_free(addr);
245 }
246
247 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
248 {
249 TCGv_i32 tmp32 = tcg_temp_new_i32();
250 TCGv tmp = tcg_temp_new();
251 gen_helper_f_to_memory(tmp32, t0);
252 tcg_gen_extu_i32_i64(tmp, tmp32);
253 tcg_gen_qemu_st32(tmp, t1, flags);
254 tcg_temp_free(tmp);
255 tcg_temp_free_i32(tmp32);
256 }
257
258 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
259 {
260 TCGv tmp = tcg_temp_new();
261 gen_helper_g_to_memory(tmp, t0);
262 tcg_gen_qemu_st64(tmp, t1, flags);
263 tcg_temp_free(tmp);
264 }
265
266 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
267 {
268 TCGv_i32 tmp32 = tcg_temp_new_i32();
269 TCGv tmp = tcg_temp_new();
270 gen_helper_s_to_memory(tmp32, t0);
271 tcg_gen_extu_i32_i64(tmp, tmp32);
272 tcg_gen_qemu_st32(tmp, t1, flags);
273 tcg_temp_free(tmp);
274 tcg_temp_free_i32(tmp32);
275 }
276
277 static inline void gen_store_mem(DisasContext *ctx,
278 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
279 int flags),
280 int ra, int rb, int32_t disp16, int fp,
281 int clear)
282 {
283 TCGv addr, va;
284
285 addr = tcg_temp_new();
286 if (rb != 31) {
287 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
288 if (clear) {
289 tcg_gen_andi_i64(addr, addr, ~0x7);
290 }
291 } else {
292 if (clear) {
293 disp16 &= ~0x7;
294 }
295 tcg_gen_movi_i64(addr, disp16);
296 }
297
298 if (ra == 31) {
299 va = tcg_const_i64(0);
300 } else {
301 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
302 }
303 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
304
305 tcg_temp_free(addr);
306 if (ra == 31) {
307 tcg_temp_free(va);
308 }
309 }
310
311 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
312 int32_t disp16, int quad)
313 {
314 TCGv addr;
315
316 if (ra == 31) {
317 /* ??? Don't bother storing anything. The user can't tell
318 the difference, since the zero register always reads zero. */
319 return NO_EXIT;
320 }
321
322 #if defined(CONFIG_USER_ONLY)
323 addr = cpu_lock_st_addr;
324 #else
325 addr = tcg_temp_local_new();
326 #endif
327
328 if (rb != 31) {
329 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
330 } else {
331 tcg_gen_movi_i64(addr, disp16);
332 }
333
334 #if defined(CONFIG_USER_ONLY)
335 /* ??? This is handled via a complicated version of compare-and-swap
336 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
337 in TCG so that this isn't necessary. */
338 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
339 #else
340 /* ??? In system mode we are never multi-threaded, so CAS can be
341 implemented via a non-atomic load-compare-store sequence. */
342 {
343 int lab_fail, lab_done;
344 TCGv val;
345
346 lab_fail = gen_new_label();
347 lab_done = gen_new_label();
348 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
349
350 val = tcg_temp_new();
351 if (quad) {
352 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
353 } else {
354 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
355 }
356 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
357
358 if (quad) {
359 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
360 } else {
361 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
362 }
363 tcg_gen_movi_i64(cpu_ir[ra], 1);
364 tcg_gen_br(lab_done);
365
366 gen_set_label(lab_fail);
367 tcg_gen_movi_i64(cpu_ir[ra], 0);
368
369 gen_set_label(lab_done);
370 tcg_gen_movi_i64(cpu_lock_addr, -1);
371
372 tcg_temp_free(addr);
373 return NO_EXIT;
374 }
375 #endif
376 }
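/* For reference, the guest-side pattern these two halves implement is
   the classic Alpha atomic-update loop, e.g. an atomic increment:

       1: ldq_l  t0, 0(a0)      load-locked, sets the reservation
          addq   t0, 1, t0
          stq_c  t0, 0(a0)      t0 becomes 1 on success, 0 on failure
          beq    t0, 1b         retry if the reservation was lost
*/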
377
378 static bool in_superpage(DisasContext *ctx, int64_t addr)
379 {
380 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
381 && addr < 0
382 && ((addr >> 41) & 3) == 2
383 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
384 }
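/* Worked example, assuming TARGET_VIRT_ADDR_SPACE_BITS == 43: the
   kernel superpage base 0xfffffc0000000000 qualifies because it is
   negative, bits <42:41> are binary 10, and bits <63:43> are all
   copies of the sign bit. */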
385
386 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
387 {
388 /* Suppress goto_tb in the case of single-stepping and IO. */
389 if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
390 return false;
391 }
392 /* If the destination is in the superpage, the page perms can't change. */
393 if (in_superpage(ctx, dest)) {
394 return true;
395 }
396 /* Check for the dest on the same page as the start of the TB. */
397 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
398 }
399
400 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
401 {
402 uint64_t dest = ctx->pc + (disp << 2);
403
404 if (ra != 31) {
405 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
406 }
407
408 /* Notice branch-to-next; used to initialize RA with the PC. */
409 if (disp == 0) {
410 return NO_EXIT;
411 } else if (use_goto_tb(ctx, dest)) {
412 tcg_gen_goto_tb(0);
413 tcg_gen_movi_i64(cpu_pc, dest);
414 tcg_gen_exit_tb((uintptr_t)ctx->tb);
415 return EXIT_GOTO_TB;
416 } else {
417 tcg_gen_movi_i64(cpu_pc, dest);
418 return EXIT_PC_UPDATED;
419 }
420 }
421
422 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
423 TCGv cmp, int32_t disp)
424 {
425 uint64_t dest = ctx->pc + (disp << 2);
426 int lab_true = gen_new_label();
427
428 if (use_goto_tb(ctx, dest)) {
429 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
430
431 tcg_gen_goto_tb(0);
432 tcg_gen_movi_i64(cpu_pc, ctx->pc);
433 tcg_gen_exit_tb((uintptr_t)ctx->tb);
434
435 gen_set_label(lab_true);
436 tcg_gen_goto_tb(1);
437 tcg_gen_movi_i64(cpu_pc, dest);
438 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
439
440 return EXIT_GOTO_TB;
441 } else {
442 TCGv_i64 z = tcg_const_i64(0);
443 TCGv_i64 d = tcg_const_i64(dest);
444 TCGv_i64 p = tcg_const_i64(ctx->pc);
445
446 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
447
448 tcg_temp_free_i64(z);
449 tcg_temp_free_i64(d);
450 tcg_temp_free_i64(p);
451 return EXIT_PC_UPDATED;
452 }
453 }
454
455 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
456 int32_t disp, int mask)
457 {
458 TCGv cmp_tmp;
459
460 if (unlikely(ra == 31)) {
461 cmp_tmp = tcg_const_i64(0);
462 } else {
463 cmp_tmp = tcg_temp_new();
464 if (mask) {
465 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
466 } else {
467 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
468 }
469 }
470
471 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
472 }
473
474 /* Fold -0.0 for comparison with COND. */
475
476 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
477 {
478 uint64_t mzero = 1ull << 63;
479
480 switch (cond) {
481 case TCG_COND_LE:
482 case TCG_COND_GT:
483 /* For <= or >, the -0.0 value directly compares the way we want. */
484 tcg_gen_mov_i64(dest, src);
485 break;
486
487 case TCG_COND_EQ:
488 case TCG_COND_NE:
489 /* For == or !=, we can simply mask off the sign bit and compare. */
490 tcg_gen_andi_i64(dest, src, mzero - 1);
491 break;
492
493 case TCG_COND_GE:
494 case TCG_COND_LT:
495 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
496 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
497 tcg_gen_neg_i64(dest, dest);
498 tcg_gen_and_i64(dest, dest, src);
499 break;
500
501 default:
502 abort();
503 }
504 }
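/* Worked example: -0.0 has the register image 0x8000000000000000.
   For EQ/NE, ANDing with (1ull << 63) - 1 maps both +0.0 and -0.0
   to zero. For GE/LT, the setcond/neg pair builds an all-ones mask
   for every value except -0.0 itself, so only -0.0 is rewritten to
   +0.0 before the signed comparison. */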
505
506 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
507 int32_t disp)
508 {
509 TCGv cmp_tmp;
510
511 if (unlikely(ra == 31)) {
512 /* Very uncommon case, but easier to optimize it to an integer
513 comparison than continuing with the floating point comparison. */
514 return gen_bcond(ctx, cond, ra, disp, 0);
515 }
516
517 cmp_tmp = tcg_temp_new();
518 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
519 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
520 }
521
522 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
523 int islit, uint8_t lit, int mask)
524 {
525 TCGv_i64 c1, z, v1;
526
527 if (unlikely(rc == 31)) {
528 return;
529 }
530
531 if (ra == 31) {
532 /* Very uncommon case - Do not bother to optimize. */
533 c1 = tcg_const_i64(0);
534 } else if (mask) {
535 c1 = tcg_const_i64(1);
536 tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
537 } else {
538 c1 = cpu_ir[ra];
539 }
540 if (islit) {
541 v1 = tcg_const_i64(lit);
542 } else {
543 v1 = cpu_ir[rb];
544 }
545 z = tcg_const_i64(0);
546
547 tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);
548
549 tcg_temp_free_i64(z);
550 if (ra == 31 || mask) {
551 tcg_temp_free_i64(c1);
552 }
553 if (islit) {
554 tcg_temp_free_i64(v1);
555 }
556 }
557
558 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
559 {
560 TCGv_i64 c1, z, v1;
561
562 if (unlikely(rc == 31)) {
563 return;
564 }
565
566 c1 = tcg_temp_new_i64();
567 if (unlikely(ra == 31)) {
568 tcg_gen_movi_i64(c1, 0);
569 } else {
570 gen_fold_mzero(cond, c1, cpu_fir[ra]);
571 }
572 if (rb == 31) {
573 v1 = tcg_const_i64(0);
574 } else {
575 v1 = cpu_fir[rb];
576 }
577 z = tcg_const_i64(0);
578
579 tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
580
581 tcg_temp_free_i64(z);
582 tcg_temp_free_i64(c1);
583 if (rb == 31) {
584 tcg_temp_free_i64(v1);
585 }
586 }
587
588 #define QUAL_RM_N 0x080 /* Round mode nearest even */
589 #define QUAL_RM_C 0x000 /* Round mode chopped */
590 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
591 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
592 #define QUAL_RM_MASK 0x0c0
593
594 #define QUAL_U 0x100 /* Underflow enable (fp output) */
595 #define QUAL_V 0x100 /* Overflow enable (int output) */
596 #define QUAL_S 0x400 /* Software completion enable */
597 #define QUAL_I 0x200 /* Inexact detection enable */
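/* fn11 thus decomposes into qualifier bits <10:6> plus the 6-bit
   function code in <5:0>. E.g. a fully qualified /SUI operation with
   dynamic rounding carries QUAL_S | QUAL_U | QUAL_I | QUAL_RM_D
   = 0x400 + 0x100 + 0x200 + 0x0c0 = 0x7c0 above the function code. */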
598
599 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
600 {
601 TCGv_i32 tmp;
602
603 fn11 &= QUAL_RM_MASK;
604 if (fn11 == ctx->tb_rm) {
605 return;
606 }
607 ctx->tb_rm = fn11;
608
609 tmp = tcg_temp_new_i32();
610 switch (fn11) {
611 case QUAL_RM_N:
612 tcg_gen_movi_i32(tmp, float_round_nearest_even);
613 break;
614 case QUAL_RM_C:
615 tcg_gen_movi_i32(tmp, float_round_to_zero);
616 break;
617 case QUAL_RM_M:
618 tcg_gen_movi_i32(tmp, float_round_down);
619 break;
620 case QUAL_RM_D:
621 tcg_gen_ld8u_i32(tmp, cpu_env,
622 offsetof(CPUAlphaState, fpcr_dyn_round));
623 break;
624 }
625
626 #if defined(CONFIG_SOFTFLOAT_INLINE)
627 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
628 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
629 sets the one field. */
630 tcg_gen_st8_i32(tmp, cpu_env,
631 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
632 #else
633 gen_helper_setroundmode(tmp);
634 #endif
635
636 tcg_temp_free_i32(tmp);
637 }
638
639 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
640 {
641 TCGv_i32 tmp;
642
643 fn11 &= QUAL_U;
644 if (fn11 == ctx->tb_ftz) {
645 return;
646 }
647 ctx->tb_ftz = fn11;
648
649 tmp = tcg_temp_new_i32();
650 if (fn11) {
651 /* Underflow is enabled, use the FPCR setting. */
652 tcg_gen_ld8u_i32(tmp, cpu_env,
653 offsetof(CPUAlphaState, fpcr_flush_to_zero));
654 } else {
655 /* Underflow is disabled, force flush-to-zero. */
656 tcg_gen_movi_i32(tmp, 1);
657 }
658
659 #if defined(CONFIG_SOFTFLOAT_INLINE)
660 tcg_gen_st8_i32(tmp, cpu_env,
661 offsetof(CPUAlphaState, fp_status.flush_to_zero));
662 #else
663 gen_helper_setflushzero(tmp);
664 #endif
665
666 tcg_temp_free_i32(tmp);
667 }
668
669 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
670 {
671 TCGv val;
672 if (reg == 31) {
673 val = tcg_const_i64(0);
674 } else {
675 if ((fn11 & QUAL_S) == 0) {
676 if (is_cmp) {
677 gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
678 } else {
679 gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
680 }
681 }
682 val = tcg_temp_new();
683 tcg_gen_mov_i64(val, cpu_fir[reg]);
684 }
685 return val;
686 }
687
688 static void gen_fp_exc_clear(void)
689 {
690 #if defined(CONFIG_SOFTFLOAT_INLINE)
691 TCGv_i32 zero = tcg_const_i32(0);
692 tcg_gen_st8_i32(zero, cpu_env,
693 offsetof(CPUAlphaState, fp_status.float_exception_flags));
694 tcg_temp_free_i32(zero);
695 #else
696 gen_helper_fp_exc_clear(cpu_env);
697 #endif
698 }
699
700 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
701 {
702 /* ??? We ought to be able to do something with imprecise exceptions.
703 E.g. notice we're still in the trap shadow of something within the
704 TB and do not generate the code to signal the exception; end the TB
705 when an exception is forced to arrive, either by consumption of a
706 register value or TRAPB or EXCB. */
707 TCGv_i32 exc = tcg_temp_new_i32();
708 TCGv_i32 reg;
709
710 #if defined(CONFIG_SOFTFLOAT_INLINE)
711 tcg_gen_ld8u_i32(exc, cpu_env,
712 offsetof(CPUAlphaState, fp_status.float_exception_flags));
713 #else
714 gen_helper_fp_exc_get(exc, cpu_env);
715 #endif
716
717 if (ignore) {
718 tcg_gen_andi_i32(exc, exc, ~ignore);
719 }
720
721 /* ??? Pass in the regno of the destination so that the helper can
722 set EXC_MASK, which contains a bitmask of destination registers
723 that have caused arithmetic traps. A simple userspace emulation
724 does not require this. We do need it for a guest kernel's entArith,
725 or if we were to do something clever with imprecise exceptions. */
726 reg = tcg_const_i32(rc + 32);
727
728 if (fn11 & QUAL_S) {
729 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
730 } else {
731 gen_helper_fp_exc_raise(cpu_env, exc, reg);
732 }
733
734 tcg_temp_free_i32(reg);
735 tcg_temp_free_i32(exc);
736 }
737
738 static inline void gen_fp_exc_raise(int rc, int fn11)
739 {
740 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
741 }
742
743 static void gen_fcvtlq(int rb, int rc)
744 {
745 if (unlikely(rc == 31)) {
746 return;
747 }
748 if (unlikely(rb == 31)) {
749 tcg_gen_movi_i64(cpu_fir[rc], 0);
750 } else {
751 TCGv tmp = tcg_temp_new();
752
753 /* The arithmetic right shift here, plus the sign-extended mask below,
754 yields a sign-extended result without an explicit ext32s_i64. */
755 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
756 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
757 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
758 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
759 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
760
761 tcg_temp_free(tmp);
762 }
763 }
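/* This undoes the scatter performed by gen_fcvtql below: register
   bits <63:62> supply the result's <31:30>, and bits <58:29> supply
   <29:0>, reassembling the longword from its in-register memory
   format. */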
764
765 static void gen_fcvtql(int rb, int rc)
766 {
767 if (unlikely(rc == 31)) {
768 return;
769 }
770 if (unlikely(rb == 31)) {
771 tcg_gen_movi_i64(cpu_fir[rc], 0);
772 } else {
773 TCGv tmp = tcg_temp_new();
774
775 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
776 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
777 tcg_gen_shli_i64(tmp, tmp, 32);
778 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
779 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
780
781 tcg_temp_free(tmp);
782 }
783 }
784
785 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
786 {
787 if (rb != 31) {
788 int lab = gen_new_label();
789 TCGv tmp = tcg_temp_new();
790
791 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
792 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
793 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
794
795 gen_set_label(lab);
796 }
797 gen_fcvtql(rb, rc);
798 }
799
800 #define FARITH2(name) \
801 static inline void glue(gen_f, name)(int rb, int rc) \
802 { \
803 if (unlikely(rc == 31)) { \
804 return; \
805 } \
806 if (rb != 31) { \
807 gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
808 } else { \
809 TCGv tmp = tcg_const_i64(0); \
810 gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
811 tcg_temp_free(tmp); \
812 } \
813 }
814
815 /* ??? VAX instruction qualifiers ignored. */
816 FARITH2(sqrtf)
817 FARITH2(sqrtg)
818 FARITH2(cvtgf)
819 FARITH2(cvtgq)
820 FARITH2(cvtqf)
821 FARITH2(cvtqg)
822
823 static void gen_ieee_arith2(DisasContext *ctx,
824 void (*helper)(TCGv, TCGv_ptr, TCGv),
825 int rb, int rc, int fn11)
826 {
827 TCGv vb;
828
829 /* ??? This is wrong: the instruction is not a nop, it still may
830 raise exceptions. */
831 if (unlikely(rc == 31)) {
832 return;
833 }
834
835 gen_qual_roundmode(ctx, fn11);
836 gen_qual_flushzero(ctx, fn11);
837 gen_fp_exc_clear();
838
839 vb = gen_ieee_input(rb, fn11, 0);
840 helper(cpu_fir[rc], cpu_env, vb);
841 tcg_temp_free(vb);
842
843 gen_fp_exc_raise(rc, fn11);
844 }
845
846 #define IEEE_ARITH2(name) \
847 static inline void glue(gen_f, name)(DisasContext *ctx, \
848 int rb, int rc, int fn11) \
849 { \
850 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
851 }
852 IEEE_ARITH2(sqrts)
853 IEEE_ARITH2(sqrtt)
854 IEEE_ARITH2(cvtst)
855 IEEE_ARITH2(cvtts)
856
857 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
858 {
859 TCGv vb;
860 int ignore = 0;
861
862 /* ??? This is wrong: the instruction is not a nop, it still may
863 raise exceptions. */
864 if (unlikely(rc == 31)) {
865 return;
866 }
867
868 /* No need to set flushzero, since we have an integer output. */
869 gen_fp_exc_clear();
870 vb = gen_ieee_input(rb, fn11, 0);
871
872 /* Almost all integer conversions use chopped rounding, and most
873 also do not have integer overflow enabled. Special case that. */
874 switch (fn11) {
875 case QUAL_RM_C:
876 gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
877 break;
878 case QUAL_V | QUAL_RM_C:
879 case QUAL_S | QUAL_V | QUAL_RM_C:
880 ignore = float_flag_inexact;
881 /* FALLTHRU */
882 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
883 gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
884 break;
885 default:
886 gen_qual_roundmode(ctx, fn11);
887 gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
888 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
889 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
890 break;
891 }
892 tcg_temp_free(vb);
893
894 gen_fp_exc_raise_ignore(rc, fn11, ignore);
895 }
896
897 static void gen_ieee_intcvt(DisasContext *ctx,
898 void (*helper)(TCGv, TCGv_ptr, TCGv),
899 int rb, int rc, int fn11)
900 {
901 TCGv vb;
902
903 /* ??? This is wrong: the instruction is not a nop, it still may
904 raise exceptions. */
905 if (unlikely(rc == 31)) {
906 return;
907 }
908
909 gen_qual_roundmode(ctx, fn11);
910
911 if (rb == 31) {
912 vb = tcg_const_i64(0);
913 } else {
914 vb = cpu_fir[rb];
915 }
916
917 /* The only exception that can be raised by integer conversion
918 is inexact. Thus we only need to worry about exceptions when
919 inexact handling is requested. */
920 if (fn11 & QUAL_I) {
921 gen_fp_exc_clear();
922 helper(cpu_fir[rc], cpu_env, vb);
923 gen_fp_exc_raise(rc, fn11);
924 } else {
925 helper(cpu_fir[rc], cpu_env, vb);
926 }
927
928 if (rb == 31) {
929 tcg_temp_free(vb);
930 }
931 }
932
933 #define IEEE_INTCVT(name) \
934 static inline void glue(gen_f, name)(DisasContext *ctx, \
935 int rb, int rc, int fn11) \
936 { \
937 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
938 }
939 IEEE_INTCVT(cvtqs)
940 IEEE_INTCVT(cvtqt)
941
942 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
943 {
944 TCGv va, vb, vmask;
945 int za = 0, zb = 0;
946
947 if (unlikely(rc == 31)) {
948 return;
949 }
950
951 vmask = tcg_const_i64(mask);
952
953 TCGV_UNUSED_I64(va);
954 if (ra == 31) {
955 if (inv_a) {
956 va = vmask;
957 } else {
958 za = 1;
959 }
960 } else {
961 va = tcg_temp_new_i64();
962 tcg_gen_mov_i64(va, cpu_fir[ra]);
963 if (inv_a) {
964 tcg_gen_andc_i64(va, vmask, va);
965 } else {
966 tcg_gen_and_i64(va, va, vmask);
967 }
968 }
969
970 TCGV_UNUSED_I64(vb);
971 if (rb == 31) {
972 zb = 1;
973 } else {
974 vb = tcg_temp_new_i64();
975 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
976 }
977
978 switch (za << 1 | zb) {
979 case 0 | 0:
980 tcg_gen_or_i64(cpu_fir[rc], va, vb);
981 break;
982 case 0 | 1:
983 tcg_gen_mov_i64(cpu_fir[rc], va);
984 break;
985 case 2 | 0:
986 tcg_gen_mov_i64(cpu_fir[rc], vb);
987 break;
988 case 2 | 1:
989 tcg_gen_movi_i64(cpu_fir[rc], 0);
990 break;
991 }
992
993 tcg_temp_free(vmask);
994 if (ra != 31) {
995 tcg_temp_free(va);
996 }
997 if (rb != 31) {
998 tcg_temp_free(vb);
999 }
1000 }
1001
1002 static inline void gen_fcpys(int ra, int rb, int rc)
1003 {
1004 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
1005 }
1006
1007 static inline void gen_fcpysn(int ra, int rb, int rc)
1008 {
1009 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
1010 }
1011
1012 static inline void gen_fcpyse(int ra, int rb, int rc)
1013 {
1014 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
1015 }
1016
1017 #define FARITH3(name) \
1018 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1019 { \
1020 TCGv va, vb; \
1021 \
1022 if (unlikely(rc == 31)) { \
1023 return; \
1024 } \
1025 if (ra == 31) { \
1026 va = tcg_const_i64(0); \
1027 } else { \
1028 va = cpu_fir[ra]; \
1029 } \
1030 if (rb == 31) { \
1031 vb = tcg_const_i64(0); \
1032 } else { \
1033 vb = cpu_fir[rb]; \
1034 } \
1035 \
1036 gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
1037 \
1038 if (ra == 31) { \
1039 tcg_temp_free(va); \
1040 } \
1041 if (rb == 31) { \
1042 tcg_temp_free(vb); \
1043 } \
1044 }
1045
1046 /* ??? VAX instruction qualifiers ignored. */
1047 FARITH3(addf)
1048 FARITH3(subf)
1049 FARITH3(mulf)
1050 FARITH3(divf)
1051 FARITH3(addg)
1052 FARITH3(subg)
1053 FARITH3(mulg)
1054 FARITH3(divg)
1055 FARITH3(cmpgeq)
1056 FARITH3(cmpglt)
1057 FARITH3(cmpgle)
1058
1059 static void gen_ieee_arith3(DisasContext *ctx,
1060 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
1061 int ra, int rb, int rc, int fn11)
1062 {
1063 TCGv va, vb;
1064
1065 /* ??? This is wrong: the instruction is not a nop, it still may
1066 raise exceptions. */
1067 if (unlikely(rc == 31)) {
1068 return;
1069 }
1070
1071 gen_qual_roundmode(ctx, fn11);
1072 gen_qual_flushzero(ctx, fn11);
1073 gen_fp_exc_clear();
1074
1075 va = gen_ieee_input(ra, fn11, 0);
1076 vb = gen_ieee_input(rb, fn11, 0);
1077 helper(cpu_fir[rc], cpu_env, va, vb);
1078 tcg_temp_free(va);
1079 tcg_temp_free(vb);
1080
1081 gen_fp_exc_raise(rc, fn11);
1082 }
1083
1084 #define IEEE_ARITH3(name) \
1085 static inline void glue(gen_f, name)(DisasContext *ctx, \
1086 int ra, int rb, int rc, int fn11) \
1087 { \
1088 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1089 }
1090 IEEE_ARITH3(adds)
1091 IEEE_ARITH3(subs)
1092 IEEE_ARITH3(muls)
1093 IEEE_ARITH3(divs)
1094 IEEE_ARITH3(addt)
1095 IEEE_ARITH3(subt)
1096 IEEE_ARITH3(mult)
1097 IEEE_ARITH3(divt)
1098
1099 static void gen_ieee_compare(DisasContext *ctx,
1100 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
1101 int ra, int rb, int rc, int fn11)
1102 {
1103 TCGv va, vb;
1104
1105 /* ??? This is wrong: the instruction is not a nop, it still may
1106 raise exceptions. */
1107 if (unlikely(rc == 31)) {
1108 return;
1109 }
1110
1111 gen_fp_exc_clear();
1112
1113 va = gen_ieee_input(ra, fn11, 1);
1114 vb = gen_ieee_input(rb, fn11, 1);
1115 helper(cpu_fir[rc], cpu_env, va, vb);
1116 tcg_temp_free(va);
1117 tcg_temp_free(vb);
1118
1119 gen_fp_exc_raise(rc, fn11);
1120 }
1121
1122 #define IEEE_CMP3(name) \
1123 static inline void glue(gen_f, name)(DisasContext *ctx, \
1124 int ra, int rb, int rc, int fn11) \
1125 { \
1126 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1127 }
1128 IEEE_CMP3(cmptun)
1129 IEEE_CMP3(cmpteq)
1130 IEEE_CMP3(cmptlt)
1131 IEEE_CMP3(cmptle)
1132
1133 static inline uint64_t zapnot_mask(uint8_t lit)
1134 {
1135 uint64_t mask = 0;
1136 int i;
1137
1138 for (i = 0; i < 8; ++i) {
1139 if ((lit >> i) & 1)
1140 mask |= 0xffull << (i * 8);
1141 }
1142 return mask;
1143 }
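/* E.g. lit = 0x0f selects the low four bytes and yields the mask
   0x00000000ffffffff, which is exactly the ext32u special case in
   gen_zapnoti below; lit = 0x01 yields 0xff, the ext8u case. */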
1144
1145 /* Implement zapnot with an immediate operand, which expands to some
1146 form of immediate AND. This is a basic building block in the
1147 definition of many of the other byte manipulation instructions. */
1148 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1149 {
1150 switch (lit) {
1151 case 0x00:
1152 tcg_gen_movi_i64(dest, 0);
1153 break;
1154 case 0x01:
1155 tcg_gen_ext8u_i64(dest, src);
1156 break;
1157 case 0x03:
1158 tcg_gen_ext16u_i64(dest, src);
1159 break;
1160 case 0x0f:
1161 tcg_gen_ext32u_i64(dest, src);
1162 break;
1163 case 0xff:
1164 tcg_gen_mov_i64(dest, src);
1165 break;
1166 default:
1167 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1168 break;
1169 }
1170 }
1171
1172 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1173 {
1174 if (unlikely(rc == 31))
1175 return;
1176 else if (unlikely(ra == 31))
1177 tcg_gen_movi_i64(cpu_ir[rc], 0);
1178 else if (islit)
1179 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1180 else
1181 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1182 }
1183
1184 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1185 {
1186 if (unlikely(rc == 31))
1187 return;
1188 else if (unlikely(ra == 31))
1189 tcg_gen_movi_i64(cpu_ir[rc], 0);
1190 else if (islit)
1191 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1192 else
1193 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1194 }
1195
1196
1197 /* EXTWH, EXTLH, EXTQH */
1198 static void gen_ext_h(int ra, int rb, int rc, int islit,
1199 uint8_t lit, uint8_t byte_mask)
1200 {
1201 if (unlikely(rc == 31))
1202 return;
1203 else if (unlikely(ra == 31))
1204 tcg_gen_movi_i64(cpu_ir[rc], 0);
1205 else {
1206 if (islit) {
1207 lit = (64 - (lit & 7) * 8) & 0x3f;
1208 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1209 } else {
1210 TCGv tmp1 = tcg_temp_new();
1211 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1212 tcg_gen_shli_i64(tmp1, tmp1, 3);
1213 tcg_gen_neg_i64(tmp1, tmp1);
1214 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1215 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1216 tcg_temp_free(tmp1);
1217 }
1218 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1219 }
1220 }
1221
1222 /* EXTBL, EXTWL, EXTLL, EXTQL */
1223 static void gen_ext_l(int ra, int rb, int rc, int islit,
1224 uint8_t lit, uint8_t byte_mask)
1225 {
1226 if (unlikely(rc == 31))
1227 return;
1228 else if (unlikely(ra == 31))
1229 tcg_gen_movi_i64(cpu_ir[rc], 0);
1230 else {
1231 if (islit) {
1232 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1233 } else {
1234 TCGv tmp = tcg_temp_new();
1235 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1236 tcg_gen_shli_i64(tmp, tmp, 3);
1237 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1238 tcg_temp_free(tmp);
1239 }
1240 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1241 }
1242 }
1243
1244 /* INSWH, INSLH, INSQH */
1245 static void gen_ins_h(int ra, int rb, int rc, int islit,
1246 uint8_t lit, uint8_t byte_mask)
1247 {
1248 if (unlikely(rc == 31))
1249 return;
1250 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1251 tcg_gen_movi_i64(cpu_ir[rc], 0);
1252 else {
1253 TCGv tmp = tcg_temp_new();
1254
1255 /* The instruction description has us left-shift the byte mask
1256 and extract bits <15:8> and apply that zap at the end. This
1257 is equivalent to simply performing the zap first and shifting
1258 afterward. */
1259 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1260
1261 if (islit) {
1262 /* Note that we have handled the lit==0 case above. */
1263 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1264 } else {
1265 TCGv shift = tcg_temp_new();
1266
1267 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1268 Do this portably by splitting the shift into two parts:
1269 shift_count-1 and 1. Arrange for the -1 by using
1270 ones-complement instead of twos-complement in the negation:
1271 ~((B & 7) * 8) & 63. */
1272
1273 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1274 tcg_gen_shli_i64(shift, shift, 3);
1275 tcg_gen_not_i64(shift, shift);
1276 tcg_gen_andi_i64(shift, shift, 0x3f);
1277
1278 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1279 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1280 tcg_temp_free(shift);
1281 }
1282 tcg_temp_free(tmp);
1283 }
1284 }
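/* Worked example: for (rb & 7) == 3 the data must move right by
   64 - 24 = 40 bits. The negated count is (~24) & 63 = 39, and the
   final shri by 1 makes 40. For (rb & 7) == 0 the two steps total
   63 + 1 = 64, producing the required zero without relying on an
   out-of-range 64-bit shift count. */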
1285
1286 /* INSBL, INSWL, INSLL, INSQL */
1287 static void gen_ins_l(int ra, int rb, int rc, int islit,
1288 uint8_t lit, uint8_t byte_mask)
1289 {
1290 if (unlikely(rc == 31))
1291 return;
1292 else if (unlikely(ra == 31))
1293 tcg_gen_movi_i64(cpu_ir[rc], 0);
1294 else {
1295 TCGv tmp = tcg_temp_new();
1296
1297 /* The instruction description has us left-shift the byte mask
1298 the same number of byte slots as the data and apply the zap
1299 at the end. This is equivalent to simply performing the zap
1300 first and shifting afterward. */
1301 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1302
1303 if (islit) {
1304 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1305 } else {
1306 TCGv shift = tcg_temp_new();
1307 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1308 tcg_gen_shli_i64(shift, shift, 3);
1309 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1310 tcg_temp_free(shift);
1311 }
1312 tcg_temp_free(tmp);
1313 }
1314 }
1315
1316 /* MSKWH, MSKLH, MSKQH */
1317 static void gen_msk_h(int ra, int rb, int rc, int islit,
1318 uint8_t lit, uint8_t byte_mask)
1319 {
1320 if (unlikely(rc == 31))
1321 return;
1322 else if (unlikely(ra == 31))
1323 tcg_gen_movi_i64(cpu_ir[rc], 0);
1324 else if (islit) {
1325 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1326 } else {
1327 TCGv shift = tcg_temp_new();
1328 TCGv mask = tcg_temp_new();
1329
1330 /* The instruction description is as above, where the byte_mask
1331 is shifted left, and then we extract bits <15:8>. This can be
1332 emulated with a right-shift on the expanded byte mask. This
1333 requires extra care because for an input <2:0> == 0 we need a
1334 shift of 64 bits in order to generate a zero. This is done by
1335 splitting the shift into two parts, the variable shift - 1
1336 followed by a constant 1 shift. The code we expand below is
1337 equivalent to ~((B & 7) * 8) & 63. */
1338
1339 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1340 tcg_gen_shli_i64(shift, shift, 3);
1341 tcg_gen_not_i64(shift, shift);
1342 tcg_gen_andi_i64(shift, shift, 0x3f);
1343 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1344 tcg_gen_shr_i64(mask, mask, shift);
1345 tcg_gen_shri_i64(mask, mask, 1);
1346
1347 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1348
1349 tcg_temp_free(mask);
1350 tcg_temp_free(shift);
1351 }
1352 }
1353
1354 /* MSKBL, MSKWL, MSKLL, MSKQL */
1355 static void gen_msk_l(int ra, int rb, int rc, int islit,
1356 uint8_t lit, uint8_t byte_mask)
1357 {
1358 if (unlikely(rc == 31))
1359 return;
1360 else if (unlikely(ra == 31))
1361 tcg_gen_movi_i64(cpu_ir[rc], 0);
1362 else if (islit) {
1363 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1364 } else {
1365 TCGv shift = tcg_temp_new();
1366 TCGv mask = tcg_temp_new();
1367
1368 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1369 tcg_gen_shli_i64(shift, shift, 3);
1370 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1371 tcg_gen_shl_i64(mask, mask, shift);
1372
1373 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1374
1375 tcg_temp_free(mask);
1376 tcg_temp_free(shift);
1377 }
1378 }
1379
1380 /* Code to call arith3 helpers */
1381 #define ARITH3(name) \
1382 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1383 uint8_t lit) \
1384 { \
1385 if (unlikely(rc == 31)) \
1386 return; \
1387 \
1388 if (ra != 31) { \
1389 if (islit) { \
1390 TCGv tmp = tcg_const_i64(lit); \
1391 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1392 tcg_temp_free(tmp); \
1393 } else \
1394 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1395 } else { \
1396 TCGv tmp1 = tcg_const_i64(0); \
1397 if (islit) { \
1398 TCGv tmp2 = tcg_const_i64(lit); \
1399 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1400 tcg_temp_free(tmp2); \
1401 } else \
1402 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1403 tcg_temp_free(tmp1); \
1404 } \
1405 }
1406 ARITH3(cmpbge)
1407 ARITH3(minub8)
1408 ARITH3(minsb8)
1409 ARITH3(minuw4)
1410 ARITH3(minsw4)
1411 ARITH3(maxub8)
1412 ARITH3(maxsb8)
1413 ARITH3(maxuw4)
1414 ARITH3(maxsw4)
1415 ARITH3(perr)
1416
1417 /* Code to call arith3 helpers */
1418 #define ARITH3_EX(name) \
1419 static inline void glue(gen_, name)(int ra, int rb, int rc, \
1420 int islit, uint8_t lit) \
1421 { \
1422 if (unlikely(rc == 31)) { \
1423 return; \
1424 } \
1425 if (ra != 31) { \
1426 if (islit) { \
1427 TCGv tmp = tcg_const_i64(lit); \
1428 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1429 cpu_ir[ra], tmp); \
1430 tcg_temp_free(tmp); \
1431 } else { \
1432 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1433 cpu_ir[ra], cpu_ir[rb]); \
1434 } \
1435 } else { \
1436 TCGv tmp1 = tcg_const_i64(0); \
1437 if (islit) { \
1438 TCGv tmp2 = tcg_const_i64(lit); \
1439 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2); \
1440 tcg_temp_free(tmp2); \
1441 } else { \
1442 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
1443 } \
1444 tcg_temp_free(tmp1); \
1445 } \
1446 }
1447 ARITH3_EX(addlv)
1448 ARITH3_EX(sublv)
1449 ARITH3_EX(addqv)
1450 ARITH3_EX(subqv)
1451 ARITH3_EX(mullv)
1452 ARITH3_EX(mulqv)
1453
1454 #define MVIOP2(name) \
1455 static inline void glue(gen_, name)(int rb, int rc) \
1456 { \
1457 if (unlikely(rc == 31)) \
1458 return; \
1459 if (unlikely(rb == 31)) \
1460 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1461 else \
1462 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1463 }
1464 MVIOP2(pklb)
1465 MVIOP2(pkwb)
1466 MVIOP2(unpkbl)
1467 MVIOP2(unpkbw)
1468
1469 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1470 int islit, uint8_t lit)
1471 {
1472 TCGv va, vb;
1473
1474 if (unlikely(rc == 31)) {
1475 return;
1476 }
1477
1478 if (ra == 31) {
1479 va = tcg_const_i64(0);
1480 } else {
1481 va = cpu_ir[ra];
1482 }
1483 if (islit) {
1484 vb = tcg_const_i64(lit);
1485 } else {
1486 vb = cpu_ir[rb];
1487 }
1488
1489 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1490
1491 if (ra == 31) {
1492 tcg_temp_free(va);
1493 }
1494 if (islit) {
1495 tcg_temp_free(vb);
1496 }
1497 }
1498
1499 static void gen_rx(int ra, int set)
1500 {
1501 TCGv_i32 tmp;
1502
1503 if (ra != 31) {
1504 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
1505 }
1506
1507 tmp = tcg_const_i32(set);
1508 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
1509 tcg_temp_free_i32(tmp);
1510 }
1511
1512 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1513 {
1514 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1515 to internal cpu registers. */
1516
1517 /* Unprivileged PAL call */
1518 if (palcode >= 0x80 && palcode < 0xC0) {
1519 switch (palcode) {
1520 case 0x86:
1521 /* IMB */
1522 /* No-op inside QEMU. */
1523 break;
1524 case 0x9E:
1525 /* RDUNIQUE */
1526 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1527 break;
1528 case 0x9F:
1529 /* WRUNIQUE */
1530 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1531 break;
1532 default:
1533 palcode &= 0xbf;
1534 goto do_call_pal;
1535 }
1536 return NO_EXIT;
1537 }
1538
1539 #ifndef CONFIG_USER_ONLY
1540 /* Privileged PAL code */
1541 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1542 switch (palcode) {
1543 case 0x01:
1544 /* CFLUSH */
1545 /* No-op inside QEMU. */
1546 break;
1547 case 0x02:
1548 /* DRAINA */
1549 /* No-op inside QEMU. */
1550 break;
1551 case 0x2D:
1552 /* WRVPTPTR */
1553 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
1554 break;
1555 case 0x31:
1556 /* WRVAL */
1557 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1558 break;
1559 case 0x32:
1560 /* RDVAL */
1561 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1562 break;
1563
1564 case 0x35: {
1565 /* SWPIPL */
1566 TCGv tmp;
1567
1568 /* Note that we already know we're in kernel mode, so we know
1569 that PS only contains the 3 IPL bits. */
1570 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
1571
1572 /* But make sure to store only the 3 IPL bits from the user. */
1573 tmp = tcg_temp_new();
1574 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
1575 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
1576 tcg_temp_free(tmp);
1577 break;
1578 }
1579
1580 case 0x36:
1581 /* RDPS */
1582 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
1583 break;
1584 case 0x38:
1585 /* WRUSP */
1586 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1587 break;
1588 case 0x3A:
1589 /* RDUSP */
1590 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1591 break;
1592 case 0x3C:
1593 /* WHAMI */
1594 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
1595 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1596 break;
1597
1598 default:
1599 palcode &= 0x3f;
1600 goto do_call_pal;
1601 }
1602 return NO_EXIT;
1603 }
1604 #endif
1605 return gen_invalid(ctx);
1606
1607 do_call_pal:
1608 #ifdef CONFIG_USER_ONLY
1609 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1610 #else
1611 {
1612 TCGv pc = tcg_const_i64(ctx->pc);
1613 TCGv entry = tcg_const_i64(palcode & 0x80
1614 ? 0x2000 + (palcode - 0x80) * 64
1615 : 0x1000 + palcode * 64);
1616
1617 gen_helper_call_pal(cpu_env, pc, entry);
1618
1619 tcg_temp_free(entry);
1620 tcg_temp_free(pc);
1621
1622 /* Since the destination is running in PALmode, we don't really
1623 need the page permissions check. We'll see the existence of
1624 the page when we create the TB, and we'll flush all TBs if
1625 we change the PAL base register. */
1626 if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
1627 tcg_gen_goto_tb(0);
1628 tcg_gen_exit_tb((uintptr_t)ctx->tb);
1629 return EXIT_GOTO_TB;
1630 }
1631
1632 return EXIT_PC_UPDATED;
1633 }
1634 #endif
1635 }
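/* Entry-point arithmetic, for reference: unprivileged calls vector
   to PAL offset 0x2000 + (palcode - 0x80) * 64, privileged calls to
   0x1000 + palcode * 64. E.g. CALL_PAL 0x83 (callsys) enters at
   0x2000 + 3 * 64 = 0x20c0 relative to the PAL base. */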
1636
1637 #ifndef CONFIG_USER_ONLY
1638
1639 #define PR_BYTE 0x100000
1640 #define PR_LONG 0x200000
1641
1642 static int cpu_pr_data(int pr)
1643 {
1644 switch (pr) {
1645 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1646 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1647 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1648 case 3: return offsetof(CPUAlphaState, trap_arg0);
1649 case 4: return offsetof(CPUAlphaState, trap_arg1);
1650 case 5: return offsetof(CPUAlphaState, trap_arg2);
1651 case 6: return offsetof(CPUAlphaState, exc_addr);
1652 case 7: return offsetof(CPUAlphaState, palbr);
1653 case 8: return offsetof(CPUAlphaState, ptbr);
1654 case 9: return offsetof(CPUAlphaState, vptptr);
1655 case 10: return offsetof(CPUAlphaState, unique);
1656 case 11: return offsetof(CPUAlphaState, sysval);
1657 case 12: return offsetof(CPUAlphaState, usp);
1658
1659 case 32 ... 39:
1660 return offsetof(CPUAlphaState, shadow[pr - 32]);
1661 case 40 ... 63:
1662 return offsetof(CPUAlphaState, scratch[pr - 40]);
1663
1664 case 251:
1665 return offsetof(CPUAlphaState, alarm_expire);
1666 }
1667 return 0;
1668 }
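/* The PR_BYTE/PR_LONG tags sit well above any real offsetof() value
   in CPUAlphaState, so they can be ORed into the returned offset and
   masked back off by gen_mfpr/gen_mtpr below; a return of 0 marks a
   register that reads as zero and ignores writes. */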
1669
1670 static ExitStatus gen_mfpr(int ra, int regno)
1671 {
1672 int data = cpu_pr_data(regno);
1673
1674 /* In our emulated PALcode, these processor registers have no
1675 side effects from reading. */
1676 if (ra == 31) {
1677 return NO_EXIT;
1678 }
1679
1680 /* Special help for VMTIME and WALLTIME. */
1681 if (regno == 250 || regno == 249) {
1682 void (*helper)(TCGv) = gen_helper_get_walltime;
1683 if (regno == 249) {
1684 helper = gen_helper_get_vmtime;
1685 }
1686 if (use_icount) {
1687 gen_io_start();
1688 helper(cpu_ir[ra]);
1689 gen_io_end();
1690 return EXIT_PC_STALE;
1691 } else {
1692 helper(cpu_ir[ra]);
1693 return NO_EXIT;
1694 }
1695 }
1696
1697 /* The basic registers are data only, and unknown registers
1698 are read-zero, write-ignore. */
1699 if (data == 0) {
1700 tcg_gen_movi_i64(cpu_ir[ra], 0);
1701 } else if (data & PR_BYTE) {
1702 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1703 } else if (data & PR_LONG) {
1704 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1705 } else {
1706 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1707 }
1708 return NO_EXIT;
1709 }
1710
1711 static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
1712 {
1713 TCGv tmp;
1714 int data;
1715
1716 if (rb == 31) {
1717 tmp = tcg_const_i64(0);
1718 } else {
1719 tmp = cpu_ir[rb];
1720 }
1721
1722 switch (regno) {
1723 case 255:
1724 /* TBIA */
1725 gen_helper_tbia(cpu_env);
1726 break;
1727
1728 case 254:
1729 /* TBIS */
1730 gen_helper_tbis(cpu_env, tmp);
1731 break;
1732
1733 case 253:
1734 /* WAIT */
1735 tmp = tcg_const_i64(1);
1736 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1737 offsetof(CPUState, halted));
1738 return gen_excp(ctx, EXCP_HLT, 0);
1739
1740 case 252:
1741 /* HALT */
1742 gen_helper_halt(tmp);
1743 return EXIT_PC_STALE;
1744
1745 case 251:
1746 /* ALARM */
1747 gen_helper_set_alarm(cpu_env, tmp);
1748 break;
1749
1750 case 7:
1751 /* PALBR */
1752 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
1753 /* Changing the PAL base register implies un-chaining all of the TBs
1754 that ended with a CALL_PAL. Since the base register usually only
1755 changes during boot, flushing everything works well. */
1756 gen_helper_tb_flush(cpu_env);
1757 return EXIT_PC_STALE;
1758
1759 default:
1760 /* The basic registers are data only, and unknown registers
1761 are read-zero, write-ignore. */
1762 data = cpu_pr_data(regno);
1763 if (data != 0) {
1764 if (data & PR_BYTE) {
1765 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1766 } else if (data & PR_LONG) {
1767 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1768 } else {
1769 tcg_gen_st_i64(tmp, cpu_env, data);
1770 }
1771 }
1772 break;
1773 }
1774
1775 if (rb == 31) {
1776 tcg_temp_free(tmp);
1777 }
1778
1779 return NO_EXIT;
1780 }
1781 #endif /* !CONFIG_USER_ONLY */
1782
1783 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1784 {
1785 uint32_t palcode;
1786 int32_t disp21, disp16;
1787 #ifndef CONFIG_USER_ONLY
1788 int32_t disp12;
1789 #endif
1790 uint16_t fn11;
1791 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
1792 uint8_t lit;
1793 ExitStatus ret;
1794
1795 /* Decode all instruction fields */
1796 opc = insn >> 26;
1797 ra = (insn >> 21) & 0x1F;
1798 rb = (insn >> 16) & 0x1F;
1799 rc = insn & 0x1F;
1800 real_islit = islit = (insn >> 12) & 1;
1801 if (rb == 31 && !islit) {
1802 islit = 1;
1803 lit = 0;
1804 } else
1805 lit = (insn >> 13) & 0xFF;
1806 palcode = insn & 0x03FFFFFF;
1807 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1808 disp16 = (int16_t)(insn & 0x0000FFFF);
1809 #ifndef CONFIG_USER_ONLY
1810 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1811 #endif
1812 fn11 = (insn >> 5) & 0x000007FF;
1813 fpfn = fn11 & 0x3F;
1814 fn7 = (insn >> 5) & 0x0000007F;
1815 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1816 opc, ra, rb, rc, disp16);
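/* Field layout, for reference (operate format): opc <31:26>,
   ra <25:21>, rb <20:16>, lit <20:13> with islit in <12>, fn7 <11:5>
   (fn11 <15:5> for FP operates), rc <4:0>. Memory format instead
   uses <15:0> as disp16; branch format uses <20:0> as disp21. */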
1817
1818 ret = NO_EXIT;
1819 switch (opc) {
1820 case 0x00:
1821 /* CALL_PAL */
1822 ret = gen_call_pal(ctx, palcode);
1823 break;
1824 case 0x01:
1825 /* OPC01 */
1826 goto invalid_opc;
1827 case 0x02:
1828 /* OPC02 */
1829 goto invalid_opc;
1830 case 0x03:
1831 /* OPC03 */
1832 goto invalid_opc;
1833 case 0x04:
1834 /* OPC04 */
1835 goto invalid_opc;
1836 case 0x05:
1837 /* OPC05 */
1838 goto invalid_opc;
1839 case 0x06:
1840 /* OPC06 */
1841 goto invalid_opc;
1842 case 0x07:
1843 /* OPC07 */
1844 goto invalid_opc;
1845 case 0x08:
1846 /* LDA */
1847 if (likely(ra != 31)) {
1848 if (rb != 31)
1849 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1850 else
1851 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1852 }
1853 break;
1854 case 0x09:
1855 /* LDAH */
1856 if (likely(ra != 31)) {
1857 if (rb != 31)
1858 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1859 else
1860 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1861 }
1862 break;
1863 case 0x0A:
1864 /* LDBU */
1865 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1866 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1867 break;
1868 }
1869 goto invalid_opc;
1870 case 0x0B:
1871 /* LDQ_U */
1872 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1873 break;
1874 case 0x0C:
1875 /* LDWU */
1876 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1877 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1878 break;
1879 }
1880 goto invalid_opc;
1881 case 0x0D:
1882 /* STW */
1883 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1884 break;
1885 case 0x0E:
1886 /* STB */
1887 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1888 break;
1889 case 0x0F:
1890 /* STQ_U */
1891 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1892 break;
1893 case 0x10:
1894 switch (fn7) {
1895 case 0x00:
1896 /* ADDL */
1897 if (likely(rc != 31)) {
1898 if (ra != 31) {
1899 if (islit) {
1900 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1901 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1902 } else {
1903 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1904 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1905 }
1906 } else {
1907 if (islit)
1908 tcg_gen_movi_i64(cpu_ir[rc], lit);
1909 else
1910 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1911 }
1912 }
1913 break;
1914 case 0x02:
1915 /* S4ADDL */
1916 if (likely(rc != 31)) {
1917 if (ra != 31) {
1918 TCGv tmp = tcg_temp_new();
1919 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1920 if (islit)
1921 tcg_gen_addi_i64(tmp, tmp, lit);
1922 else
1923 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1924 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1925 tcg_temp_free(tmp);
1926 } else {
1927 if (islit)
1928 tcg_gen_movi_i64(cpu_ir[rc], lit);
1929 else
1930 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1931 }
1932 }
1933 break;
1934 case 0x09:
1935 /* SUBL */
1936 if (likely(rc != 31)) {
1937 if (ra != 31) {
1938 if (islit)
1939 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1940 else
1941 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1942 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1943 } else {
1944 if (islit)
1945 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1946 else {
1947 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1948 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1949 }
1950 }
1950 }
1951 break;
1952 case 0x0B:
1953 /* S4SUBL */
1954 if (likely(rc != 31)) {
1955 if (ra != 31) {
1956 TCGv tmp = tcg_temp_new();
1957 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1958 if (islit)
1959 tcg_gen_subi_i64(tmp, tmp, lit);
1960 else
1961 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1962 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1963 tcg_temp_free(tmp);
1964 } else {
1965 if (islit)
1966 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1967 else {
1968 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1969 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1970 }
1971 }
1972 }
1973 break;
1974 case 0x0F:
1975 /* CMPBGE */
1976 gen_cmpbge(ra, rb, rc, islit, lit);
1977 break;
1978 case 0x12:
1979 /* S8ADDL */
1980 if (likely(rc != 31)) {
1981 if (ra != 31) {
1982 TCGv tmp = tcg_temp_new();
1983 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1984 if (islit)
1985 tcg_gen_addi_i64(tmp, tmp, lit);
1986 else
1987 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1988 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1989 tcg_temp_free(tmp);
1990 } else {
1991 if (islit)
1992 tcg_gen_movi_i64(cpu_ir[rc], lit);
1993 else
1994 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1995 }
1996 }
1997 break;
1998 case 0x1B:
1999 /* S8SUBL */
2000 if (likely(rc != 31)) {
2001 if (ra != 31) {
2002 TCGv tmp = tcg_temp_new();
2003 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2004 if (islit)
2005 tcg_gen_subi_i64(tmp, tmp, lit);
2006 else
2007 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
2008 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
2009 tcg_temp_free(tmp);
2010 } else {
2011 if (islit)
2012 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2013 else {
2014 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2015 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2016 }
2017 }
2018 }
2019 break;
2020 case 0x1D:
2021 /* CMPULT */
2022 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
2023 break;
2024 case 0x20:
2025 /* ADDQ */
2026 if (likely(rc != 31)) {
2027 if (ra != 31) {
2028 if (islit)
2029 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2030 else
2031 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2032 } else {
2033 if (islit)
2034 tcg_gen_movi_i64(cpu_ir[rc], lit);
2035 else
2036 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2037 }
2038 }
2039 break;
2040 case 0x22:
2041 /* S4ADDQ */
2042 if (likely(rc != 31)) {
2043 if (ra != 31) {
2044 TCGv tmp = tcg_temp_new();
2045 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2046 if (islit)
2047 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2048 else
2049 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2050 tcg_temp_free(tmp);
2051 } else {
2052 if (islit)
2053 tcg_gen_movi_i64(cpu_ir[rc], lit);
2054 else
2055 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2056 }
2057 }
2058 break;
2059 case 0x29:
2060 /* SUBQ */
2061 if (likely(rc != 31)) {
2062 if (ra != 31) {
2063 if (islit)
2064 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2065 else
2066 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2067 } else {
2068 if (islit)
2069 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2070 else
2071 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2072 }
2073 }
2074 break;
2075 case 0x2B:
2076 /* S4SUBQ */
2077 if (likely(rc != 31)) {
2078 if (ra != 31) {
2079 TCGv tmp = tcg_temp_new();
2080 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2081 if (islit)
2082 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2083 else
2084 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2085 tcg_temp_free(tmp);
2086 } else {
2087 if (islit)
2088 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2089 else
2090 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2091 }
2092 }
2093 break;
2094 case 0x2D:
2095 /* CMPEQ */
2096 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
2097 break;
2098 case 0x32:
2099 /* S8ADDQ */
2100 if (likely(rc != 31)) {
2101 if (ra != 31) {
2102 TCGv tmp = tcg_temp_new();
2103 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2104 if (islit)
2105 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2106 else
2107 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2108 tcg_temp_free(tmp);
2109 } else {
2110 if (islit)
2111 tcg_gen_movi_i64(cpu_ir[rc], lit);
2112 else
2113 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2114 }
2115 }
2116 break;
2117 case 0x3B:
2118 /* S8SUBQ */
2119 if (likely(rc != 31)) {
2120 if (ra != 31) {
2121 TCGv tmp = tcg_temp_new();
2122 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2123 if (islit)
2124 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2125 else
2126 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2127 tcg_temp_free(tmp);
2128 } else {
2129 if (islit)
2130 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2131 else
2132 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2133 }
2134 }
2135 break;
2136 case 0x3D:
2137 /* CMPULE */
2138 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2139 break;
2140 case 0x40:
2141 /* ADDL/V */
2142 gen_addlv(ra, rb, rc, islit, lit);
2143 break;
2144 case 0x49:
2145 /* SUBL/V */
2146 gen_sublv(ra, rb, rc, islit, lit);
2147 break;
2148 case 0x4D:
2149 /* CMPLT */
2150 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2151 break;
2152 case 0x60:
2153 /* ADDQ/V */
2154 gen_addqv(ra, rb, rc, islit, lit);
2155 break;
2156 case 0x69:
2157 /* SUBQ/V */
2158 gen_subqv(ra, rb, rc, islit, lit);
2159 break;
2160 case 0x6D:
2161 /* CMPLE */
2162 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2163 break;
2164 default:
2165 goto invalid_opc;
2166 }
2167 break;
2168 case 0x11:
2169 switch (fn7) {
2170 case 0x00:
2171 /* AND */
2172 if (likely(rc != 31)) {
2173 if (ra == 31)
2174 tcg_gen_movi_i64(cpu_ir[rc], 0);
2175 else if (islit)
2176 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2177 else
2178 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2179 }
2180 break;
2181 case 0x08:
2182 /* BIC */
2183 if (likely(rc != 31)) {
2184 if (ra != 31) {
2185 if (islit)
2186 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2187 else
2188 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2189 } else
2190 tcg_gen_movi_i64(cpu_ir[rc], 0);
2191 }
2192 break;
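/* For the CMOV* cases the last gen_cmov argument selects the test:
   nonzero means only bit 0 of ra is examined (the "low bit" forms
   CMOVLBS/CMOVLBC), zero means the whole register is compared
   against zero. */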
2193 case 0x14:
2194 /* CMOVLBS */
2195 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2196 break;
2197 case 0x16:
2198 /* CMOVLBC */
2199 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2200 break;
2201 case 0x20:
2202 /* BIS */
2203 if (likely(rc != 31)) {
2204 if (ra != 31) {
2205 if (islit)
2206 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2207 else
2208 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2209 } else {
2210 if (islit)
2211 tcg_gen_movi_i64(cpu_ir[rc], lit);
2212 else
2213 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2214 }
2215 }
2216 break;
2217 case 0x24:
2218 /* CMOVEQ */
2219 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2220 break;
2221 case 0x26:
2222 /* CMOVNE */
2223 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2224 break;
2225 case 0x28:
2226 /* ORNOT */
2227 if (likely(rc != 31)) {
2228 if (ra != 31) {
2229 if (islit)
2230 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2231 else
2232 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2233 } else {
2234 if (islit)
2235 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2236 else
2237 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2238 }
2239 }
2240 break;
2241 case 0x40:
2242 /* XOR */
2243 if (likely(rc != 31)) {
2244 if (ra != 31) {
2245 if (islit)
2246 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2247 else
2248 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2249 } else {
2250 if (islit)
2251 tcg_gen_movi_i64(cpu_ir[rc], lit);
2252 else
2253 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2254 }
2255 }
2256 break;
2257 case 0x44:
2258 /* CMOVLT */
2259 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2260 break;
2261 case 0x46:
2262 /* CMOVGE */
2263 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2264 break;
2265 case 0x48:
2266 /* EQV */
2267 if (likely(rc != 31)) {
2268 if (ra != 31) {
2269 if (islit)
2270 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2271 else
2272 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2273 } else {
2274 if (islit)
2275 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2276 else
2277 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2278 }
2279 }
2280 break;
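/* AMASK returns its source with the bits corresponding to implemented
   architecture extensions (e.g. BWX) cleared, which is how software
   probes for CPU features.  The implemented-feature mask is cached in
   the tb->flags bits above TB_FLAGS_AMASK_SHIFT. */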
2281 case 0x61:
2282 /* AMASK */
2283 if (likely(rc != 31)) {
2284 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2285
2286 if (islit) {
2287 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2288 } else {
2289 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2290 }
2291 }
2292 break;
2293 case 0x64:
2294 /* CMOVLE */
2295 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2296 break;
2297 case 0x66:
2298 /* CMOVGT */
2299 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2300 break;
2301 case 0x6C:
2302 /* IMPLVER */
2303 if (rc != 31) {
2304 tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
2305 }
2306 break;
2307 default:
2308 goto invalid_opc;
2309 }
2310 break;
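/* Opcode 0x12 is the byte-manipulation group.  The last argument to
   the gen_msk/gen_ext/gen_ins helpers selects the operand width as a
   byte mask: 0x01 = byte, 0x03 = word, 0x0f = longword,
   0xff = quadword. */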
2311 case 0x12:
2312 switch (fn7) {
2313 case 0x02:
2314 /* MSKBL */
2315 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2316 break;
2317 case 0x06:
2318 /* EXTBL */
2319 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2320 break;
2321 case 0x0B:
2322 /* INSBL */
2323 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2324 break;
2325 case 0x12:
2326 /* MSKWL */
2327 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2328 break;
2329 case 0x16:
2330 /* EXTWL */
2331 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2332 break;
2333 case 0x1B:
2334 /* INSWL */
2335 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2336 break;
2337 case 0x22:
2338 /* MSKLL */
2339 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2340 break;
2341 case 0x26:
2342 /* EXTLL */
2343 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2344 break;
2345 case 0x2B:
2346 /* INSLL */
2347 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2348 break;
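/* ZAP clears each byte of ra whose corresponding bit is set in the
   low 8 bits of rb; ZAPNOT clears the bytes whose bits are clear.
   E.g. ZAPNOT with mask 0x0f keeps only the low longword. */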
2349 case 0x30:
2350 /* ZAP */
2351 gen_zap(ra, rb, rc, islit, lit);
2352 break;
2353 case 0x31:
2354 /* ZAPNOT */
2355 gen_zapnot(ra, rb, rc, islit, lit);
2356 break;
2357 case 0x32:
2358 /* MSKQL */
2359 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2360 break;
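/* For the variable shifts only rb<5:0> supplies the shift count,
   hence the explicit masking with 0x3f below; a count of 64 or more
   can never reach the TCG shift op. */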
2361 case 0x34:
2362 /* SRL */
2363 if (likely(rc != 31)) {
2364 if (ra != 31) {
2365 if (islit)
2366 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2367 else {
2368 TCGv shift = tcg_temp_new();
2369 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2370 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2371 tcg_temp_free(shift);
2372 }
2373 } else
2374 tcg_gen_movi_i64(cpu_ir[rc], 0);
2375 }
2376 break;
2377 case 0x36:
2378 /* EXTQL */
2379 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2380 break;
2381 case 0x39:
2382 /* SLL */
2383 if (likely(rc != 31)) {
2384 if (ra != 31) {
2385 if (islit)
2386 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2387 else {
2388 TCGv shift = tcg_temp_new();
2389 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2390 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2391 tcg_temp_free(shift);
2392 }
2393 } else
2394 tcg_gen_movi_i64(cpu_ir[rc], 0);
2395 }
2396 break;
2397 case 0x3B:
2398 /* INSQL */
2399 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2400 break;
2401 case 0x3C:
2402 /* SRA */
2403 if (likely(rc != 31)) {
2404 if (ra != 31) {
2405 if (islit)
2406 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2407 else {
2408 TCGv shift = tcg_temp_new();
2409 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2410 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2411 tcg_temp_free(shift);
2412 }
2413 } else
2414 tcg_gen_movi_i64(cpu_ir[rc], 0);
2415 }
2416 break;
2417 case 0x52:
2418 /* MSKWH */
2419 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2420 break;
2421 case 0x57:
2422 /* INSWH */
2423 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2424 break;
2425 case 0x5A:
2426 /* EXTWH */
2427 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2428 break;
2429 case 0x62:
2430 /* MSKLH */
2431 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2432 break;
2433 case 0x67:
2434 /* INSLH */
2435 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2436 break;
2437 case 0x6A:
2438 /* EXTLH */
2439 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2440 break;
2441 case 0x72:
2442 /* MSKQH */
2443 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2444 break;
2445 case 0x77:
2446 /* INSQH */
2447 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2448 break;
2449 case 0x7A:
2450 /* EXTQH */
2451 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2452 break;
2453 default:
2454 goto invalid_opc;
2455 }
2456 break;
2457 case 0x13:
2458 switch (fn7) {
2459 case 0x00:
2460 /* MULL */
2461 if (likely(rc != 31)) {
2462 if (ra == 31)
2463 tcg_gen_movi_i64(cpu_ir[rc], 0);
2464 else {
2465 if (islit)
2466 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2467 else
2468 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2469 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2470 }
2471 }
2472 break;
2473 case 0x20:
2474 /* MULQ */
2475 if (likely(rc != 31)) {
2476 if (ra == 31)
2477 tcg_gen_movi_i64(cpu_ir[rc], 0);
2478 else if (islit)
2479 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2480 else
2481 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2482 }
2483 break;
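/* UMULH wants only the high 64 bits of the unsigned 128-bit product.
   tcg_gen_mulu2_i64 computes both halves at once; the low half goes
   into a scratch temporary and is discarded. */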
2484 case 0x30:
2485 /* UMULH */
2486 {
2487 TCGv low;
2488 if (unlikely(rc == 31)) {
2489 break;
2490 }
2491 if (ra == 31) {
2492 tcg_gen_movi_i64(cpu_ir[rc], 0);
2493 break;
2494 }
2495 low = tcg_temp_new();
2496 if (islit) {
2497 tcg_gen_movi_tl(low, lit);
2498 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2499 } else {
2500 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2501 }
2502 tcg_temp_free(low);
2503 }
2504 break;
2505 case 0x40:
2506 /* MULL/V */
2507 gen_mullv(ra, rb, rc, islit, lit);
2508 break;
2509 case 0x60:
2510 /* MULQ/V */
2511 gen_mulqv(ra, rb, rc, islit, lit);
2512 break;
2513 default:
2514 goto invalid_opc;
2515 }
2516 break;
2517 case 0x14:
2518 switch (fpfn) { /* fn11 & 0x3F */
2519 case 0x04:
2520 /* ITOFS */
2521 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2522 goto invalid_opc;
2523 }
2524 if (likely(rc != 31)) {
2525 if (ra != 31) {
2526 TCGv_i32 tmp = tcg_temp_new_i32();
2527 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2528 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2529 tcg_temp_free_i32(tmp);
2530 } else
2531 tcg_gen_movi_i64(cpu_fir[rc], 0);
2532 }
2533 break;
2534 case 0x0A:
2535 /* SQRTF */
2536 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2537 gen_fsqrtf(rb, rc);
2538 break;
2539 }
2540 goto invalid_opc;
2541 case 0x0B:
2542 /* SQRTS */
2543 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2544 gen_fsqrts(ctx, rb, rc, fn11);
2545 break;
2546 }
2547 goto invalid_opc;
2548 case 0x14:
2549 /* ITOFF */
2550 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2551 goto invalid_opc;
2552 }
2553 if (likely(rc != 31)) {
2554 if (ra != 31) {
2555 TCGv_i32 tmp = tcg_temp_new_i32();
2556 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2557 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2558 tcg_temp_free_i32(tmp);
2559 } else
2560 tcg_gen_movi_i64(cpu_fir[rc], 0);
2561 }
2562 break;
2563 case 0x24:
2564 /* ITOFT */
2565 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2566 goto invalid_opc;
2567 }
2568 if (likely(rc != 31)) {
2569 if (ra != 31)
2570 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2571 else
2572 tcg_gen_movi_i64(cpu_fir[rc], 0);
2573 }
2574 break;
2575 case 0x2A:
2576 /* SQRTG */
2577 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2578 gen_fsqrtg(rb, rc);
2579 break;
2580 }
2581 goto invalid_opc;
2582 case 0x2B:
2583 /* SQRTT */
2584 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2585 gen_fsqrtt(ctx, rb, rc, fn11);
2586 break;
2587 }
2588 goto invalid_opc;
2589 default:
2590 goto invalid_opc;
2591 }
2592 break;
2593 case 0x15:
2594 /* VAX floating point */
2595 /* XXX: rounding mode and trap are ignored (!) */
2596 switch (fpfn) { /* fn11 & 0x3F */
2597 case 0x00:
2598 /* ADDF */
2599 gen_faddf(ra, rb, rc);
2600 break;
2601 case 0x01:
2602 /* SUBF */
2603 gen_fsubf(ra, rb, rc);
2604 break;
2605 case 0x02:
2606 /* MULF */
2607 gen_fmulf(ra, rb, rc);
2608 break;
2609 case 0x03:
2610 /* DIVF */
2611 gen_fdivf(ra, rb, rc);
2612 break;
2613 case 0x1E:
2614 /* CVTDG */
2615 #if 0 // TODO
2616 gen_fcvtdg(rb, rc);
2617 #else
2618 goto invalid_opc;
2619 #endif
2620 break;
2621 case 0x20:
2622 /* ADDG */
2623 gen_faddg(ra, rb, rc);
2624 break;
2625 case 0x21:
2626 /* SUBG */
2627 gen_fsubg(ra, rb, rc);
2628 break;
2629 case 0x22:
2630 /* MULG */
2631 gen_fmulg(ra, rb, rc);
2632 break;
2633 case 0x23:
2634 /* DIVG */
2635 gen_fdivg(ra, rb, rc);
2636 break;
2637 case 0x25:
2638 /* CMPGEQ */
2639 gen_fcmpgeq(ra, rb, rc);
2640 break;
2641 case 0x26:
2642 /* CMPGLT */
2643 gen_fcmpglt(ra, rb, rc);
2644 break;
2645 case 0x27:
2646 /* CMPGLE */
2647 gen_fcmpgle(ra, rb, rc);
2648 break;
2649 case 0x2C:
2650 /* CVTGF */
2651 gen_fcvtgf(rb, rc);
2652 break;
2653 case 0x2D:
2654 /* CVTGD */
2655 #if 0 // TODO
2656 gen_fcvtgd(rb, rc);
2657 #else
2658 goto invalid_opc;
2659 #endif
2660 break;
2661 case 0x2F:
2662 /* CVTGQ */
2663 gen_fcvtgq(rb, rc);
2664 break;
2665 case 0x3C:
2666 /* CVTQF */
2667 gen_fcvtqf(rb, rc);
2668 break;
2669 case 0x3E:
2670 /* CVTQG */
2671 gen_fcvtqg(rb, rc);
2672 break;
2673 default:
2674 goto invalid_opc;
2675 }
2676 break;
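/* For the IEEE operations fpfn (fn11 & 0x3F) selects the operation
   proper, while the upper fn11 bits carry the rounding and trapping
   qualifiers; the full fn11 is therefore forwarded to each helper. */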
2677 case 0x16:
2678 /* IEEE floating-point */
2679 switch (fpfn) { /* fn11 & 0x3F */
2680 case 0x00:
2681 /* ADDS */
2682 gen_fadds(ctx, ra, rb, rc, fn11);
2683 break;
2684 case 0x01:
2685 /* SUBS */
2686 gen_fsubs(ctx, ra, rb, rc, fn11);
2687 break;
2688 case 0x02:
2689 /* MULS */
2690 gen_fmuls(ctx, ra, rb, rc, fn11);
2691 break;
2692 case 0x03:
2693 /* DIVS */
2694 gen_fdivs(ctx, ra, rb, rc, fn11);
2695 break;
2696 case 0x20:
2697 /* ADDT */
2698 gen_faddt(ctx, ra, rb, rc, fn11);
2699 break;
2700 case 0x21:
2701 /* SUBT */
2702 gen_fsubt(ctx, ra, rb, rc, fn11);
2703 break;
2704 case 0x22:
2705 /* MULT */
2706 gen_fmult(ctx, ra, rb, rc, fn11);
2707 break;
2708 case 0x23:
2709 /* DIVT */
2710 gen_fdivt(ctx, ra, rb, rc, fn11);
2711 break;
2712 case 0x24:
2713 /* CMPTUN */
2714 gen_fcmptun(ctx, ra, rb, rc, fn11);
2715 break;
2716 case 0x25:
2717 /* CMPTEQ */
2718 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2719 break;
2720 case 0x26:
2721 /* CMPTLT */
2722 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2723 break;
2724 case 0x27:
2725 /* CMPTLE */
2726 gen_fcmptle(ctx, ra, rb, rc, fn11);
2727 break;
2728 case 0x2C:
2729 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2730 /* CVTST */
2731 gen_fcvtst(ctx, rb, rc, fn11);
2732 } else {
2733 /* CVTTS */
2734 gen_fcvtts(ctx, rb, rc, fn11);
2735 }
2736 break;
2737 case 0x2F:
2738 /* CVTTQ */
2739 gen_fcvttq(ctx, rb, rc, fn11);
2740 break;
2741 case 0x3C:
2742 /* CVTQS */
2743 gen_fcvtqs(ctx, rb, rc, fn11);
2744 break;
2745 case 0x3E:
2746 /* CVTQT */
2747 gen_fcvtqt(ctx, rb, rc, fn11);
2748 break;
2749 default:
2750 goto invalid_opc;
2751 }
2752 break;
2753 case 0x17:
2754 switch (fn11) {
2755 case 0x010:
2756 /* CVTLQ */
2757 gen_fcvtlq(rb, rc);
2758 break;
2759 case 0x020:
2760 if (likely(rc != 31)) {
2761 if (ra == rb) {
2762 /* FMOV */
2763 if (ra == 31)
2764 tcg_gen_movi_i64(cpu_fir[rc], 0);
2765 else
2766 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2767 } else {
2768 /* CPYS */
2769 gen_fcpys(ra, rb, rc);
2770 }
2771 }
2772 break;
2773 case 0x021:
2774 /* CPYSN */
2775 gen_fcpysn(ra, rb, rc);
2776 break;
2777 case 0x022:
2778 /* CPYSE */
2779 gen_fcpyse(ra, rb, rc);
2780 break;
2781 case 0x024:
2782 /* MT_FPCR */
2783 if (likely(ra != 31))
2784 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
2785 else {
2786 TCGv tmp = tcg_const_i64(0);
2787 gen_helper_store_fpcr(cpu_env, tmp);
2788 tcg_temp_free(tmp);
2789 }
2790 break;
2791 case 0x025:
2792 /* MF_FPCR */
2793 if (likely(ra != 31))
2794 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
2795 break;
2796 case 0x02A:
2797 /* FCMOVEQ */
2798 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2799 break;
2800 case 0x02B:
2801 /* FCMOVNE */
2802 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2803 break;
2804 case 0x02C:
2805 /* FCMOVLT */
2806 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2807 break;
2808 case 0x02D:
2809 /* FCMOVGE */
2810 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2811 break;
2812 case 0x02E:
2813 /* FCMOVLE */
2814 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2815 break;
2816 case 0x02F:
2817 /* FCMOVGT */
2818 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2819 break;
2820 case 0x030:
2821 /* CVTQL */
2822 gen_fcvtql(rb, rc);
2823 break;
2824 case 0x130:
2825 /* CVTQL/V */
2826 case 0x530:
2827 /* CVTQL/SV */
2828 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2829 /v doesn't do. The only thing I can think of is that /sv is a
2830 valid instruction merely for completeness in the ISA. */
2831 gen_fcvtql_v(ctx, rb, rc);
2832 break;
2833 default:
2834 goto invalid_opc;
2835 }
2836 break;
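/* Opcode 0x18 is the miscellaneous group, dispatched on the whole
   16-bit displacement field.  The barriers (TRAPB, EXCB, MB, WMB) can
   be no-ops here because TCG executes the generated code for a single
   CPU strictly in order.  ??? This would need revisiting if a
   multi-threaded execution mode were added. */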
2837 case 0x18:
2838 switch ((uint16_t)disp16) {
2839 case 0x0000:
2840 /* TRAPB */
2841 /* No-op */
2842 break;
2843 case 0x0400:
2844 /* EXCB */
2845 /* No-op */
2846 break;
2847 case 0x4000:
2848 /* MB */
2849 /* No-op */
2850 break;
2851 case 0x4400:
2852 /* WMB */
2853 /* No-op */
2854 break;
2855 case 0x8000:
2856 /* FETCH */
2857 /* No-op */
2858 break;
2859 case 0xA000:
2860 /* FETCH_M */
2861 /* No-op */
2862 break;
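/* RPCC reads the process cycle counter.  With icount enabled the
   helper call is bracketed by gen_io_start/gen_io_end and the TB is
   ended via EXIT_PC_STALE so that the instruction count stays
   exact. */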
2863 case 0xC000:
2864 /* RPCC */
2865 if (ra != 31) {
2866 if (use_icount) {
2867 gen_io_start();
2868 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2869 gen_io_end();
2870 ret = EXIT_PC_STALE;
2871 } else {
2872 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2873 }
2874 }
2875 break;
2876 case 0xE000:
2877 /* RC */
2878 gen_rx(ra, 0);
2879 break;
2880 case 0xE800:
2881 /* ECB */
2882 break;
2883 case 0xF000:
2884 /* RS */
2885 gen_rx(ra, 1);
2886 break;
2887 case 0xF800:
2888 /* WH64 */
2889 /* No-op */
2890 break;
2891 default:
2892 goto invalid_opc;
2893 }
2894 break;
2895 case 0x19:
2896 /* HW_MFPR (PALcode) */
2897 #ifndef CONFIG_USER_ONLY
2898 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2899 return gen_mfpr(ra, insn & 0xffff);
2900 }
2901 #endif
2902 goto invalid_opc;
2903 case 0x1A:
2904 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2905 prediction stack action, which of course we don't implement. */
2906 if (rb != 31) {
2907 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2908 } else {
2909 tcg_gen_movi_i64(cpu_pc, 0);
2910 }
2911 if (ra != 31) {
2912 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2913 }
2914 ret = EXIT_PC_UPDATED;
2915 break;
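/* HW_LD is only legal in PAL mode.  Bits <15:12> of the instruction
   select the access type (physical, locked, virtual, alternate mode);
   the displacement is the 12-bit disp12 rather than the usual 16. */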
2916 case 0x1B:
2917 /* HW_LD (PALcode) */
2918 #ifndef CONFIG_USER_ONLY
2919 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2920 TCGv addr;
2921
2922 if (ra == 31) {
2923 break;
2924 }
2925
2926 addr = tcg_temp_new();
2927 if (rb != 31)
2928 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2929 else
2930 tcg_gen_movi_i64(addr, disp12);
2931 switch ((insn >> 12) & 0xF) {
2932 case 0x0:
2933 /* Longword physical access (hw_ldl/p) */
2934 gen_helper_ldl_phys(cpu_ir[ra], addr);
2935 break;
2936 case 0x1:
2937 /* Quadword physical access (hw_ldq/p) */
2938 gen_helper_ldq_phys(cpu_ir[ra], addr);
2939 break;
2940 case 0x2:
2941 /* Longword physical access with lock (hw_ldl_l/p) */
2942 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
2943 break;
2944 case 0x3:
2945 /* Quadword physical access with lock (hw_ldq_l/p) */
2946 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
2947 break;
2948 case 0x4:
2949 /* Longword virtual PTE fetch (hw_ldl/v) */
2950 goto invalid_opc;
2951 case 0x5:
2952 /* Quadword virtual PTE fetch (hw_ldq/v) */
2953 goto invalid_opc;
2955 case 0x6:
2956 /* Invalid */
2957 goto invalid_opc;
2958 case 0x7:
2959 /* Invalid */
2960 goto invalid_opc;
2961 case 0x8:
2962 /* Longword virtual access (hw_ldl) */
2963 goto invalid_opc;
2964 case 0x9:
2965 /* Quadword virtual access (hw_ldq) */
2966 goto invalid_opc;
2967 case 0xA:
2968 /* Longword virtual access with protection check (hw_ldl/w) */
2969 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2970 break;
2971 case 0xB:
2972 /* Quadword virtual access with protection check (hw_ldq/w) */
2973 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2974 break;
2975 case 0xC:
2976 /* Longword virtual access with alt access mode (hw_ldl/a) */
2977 goto invalid_opc;
2978 case 0xD:
2979 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2980 goto invalid_opc;
2981 case 0xE:
2982 /* Longword virtual access with alternate access mode and
2983 protection checks (hw_ldl/wa) */
2984 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2985 break;
2986 case 0xF:
2987 /* Quadword virtual access with alternate access mode and
2988 protection checks (hw_ldq/wa) */
2989 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2990 break;
2991 }
2992 tcg_temp_free(addr);
2993 break;
2994 }
2995 #endif
2996 goto invalid_opc;
2997 case 0x1C:
2998 switch (fn7) {
2999 case 0x00:
3000 /* SEXTB */
3001 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
3002 goto invalid_opc;
3003 }
3004 if (likely(rc != 31)) {
3005 if (islit)
3006 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
3007 else
3008 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
3009 }
3010 break;
3011 case 0x01:
3012 /* SEXTW */
3013 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
3014 if (likely(rc != 31)) {
3015 if (islit) {
3016 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
3017 } else {
3018 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
3019 }
3020 }
3021 break;
3022 }
3023 goto invalid_opc;
3024 case 0x30:
3025 /* CTPOP */
3026 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3027 if (likely(rc != 31)) {
3028 if (islit) {
3029 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
3030 } else {
3031 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
3032 }
3033 }
3034 break;
3035 }
3036 goto invalid_opc;
3037 case 0x31:
3038 /* PERR */
3039 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3040 gen_perr(ra, rb, rc, islit, lit);
3041 break;
3042 }
3043 goto invalid_opc;
3044 case 0x32:
3045 /* CTLZ */
3046 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3047 if (likely(rc != 31)) {
3048 if (islit) {
3049 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3050 } else {
3051 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
3052 }
3053 }
3054 break;
3055 }
3056 goto invalid_opc;
3057 case 0x33:
3058 /* CTTZ */
3059 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3060 if (likely(rc != 31)) {
3061 if (islit) {
3062 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3063 } else {
3064 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3065 }
3066 }
3067 break;
3068 }
3069 goto invalid_opc;
3070 case 0x34:
3071 /* UNPKBW */
3072 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3073 if (real_islit || ra != 31) {
3074 goto invalid_opc;
3075 }
3076 gen_unpkbw(rb, rc);
3077 break;
3078 }
3079 goto invalid_opc;
3080 case 0x35:
3081 /* UNPKBL */
3082 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3083 if (real_islit || ra != 31) {
3084 goto invalid_opc;
3085 }
3086 gen_unpkbl(rb, rc);
3087 break;
3088 }
3089 goto invalid_opc;
3090 case 0x36:
3091 /* PKWB */
3092 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3093 if (real_islit || ra != 31) {
3094 goto invalid_opc;
3095 }
3096 gen_pkwb(rb, rc);
3097 break;
3098 }
3099 goto invalid_opc;
3100 case 0x37:
3101 /* PKLB */
3102 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3103 if (real_islit || ra != 31) {
3104 goto invalid_opc;
3105 }
3106 gen_pklb(rb, rc);
3107 break;
3108 }
3109 goto invalid_opc;
3110 case 0x38:
3111 /* MINSB8 */
3112 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3113 gen_minsb8(ra, rb, rc, islit, lit);
3114 break;
3115 }
3116 goto invalid_opc;
3117 case 0x39:
3118 /* MINSW4 */
3119 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3120 gen_minsw4(ra, rb, rc, islit, lit);
3121 break;
3122 }
3123 goto invalid_opc;
3124 case 0x3A:
3125 /* MINUB8 */
3126 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3127 gen_minub8(ra, rb, rc, islit, lit);
3128 break;
3129 }
3130 goto invalid_opc;
3131 case 0x3B:
3132 /* MINUW4 */
3133 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3134 gen_minuw4(ra, rb, rc, islit, lit);
3135 break;
3136 }
3137 goto invalid_opc;
3138 case 0x3C:
3139 /* MAXUB8 */
3140 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3141 gen_maxub8(ra, rb, rc, islit, lit);
3142 break;
3143 }
3144 goto invalid_opc;
3145 case 0x3D:
3146 /* MAXUW4 */
3147 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3148 gen_maxuw4(ra, rb, rc, islit, lit);
3149 break;
3150 }
3151 goto invalid_opc;
3152 case 0x3E:
3153 /* MAXSB8 */
3154 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3155 gen_maxsb8(ra, rb, rc, islit, lit);
3156 break;
3157 }
3158 goto invalid_opc;
3159 case 0x3F:
3160 /* MAXSW4 */
3161 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3162 gen_maxsw4(ra, rb, rc, islit, lit);
3163 break;
3164 }
3165 goto invalid_opc;
3166 case 0x70:
3167 /* FTOIT */
3168 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3169 goto invalid_opc;
3170 }
3171 if (likely(rc != 31)) {
3172 if (ra != 31)
3173 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3174 else
3175 tcg_gen_movi_i64(cpu_ir[rc], 0);
3176 }
3177 break;
3178 case 0x78:
3179 /* FTOIS */
3180 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3181 goto invalid_opc;
3182 }
3183 if (rc != 31) {
3184 TCGv_i32 tmp1 = tcg_temp_new_i32();
3185 if (ra != 31)
3186 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3187 else {
3188 TCGv tmp2 = tcg_const_i64(0);
3189 gen_helper_s_to_memory(tmp1, tmp2);
3190 tcg_temp_free(tmp2);
3191 }
3192 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3193 tcg_temp_free_i32(tmp1);
3194 }
3195 break;
3196 default:
3197 goto invalid_opc;
3198 }
3199 break;
3200 case 0x1D:
3201 /* HW_MTPR (PALcode) */
3202 #ifndef CONFIG_USER_ONLY
3203 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3204 return gen_mtpr(ctx, rb, insn & 0xffff);
3205 }
3206 #endif
3207 goto invalid_opc;
3208 case 0x1E:
3209 /* HW_RET (PALcode) */
3210 #ifndef CONFIG_USER_ONLY
3211 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3212 if (rb == 31) {
3213 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3214 address from EXC_ADDR. This turns out to be useful for our
3215 emulation PALcode, so continue to accept it. */
3216 TCGv tmp = tcg_temp_new();
3217 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3218 gen_helper_hw_ret(cpu_env, tmp);
3219 tcg_temp_free(tmp);
3220 } else {
3221 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
3222 }
3223 ret = EXIT_PC_UPDATED;
3224 break;
3225 }
3226 #endif
3227 goto invalid_opc;
3228 case 0x1F:
3229 /* HW_ST (PALcode) */
3230 #ifndef CONFIG_USER_ONLY
3231 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3232 TCGv addr, val;
3233 addr = tcg_temp_new();
3234 if (rb != 31)
3235 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3236 else
3237 tcg_gen_movi_i64(addr, disp12);
3238 if (ra != 31)
3239 val = cpu_ir[ra];
3240 else {
3241 val = tcg_temp_new();
3242 tcg_gen_movi_i64(val, 0);
3243 }
3244 switch ((insn >> 12) & 0xF) {
3245 case 0x0:
3246 /* Longword physical access */
3247 gen_helper_stl_phys(addr, val);
3248 break;
3249 case 0x1:
3250 /* Quadword physical access */
3251 gen_helper_stq_phys(addr, val);
3252 break;
3253 case 0x2:
3254 /* Longword physical access with lock */
3255 gen_helper_stl_c_phys(val, cpu_env, addr, val);
3256 break;
3257 case 0x3:
3258 /* Quadword physical access with lock */
3259 gen_helper_stq_c_phys(val, cpu_env, addr, val);
3260 break;
3261 case 0x4:
3262 /* Longword virtual access */
3263 goto invalid_opc;
3264 case 0x5:
3265 /* Quadword virtual access */
3266 goto invalid_opc;
3267 case 0x6:
3268 /* Invalid */
3269 goto invalid_opc;
3270 case 0x7:
3271 /* Invalid */
3272 goto invalid_opc;
3273 case 0x8:
3274 /* Invalid */
3275 goto invalid_opc;
3276 case 0x9:
3277 /* Invalid */
3278 goto invalid_opc;
3279 case 0xA:
3280 /* Invalid */
3281 goto invalid_opc;
3282 case 0xB:
3283 /* Invalid */
3284 goto invalid_opc;
3285 case 0xC:
3286 /* Longword virtual access with alternate access mode */
3287 goto invalid_opc;
3288 case 0xD:
3289 /* Quadword virtual access with alternate access mode */
3290 goto invalid_opc;
3291 case 0xE:
3292 /* Invalid */
3293 goto invalid_opc;
3294 case 0xF:
3295 /* Invalid */
3296 goto invalid_opc;
3297 }
3298 if (ra == 31)
3299 tcg_temp_free(val);
3300 tcg_temp_free(addr);
3301 break;
3302 }
3303 #endif
3304 goto invalid_opc;
3305 case 0x20:
3306 /* LDF */
3307 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3308 break;
3309 case 0x21:
3310 /* LDG */
3311 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3312 break;
3313 case 0x22:
3314 /* LDS */
3315 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3316 break;
3317 case 0x23:
3318 /* LDT */
3319 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3320 break;
3321 case 0x24:
3322 /* STF */
3323 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3324 break;
3325 case 0x25:
3326 /* STG */
3327 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3328 break;
3329 case 0x26:
3330 /* STS */
3331 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3332 break;
3333 case 0x27:
3334 /* STT */
3335 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3336 break;
3337 case 0x28:
3338 /* LDL */
3339 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3340 break;
3341 case 0x29:
3342 /* LDQ */
3343 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3344 break;
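/* The load-locked forms record the locked address and value so that a
   subsequent STL_C/STQ_C can be validated against them by
   gen_store_conditional. */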
3345 case 0x2A:
3346 /* LDL_L */
3347 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3348 break;
3349 case 0x2B:
3350 /* LDQ_L */
3351 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3352 break;
3353 case 0x2C:
3354 /* STL */
3355 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3356 break;
3357 case 0x2D:
3358 /* STQ */
3359 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3360 break;
3361 case 0x2E:
3362 /* STL_C */
3363 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3364 break;
3365 case 0x2F:
3366 /* STQ_C */
3367 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3368 break;
3369 case 0x30:
3370 /* BR */
3371 ret = gen_bdirect(ctx, ra, disp21);
3372 break;
3373 case 0x31: /* FBEQ */
3374 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3375 break;
3376 case 0x32: /* FBLT */
3377 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3378 break;
3379 case 0x33: /* FBLE */
3380 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3381 break;
3382 case 0x34:
3383 /* BSR */
3384 ret = gen_bdirect(ctx, ra, disp21);
3385 break;
3386 case 0x35: /* FBNE */
3387 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3388 break;
3389 case 0x36: /* FBGE */
3390 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3391 break;
3392 case 0x37: /* FBGT */
3393 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3394 break;
3395 case 0x38:
3396 /* BLBC */
3397 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3398 break;
3399 case 0x39:
3400 /* BEQ */
3401 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3402 break;
3403 case 0x3A:
3404 /* BLT */
3405 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3406 break;
3407 case 0x3B:
3408 /* BLE */
3409 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3410 break;
3411 case 0x3C:
3412 /* BLBS */
3413 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3414 break;
3415 case 0x3D:
3416 /* BNE */
3417 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3418 break;
3419 case 0x3E:
3420 /* BGE */
3421 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3422 break;
3423 case 0x3F:
3424 /* BGT */
3425 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3426 break;
3427 invalid_opc:
3428 ret = gen_invalid(ctx);
3429 break;
3430 }
3431
3432 return ret;
3433 }
3434
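/* Translate a block of guest insns starting at tb->pc.  When search_pc
   is set we additionally record, per generated op, the guest PC and
   instruction count that produced it, which is what allows
   restore_state_to_opc to map a host fault back to a guest PC. */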
3435 static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
3436 TranslationBlock *tb,
3437 bool search_pc)
3438 {
3439 CPUState *cs = CPU(cpu);
3440 CPUAlphaState *env = &cpu->env;
3441 DisasContext ctx, *ctxp = &ctx;
3442 target_ulong pc_start;
3443 target_ulong pc_mask;
3444 uint32_t insn;
3445 uint16_t *gen_opc_end;
3446 CPUBreakpoint *bp;
3447 int j, lj = -1;
3448 ExitStatus ret;
3449 int num_insns;
3450 int max_insns;
3451
3452 pc_start = tb->pc;
3453 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3454
3455 ctx.tb = tb;
3456 ctx.pc = pc_start;
3457 ctx.mem_idx = cpu_mmu_index(env);
3458 ctx.implver = env->implver;
3459 ctx.singlestep_enabled = cs->singlestep_enabled;
3460
3461 /* ??? Every TB begins with unset rounding mode, to be initialized on
3462 the first fp insn of the TB. Alternatively we could define a proper
3463 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3464 to reset the FP_STATUS to that default at the end of any TB that
3465 changes the default. We could even (gasp) dynamically figure out
3466 what default would be most efficient given the running program. */
3467 ctx.tb_rm = -1;
3468 /* Similarly for flush-to-zero. */
3469 ctx.tb_ftz = -1;
3470
3471 num_insns = 0;
3472 max_insns = tb->cflags & CF_COUNT_MASK;
3473 if (max_insns == 0) {
3474 max_insns = CF_COUNT_MASK;
3475 }
3476
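/* Within a superpage the mapping is linear, so it is safe to let a TB
   cross page boundaries there; the 41-bit mask below only keeps the
   TB inside the superpage itself.  Otherwise stop at the end of the
   current page as usual. */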
3477 if (in_superpage(&ctx, pc_start)) {
3478 pc_mask = (1ULL << 41) - 1;
3479 } else {
3480 pc_mask = ~TARGET_PAGE_MASK;
3481 }
3482
3483 gen_tb_start();
3484 do {
3485 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3486 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3487 if (bp->pc == ctx.pc) {
3488 gen_excp(&ctx, EXCP_DEBUG, 0);
3489 break;
3490 }
3491 }
3492 }
3493 if (search_pc) {
3494 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3495 if (lj < j) {
3496 lj++;
3497 while (lj < j)
3498 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3499 }
3500 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
3501 tcg_ctx.gen_opc_instr_start[lj] = 1;
3502 tcg_ctx.gen_opc_icount[lj] = num_insns;
3503 }
3504 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3505 gen_io_start();
3506 insn = cpu_ldl_code(env, ctx.pc);
3507 num_insns++;
3508
3509 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3510 tcg_gen_debug_insn_start(ctx.pc);
3511 }
3512
3513 ctx.pc += 4;
3514 ret = translate_one(ctxp, insn);
3515
3516 /* If we reach a page boundary, are single-stepping,
3517 or exhaust the instruction count, stop generation. */
3518 if (ret == NO_EXIT
3519 && ((ctx.pc & pc_mask) == 0
3520 || tcg_ctx.gen_opc_ptr >= gen_opc_end
3521 || num_insns >= max_insns
3522 || singlestep
3523 || ctx.singlestep_enabled)) {
3524 ret = EXIT_PC_STALE;
3525 }
3526 } while (ret == NO_EXIT);
3527
3528 if (tb->cflags & CF_LAST_IO) {
3529 gen_io_end();
3530 }
3531
3532 switch (ret) {
3533 case EXIT_GOTO_TB:
3534 case EXIT_NORETURN:
3535 break;
3536 case EXIT_PC_STALE:
3537 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3538 /* FALLTHRU */
3539 case EXIT_PC_UPDATED:
3540 if (ctx.singlestep_enabled) {
3541 gen_excp_1(EXCP_DEBUG, 0);
3542 } else {
3543 tcg_gen_exit_tb(0);
3544 }
3545 break;
3546 default:
3547 abort();
3548 }
3549
3550 gen_tb_end(tb, num_insns);
3551 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3552 if (search_pc) {
3553 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3554 lj++;
3555 while (lj <= j)
3556 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3557 } else {
3558 tb->size = ctx.pc - pc_start;
3559 tb->icount = num_insns;
3560 }
3561
3562 #ifdef DEBUG_DISAS
3563 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3564 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3565 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
3566 qemu_log("\n");
3567 }
3568 #endif
3569 }
3570
3571 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
3572 {
3573 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
3574 }
3575
3576 void gen_intermediate_code_pc(CPUAlphaState *env, struct TranslationBlock *tb)
3577 {
3578 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
3579 }
3580
3581 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
3582 {
3583 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
3584 }