/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb. No fixup required. */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
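
/* Aside: a standalone sketch (not part of the original source) checking
   that the cpu_reg_names sizing arithmetic above is exact: "ir0".."ir9"
   need 4 bytes each including the NUL, "ir10".."ir30" need 5, and the
   "fir" names are one byte longer. */
#if 0
#include <assert.h>
#include <stdio.h>

int main(void)
{
    char buf[10*4 + 21*5 + 10*5 + 21*6];
    char *p = buf;
    int i;

    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        p += (i < 10) ? 4 : 5;          /* strlen + 1 */
        sprintf(p, "fir%d", i);
        p += (i < 10) ? 5 : 6;
    }
    assert(p == buf + sizeof(buf));     /* every byte accounted for */
    printf("used %zu bytes\n", sizeof(buf));
    return 0;
}
#endif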

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
       prefetches, which we can treat as nops. No worries about
       missed exceptions here. */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything. The user can't tell
           the difference, since the zero register always reads zero. */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop. Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary. */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence. */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
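
/* Aside: a standalone sketch (not part of the original source) of the
   superpage test above, with TARGET_VIRT_ADDR_SPACE_BITS assumed to be
   43. KSEG addresses have the sign bit set, bits <42:41> equal to 2,
   and everything above the virtual address space sign-extended. */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VIRT_BITS 43    /* assumed value, for illustration only */

static bool is_superpage(int64_t addr)
{
    return addr < 0
        && ((addr >> 41) & 3) == 2
        && (addr >> VIRT_BITS) == (addr >> 63);
}

int main(void)
{
    printf("%d\n", is_superpage((int64_t)0xfffffc0000001000ull));  /* 1 */
    printf("%d\n", is_superpage(0x1000));                          /* 0 */
    return 0;
}
#endif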

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO. */
    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change. */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB. */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
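
/* Aside: an illustrative sketch (not from the original source) of the
   same-page test above, assuming the usual 8 KB Alpha target page so
   that TARGET_PAGE_MASK is ~0x1fff. Chaining is only safe when the
   branch target shares a page with the TB start, since another page
   may carry different permissions. */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~(uint64_t)0x1fff)   /* assumption: 8 KB pages */

static bool same_page(uint64_t tb_pc, uint64_t dest)
{
    return ((tb_pc ^ dest) & PAGE_MASK) == 0;
}

int main(void)
{
    printf("%d\n", same_page(0x120001000, 0x120001ffc));    /* 1 */
    printf("%d\n", same_page(0x120001000, 0x120002000));    /* 0 */
    return 0;
}
#endif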

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC. */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND. */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare. */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
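
/* Aside: a standalone demonstration (not part of the original source) of
   why the folding works: -0.0 is the single bit pattern 1 << 63, so for
   EQ/NE clearing the sign bit maps it onto +0.0, while for the other
   conditions the raw bits, viewed as a signed integer, already order as
   the comparison requires. */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    double mz = -0.0;
    uint64_t bits;

    memcpy(&bits, &mz, sizeof(bits));
    printf("-0.0   = %016" PRIx64 "\n", bits);                      /* 8000... */
    printf("folded = %016" PRIx64 "\n", bits & ((1ull << 63) - 1)); /* 0 */
    return 0;
}
#endif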

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison. */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize. */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
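
/* Aside: a sketch (not part of the original source) of how the qualifier
   bits decompose an 11-bit function code. 0x7A0 should be ADDT/SUI:
   software completion, underflow and inexact enabled, round to nearest,
   with the low six bits selecting the ADDT operation itself. */
#if 0
#include <stdio.h>

int main(void)
{
    int fn11 = 0x7A0;

    printf("fpfn = 0x%02x\n", fn11 & 0x3f);     /* 0x20, ADDT */
    printf("rm   = 0x%03x\n", fn11 & 0x0c0);    /* 0x080, nearest even */
    printf("S=%d U=%d I=%d\n",
           !!(fn11 & 0x400), !!(fn11 & 0x100), !!(fn11 & 0x200));
    return 0;
}
#endif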

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps. A simple userspace emulation
       does not require this. We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64. */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
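
/* Aside: a plain-C sketch (not part of the original source) of the CVTLQ
   bit shuffle above and its CVTQL inverse. In the in-register longword
   format, bits <31:30> of the integer live at <63:62> and bits <29:0>
   at <58:29>; the TCG version avoids the final ext32s by combining an
   arithmetic shift with a sign-extended mask. */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int64_t cvtlq(uint64_t fp)
{
    uint64_t hi = (fp >> 32) & 0xc0000000;
    uint64_t lo = (fp >> 29) & 0x3fffffff;
    return (int64_t)(int32_t)(hi | lo);     /* sign-extend from bit 31 */
}

static uint64_t cvtql(uint64_t q)
{
    return ((q & 0xc0000000ull) << 32) | ((q & 0x3fffffffull) << 29);
}

int main(void)
{
    int64_t v = -123456789;
    printf("%" PRId64 "\n", cvtlq(cvtql((uint64_t)v)));     /* -123456789 */
    return 0;
}
#endif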

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored. */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output. */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled. Special case that. */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact. Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored. */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
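
/* Aside: a standalone sketch (not part of the original source) showing
   the mask expansion: each set bit of the 8-bit literal preserves the
   corresponding byte of the 64-bit operand. */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t expand_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

int main(void)
{
    printf("%016" PRIx64 "\n", expand_mask(0x0f));  /* 00000000ffffffff */
    printf("%016" PRIx64 "\n", expand_mask(0xa5));  /* ff00ff0000ff00ff */
    return 0;
}
#endif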

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND. This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}


/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end. This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above. */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1. Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63. */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
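
/* Aside: a standalone sketch (not part of the original source) of the
   two-part shift trick used above. A shift count of 64 is undefined in
   C and on most hosts, so the code shifts by ~((rb & 7) * 8) & 63 --
   i.e. shift_count - 1 -- and then by a constant 1, which safely
   produces zero when (rb & 7) == 0. */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t shift_right_0_to_64(uint64_t val, uint64_t rb)
{
    uint64_t shift = ~((rb & 7) * 8) & 63;  /* (64 - (rb & 7) * 8) - 1 */
    return (val >> shift) >> 1;
}

int main(void)
{
    uint64_t v = 0x1122334455667788ull;

    printf("%016" PRIx64 "\n", shift_right_0_to_64(v, 0));  /* 0 */
    printf("%016" PRIx64 "\n", shift_right_0_to_64(v, 3));  /* v >> 40 */
    return 0;
}
#endif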

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end. This is equivalent to simply performing the zap
           first and shifting afterward. */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>. This can be
           emulated with a right-shift on the expanded byte mask. This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero. This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift. The code we expand below is
           equivalent to ~((B & 7) * 8) & 63. */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                    \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,  \
                                    uint8_t lit)                        \
{                                                                       \
    if (unlikely(rc == 31))                                             \
        return;                                                         \
                                                                        \
    if (ra != 31) {                                                     \
        if (islit) {                                                    \
            TCGv tmp = tcg_const_i64(lit);                              \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);           \
            tcg_temp_free(tmp);                                         \
        } else                                                          \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);    \
    } else {                                                            \
        TCGv tmp1 = tcg_const_i64(0);                                   \
        if (islit) {                                                    \
            TCGv tmp2 = tcg_const_i64(lit);                             \
            gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);                \
            tcg_temp_free(tmp2);                                        \
        } else                                                          \
            gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);          \
        tcg_temp_free(tmp1);                                            \
    }                                                                   \
}
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

/* Code to call arith3 helpers */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);    \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode. Many of these are trivial access
       to internal cpu registers. */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU. */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU. */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU. */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits. */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user. */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check. We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register. */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((tcg_target_long)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
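
/* Aside: a sketch (not part of the original source) of the entry-point
   arithmetic in the tcg_const_i64 above: privileged PALcalls (0x00-0x3F)
   vector to offset 0x1000 + index * 64 and unprivileged ones (0x80-0xBF)
   to 0x2000 + (index - 0x80) * 64, offsets the call_pal helper is
   presumed to apply relative to the PAL base. */
#if 0
#include <stdio.h>

static unsigned pal_entry_ofs(int palcode)
{
    return palcode & 0x80
        ? 0x2000 + (palcode - 0x80) * 64
        : 0x1000 + palcode * 64;
}

int main(void)
{
    printf("0x%04x\n", pal_entry_ofs(0x83));    /* 0x20c0 */
    printf("0x%04x\n", pal_entry_ofs(0x02));    /* 0x1080 */
    return 0;
}
#endif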

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
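
/* Aside: a sketch (not part of the original source) of the tag encoding
   used by cpu_pr_data: the PR_BYTE/PR_LONG width flags sit far above any
   realistic structure offset, so a single int can carry both the env
   offset and the access width. The offset below is hypothetical. */
#if 0
#include <stdio.h>

#define PR_BYTE 0x100000
#define PR_LONG 0x200000

int main(void)
{
    int data = 0x1234 | PR_BYTE;    /* hypothetical offset, byte-sized */

    if (data & PR_BYTE) {
        printf("8-bit access at offset %#x\n", data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        printf("32-bit access at offset %#x\n", data & ~PR_LONG);
    } else {
        printf("64-bit access at offset %#x\n", data);
    }
    return 0;
}
#endif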

static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading. */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME. */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore. */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL. Since the base register usually only
           changes during boot, flushing everything works well. */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
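
    /* Aside: a standalone sketch (not part of the original source) of the
       field decode above applied to one sample word. Memory-format layout:
       opcode<31:26> ra<25:21> rb<20:16> disp<15:0>; the disp21 expression
       (branch format only) sign-extends via the shift pair. */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t insn = 0xa45f0008;     /* decodes as opc 0x29, ra 2, rb 31 */
    unsigned opc = insn >> 26;
    unsigned ra = (insn >> 21) & 0x1f;
    unsigned rb = (insn >> 16) & 0x1f;
    int32_t disp16 = (int16_t)(insn & 0xffff);

    printf("opc %02x ra %u rb %u disp16 %d\n", opc, ra, rb, disp16);
    return 0;
}
#endif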
1821
1822 ret = NO_EXIT;
1823 switch (opc) {
1824 case 0x00:
1825 /* CALL_PAL */
1826 ret = gen_call_pal(ctx, palcode);
1827 break;
1828 case 0x01:
1829 /* OPC01 */
1830 goto invalid_opc;
1831 case 0x02:
1832 /* OPC02 */
1833 goto invalid_opc;
1834 case 0x03:
1835 /* OPC03 */
1836 goto invalid_opc;
1837 case 0x04:
1838 /* OPC04 */
1839 goto invalid_opc;
1840 case 0x05:
1841 /* OPC05 */
1842 goto invalid_opc;
1843 case 0x06:
1844 /* OPC06 */
1845 goto invalid_opc;
1846 case 0x07:
1847 /* OPC07 */
1848 goto invalid_opc;
1849 case 0x08:
1850 /* LDA */
1851 if (likely(ra != 31)) {
1852 if (rb != 31)
1853 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1854 else
1855 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1856 }
1857 break;
1858 case 0x09:
1859 /* LDAH */
1860 if (likely(ra != 31)) {
1861 if (rb != 31)
1862 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1863 else
1864 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1865 }
1866 break;
1867 case 0x0A:
1868 /* LDBU */
1869 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1870 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1871 break;
1872 }
1873 goto invalid_opc;
1874 case 0x0B:
1875 /* LDQ_U */
1876 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1877 break;
1878 case 0x0C:
1879 /* LDWU */
1880 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1881 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1882 break;
1883 }
1884 goto invalid_opc;
1885 case 0x0D:
1886 /* STW */
1887 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1888 break;
1889 case 0x0E:
1890 /* STB */
1891 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1892 break;
1893 case 0x0F:
1894 /* STQ_U */
1895 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1896 break;
1897 case 0x10:
1898 switch (fn7) {
1899 case 0x00:
1900 /* ADDL */
1901 if (likely(rc != 31)) {
1902 if (ra != 31) {
1903 if (islit) {
1904 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1905 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1906 } else {
1907 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1908 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1909 }
1910 } else {
1911 if (islit)
1912 tcg_gen_movi_i64(cpu_ir[rc], lit);
1913 else
1914 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1915 }
1916 }
1917 break;
1918 case 0x02:
1919 /* S4ADDL */
1920 if (likely(rc != 31)) {
1921 if (ra != 31) {
1922 TCGv tmp = tcg_temp_new();
1923 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1924 if (islit)
1925 tcg_gen_addi_i64(tmp, tmp, lit);
1926 else
1927 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1928 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1929 tcg_temp_free(tmp);
1930 } else {
1931 if (islit)
1932 tcg_gen_movi_i64(cpu_ir[rc], lit);
1933 else
1934 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1935 }
1936 }
1937 break;
1938 case 0x09:
1939 /* SUBL */
1940 if (likely(rc != 31)) {
1941 if (ra != 31) {
1942 if (islit)
1943 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1944 else
1945 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1946 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1947 } else {
1948 if (islit)
1949 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1950 else {
1951 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1952 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1953 }
1954 }
1955 break;
1956 case 0x0B:
1957 /* S4SUBL */
1958 if (likely(rc != 31)) {
1959 if (ra != 31) {
1960 TCGv tmp = tcg_temp_new();
1961 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1962 if (islit)
1963 tcg_gen_subi_i64(tmp, tmp, lit);
1964 else
1965 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1966 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1967 tcg_temp_free(tmp);
1968 } else {
1969 if (islit)
1970 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1971 else {
1972 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1973 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1974 }
1975 }
1976 }
1977 break;
1978 case 0x0F:
1979 /* CMPBGE */
1980 gen_cmpbge(ra, rb, rc, islit, lit);
1981 break;
1982 case 0x12:
1983 /* S8ADDL */
1984 if (likely(rc != 31)) {
1985 if (ra != 31) {
1986 TCGv tmp = tcg_temp_new();
1987 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1988 if (islit)
1989 tcg_gen_addi_i64(tmp, tmp, lit);
1990 else
1991 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1992 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1993 tcg_temp_free(tmp);
1994 } else {
1995 if (islit)
1996 tcg_gen_movi_i64(cpu_ir[rc], lit);
1997 else
1998 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1999 }
2000 }
2001 break;
2002 case 0x1B:
2003 /* S8SUBL */
2004 if (likely(rc != 31)) {
2005 if (ra != 31) {
2006 TCGv tmp = tcg_temp_new();
2007 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2008 if (islit)
2009 tcg_gen_subi_i64(tmp, tmp, lit);
2010 else
2011 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
2012 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
2013 tcg_temp_free(tmp);
2014 } else {
2015 if (islit)
2016 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2017 else
2018 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2019 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2020 }
2021 }
2022 }
2023 break;
2024 case 0x1D:
2025 /* CMPULT */
2026 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
2027 break;
2028 case 0x20:
2029 /* ADDQ */
2030 if (likely(rc != 31)) {
2031 if (ra != 31) {
2032 if (islit)
2033 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2034 else
2035 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2036 } else {
2037 if (islit)
2038 tcg_gen_movi_i64(cpu_ir[rc], lit);
2039 else
2040 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2041 }
2042 }
2043 break;
2044 case 0x22:
2045 /* S4ADDQ */
2046 if (likely(rc != 31)) {
2047 if (ra != 31) {
2048 TCGv tmp = tcg_temp_new();
2049 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2050 if (islit)
2051 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2052 else
2053 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2054 tcg_temp_free(tmp);
2055 } else {
2056 if (islit)
2057 tcg_gen_movi_i64(cpu_ir[rc], lit);
2058 else
2059 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2060 }
2061 }
2062 break;
2063 case 0x29:
2064 /* SUBQ */
2065 if (likely(rc != 31)) {
2066 if (ra != 31) {
2067 if (islit)
2068 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2069 else
2070 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2071 } else {
2072 if (islit)
2073 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2074 else
2075 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2076 }
2077 }
2078 break;
2079 case 0x2B:
2080 /* S4SUBQ */
2081 if (likely(rc != 31)) {
2082 if (ra != 31) {
2083 TCGv tmp = tcg_temp_new();
2084 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2085 if (islit)
2086 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2087 else
2088 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2089 tcg_temp_free(tmp);
2090 } else {
2091 if (islit)
2092 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2093 else
2094 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2095 }
2096 }
2097 break;
2098 case 0x2D:
2099 /* CMPEQ */
2100 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
2101 break;
2102 case 0x32:
2103 /* S8ADDQ */
2104 if (likely(rc != 31)) {
2105 if (ra != 31) {
2106 TCGv tmp = tcg_temp_new();
2107 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2108 if (islit)
2109 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2110 else
2111 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2112 tcg_temp_free(tmp);
2113 } else {
2114 if (islit)
2115 tcg_gen_movi_i64(cpu_ir[rc], lit);
2116 else
2117 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2118 }
2119 }
2120 break;
2121 case 0x3B:
2122 /* S8SUBQ */
2123 if (likely(rc != 31)) {
2124 if (ra != 31) {
2125 TCGv tmp = tcg_temp_new();
2126 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2127 if (islit)
2128 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2129 else
2130 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2131 tcg_temp_free(tmp);
2132 } else {
2133 if (islit)
2134 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2135 else
2136 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2137 }
2138 }
2139 break;
2140 case 0x3D:
2141 /* CMPULE */
2142 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2143 break;
2144 case 0x40:
2145 /* ADDL/V */
2146 gen_addlv(ra, rb, rc, islit, lit);
2147 break;
2148 case 0x49:
2149 /* SUBL/V */
2150 gen_sublv(ra, rb, rc, islit, lit);
2151 break;
2152 case 0x4D:
2153 /* CMPLT */
2154 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2155 break;
2156 case 0x60:
2157 /* ADDQ/V */
2158 gen_addqv(ra, rb, rc, islit, lit);
2159 break;
2160 case 0x69:
2161 /* SUBQ/V */
2162 gen_subqv(ra, rb, rc, islit, lit);
2163 break;
2164 case 0x6D:
2165 /* CMPLE */
2166 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2167 break;
2168 default:
2169 goto invalid_opc;
2170 }
2171 break;
2172 case 0x11:
2173 switch (fn7) {
2174 case 0x00:
2175 /* AND */
2176 if (likely(rc != 31)) {
2177 if (ra == 31)
2178 tcg_gen_movi_i64(cpu_ir[rc], 0);
2179 else if (islit)
2180 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2181 else
2182 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2183 }
2184 break;
2185 case 0x08:
2186 /* BIC */
2187 if (likely(rc != 31)) {
2188 if (ra != 31) {
2189 if (islit)
2190 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2191 else
2192 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2193 } else
2194 tcg_gen_movi_i64(cpu_ir[rc], 0);
2195 }
2196 break;
2197 case 0x14:
2198 /* CMOVLBS */
2199 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2200 break;
2201 case 0x16:
2202 /* CMOVLBC */
2203 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2204 break;
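 /* The CMOV family implements rc = rb (or the literal) when a
    condition on ra holds.  The final argument to gen_cmov selects what
    is tested: 1 means test only bit 0 of ra (CMOVLBS/CMOVLBC, low bit
    set/clear), 0 means compare the whole register against zero.  */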
2205 case 0x20:
2206 /* BIS */
2207 if (likely(rc != 31)) {
2208 if (ra != 31) {
2209 if (islit)
2210 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2211 else
2212 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2213 } else {
2214 if (islit)
2215 tcg_gen_movi_i64(cpu_ir[rc], lit);
2216 else
2217 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2218 }
2219 }
2220 break;
2221 case 0x24:
2222 /* CMOVEQ */
2223 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2224 break;
2225 case 0x26:
2226 /* CMOVNE */
2227 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2228 break;
2229 case 0x28:
2230 /* ORNOT */
2231 if (likely(rc != 31)) {
2232 if (ra != 31) {
2233 if (islit)
2234 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2235 else
2236 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2237 } else {
2238 if (islit)
2239 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2240 else
2241 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2242 }
2243 }
2244 break;
2245 case 0x40:
2246 /* XOR */
2247 if (likely(rc != 31)) {
2248 if (ra != 31) {
2249 if (islit)
2250 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2251 else
2252 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2253 } else {
2254 if (islit)
2255 tcg_gen_movi_i64(cpu_ir[rc], lit);
2256 else
2257 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2258 }
2259 }
2260 break;
2261 case 0x44:
2262 /* CMOVLT */
2263 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2264 break;
2265 case 0x46:
2266 /* CMOVGE */
2267 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2268 break;
2269 case 0x48:
2270 /* EQV */
2271 if (likely(rc != 31)) {
2272 if (ra != 31) {
2273 if (islit)
2274 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2275 else
2276 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2277 } else {
2278 if (islit)
2279 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2280 else
2281 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2282 }
2283 }
2284 break;
2285 case 0x61:
2286 /* AMASK */
2287 if (likely(rc != 31)) {
2288 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2289
2290 if (islit) {
2291 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2292 } else {
2293 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2294 }
2295 }
2296 break;
2297 case 0x64:
2298 /* CMOVLE */
2299 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2300 break;
2301 case 0x66:
2302 /* CMOVGT */
2303 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2304 break;
2305 case 0x6C:
2306 /* IMPLVER */
2307 if (rc != 31) {
2308 tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
2309 }
2310 break;
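 /* AMASK and IMPLVER are the CPU feature-discovery instructions.
    AMASK clears the bits of its operand corresponding to implemented
    architecture extensions, so e.g. amask(1) yields 0 when BWX is
    present; IMPLVER returns the implementation family (0 = EV4,
    1 = EV5, 2 = EV6), taken here from the translation context.  */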
2311 default:
2312 goto invalid_opc;
2313 }
2314 break;
2315 case 0x12:
2316 switch (fn7) {
2317 case 0x02:
2318 /* MSKBL */
2319 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2320 break;
2321 case 0x06:
2322 /* EXTBL */
2323 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2324 break;
2325 case 0x0B:
2326 /* INSBL */
2327 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2328 break;
2329 case 0x12:
2330 /* MSKWL */
2331 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2332 break;
2333 case 0x16:
2334 /* EXTWL */
2335 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2336 break;
2337 case 0x1B:
2338 /* INSWL */
2339 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2340 break;
2341 case 0x22:
2342 /* MSKLL */
2343 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2344 break;
2345 case 0x26:
2346 /* EXTLL */
2347 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2348 break;
2349 case 0x2B:
2350 /* INSLL */
2351 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2352 break;
2353 case 0x30:
2354 /* ZAP */
2355 gen_zap(ra, rb, rc, islit, lit);
2356 break;
2357 case 0x31:
2358 /* ZAPNOT */
2359 gen_zapnot(ra, rb, rc, islit, lit);
2360 break;
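 /* ZAP/ZAPNOT take a byte mask from rb<7:0> (or the literal): bit i
    set means byte i of ra is zeroed (ZAP) or preserved (ZAPNOT).
    E.g. ZAPNOT ra, 0x0f, rc zero-extends the low 32 bits of ra.  */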
2361 case 0x32:
2362 /* MSKQL */
2363 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2364 break;
2365 case 0x34:
2366 /* SRL */
2367 if (likely(rc != 31)) {
2368 if (ra != 31) {
2369 if (islit)
2370 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2371 else {
2372 TCGv shift = tcg_temp_new();
2373 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2374 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2375 tcg_temp_free(shift);
2376 }
2377 } else
2378 tcg_gen_movi_i64(cpu_ir[rc], 0);
2379 }
2380 break;
2381 case 0x36:
2382 /* EXTQL */
2383 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2384 break;
2385 case 0x39:
2386 /* SLL */
2387 if (likely(rc != 31)) {
2388 if (ra != 31) {
2389 if (islit)
2390 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2391 else {
2392 TCGv shift = tcg_temp_new();
2393 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2394 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2395 tcg_temp_free(shift);
2396 }
2397 } else
2398 tcg_gen_movi_i64(cpu_ir[rc], 0);
2399 }
2400 break;
2401 case 0x3B:
2402 /* INSQL */
2403 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2404 break;
2405 case 0x3C:
2406 /* SRA */
2407 if (likely(rc != 31)) {
2408 if (ra != 31) {
2409 if (islit)
2410 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2411 else {
2412 TCGv shift = tcg_temp_new();
2413 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2414 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2415 tcg_temp_free(shift);
2416 }
2417 } else
2418 tcg_gen_movi_i64(cpu_ir[rc], 0);
2419 }
2420 break;
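 /* For SLL/SRL/SRA only the low 6 bits of the shift count are
    significant, hence the "& 0x3f" on both the literal and the
    register paths; without it, a TCG shift by 64 or more would be
    undefined behaviour.  */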
2421 case 0x52:
2422 /* MSKWH */
2423 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2424 break;
2425 case 0x57:
2426 /* INSWH */
2427 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2428 break;
2429 case 0x5A:
2430 /* EXTWH */
2431 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2432 break;
2433 case 0x62:
2434 /* MSKLH */
2435 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2436 break;
2437 case 0x67:
2438 /* INSLH */
2439 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2440 break;
2441 case 0x6A:
2442 /* EXTLH */
2443 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2444 break;
2445 case 0x72:
2446 /* MSKQH */
2447 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2448 break;
2449 case 0x77:
2450 /* INSQH */
2451 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2452 break;
2453 case 0x7A:
2454 /* EXTQH */
2455 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2456 break;
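 /* MSKxx/EXTxx/INSxx are the byte-manipulation primitives used to
    synthesize unaligned and sub-quadword memory accesses.  The last
    argument is the byte mask naming the operand width (0x01 byte,
    0x03 word, 0x0f longword, 0xff quadword); the _l variants operate
    on the low part of the datum, the _h variants on the high part
    that spills into the next quadword.  */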
2457 default:
2458 goto invalid_opc;
2459 }
2460 break;
2461 case 0x13:
2462 switch (fn7) {
2463 case 0x00:
2464 /* MULL */
2465 if (likely(rc != 31)) {
2466 if (ra == 31)
2467 tcg_gen_movi_i64(cpu_ir[rc], 0);
2468 else {
2469 if (islit)
2470 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2471 else
2472 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2473 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2474 }
2475 }
2476 break;
2477 case 0x20:
2478 /* MULQ */
2479 if (likely(rc != 31)) {
2480 if (ra == 31)
2481 tcg_gen_movi_i64(cpu_ir[rc], 0);
2482 else if (islit)
2483 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2484 else
2485 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2486 }
2487 break;
2488 case 0x30:
2489 /* UMULH */
2490 {
2491 TCGv low;
2492 if (unlikely(rc == 31)) {
2493 break;
2494 }
2495 if (ra == 31) {
2496 tcg_gen_movi_i64(cpu_ir[rc], 0);
2497 break;
2498 }
2499 low = tcg_temp_new();
2500 if (islit) {
2501 tcg_gen_movi_i64(low, lit);
2502 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2503 } else {
2504 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2505 }
2506 tcg_temp_free(low);
2507 }
2508 break;
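 /* tcg_gen_mulu2_i64 computes the full 128-bit unsigned product as a
    (low, high) pair; UMULH wants only the high half, so the low half
    lands in a temporary that is immediately discarded.  */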
2509 case 0x40:
2510 /* MULL/V */
2511 gen_mullv(ra, rb, rc, islit, lit);
2512 break;
2513 case 0x60:
2514 /* MULQ/V */
2515 gen_mulqv(ra, rb, rc, islit, lit);
2516 break;
2517 default:
2518 goto invalid_opc;
2519 }
2520 break;
2521 case 0x14:
2522 switch (fpfn) { /* fn11 & 0x3F */
2523 case 0x04:
2524 /* ITOFS */
2525 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2526 goto invalid_opc;
2527 }
2528 if (likely(rc != 31)) {
2529 if (ra != 31) {
2530 TCGv_i32 tmp = tcg_temp_new_i32();
2531 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2532 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2533 tcg_temp_free_i32(tmp);
2534 } else
2535 tcg_gen_movi_i64(cpu_fir[rc], 0);
2536 }
2537 break;
2538 case 0x0A:
2539 /* SQRTF */
2540 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2541 gen_fsqrtf(rb, rc);
2542 break;
2543 }
2544 goto invalid_opc;
2545 case 0x0B:
2546 /* SQRTS */
2547 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2548 gen_fsqrts(ctx, rb, rc, fn11);
2549 break;
2550 }
2551 goto invalid_opc;
2552 case 0x14:
2553 /* ITOFF */
2554 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2555 goto invalid_opc;
2556 }
2557 if (likely(rc != 31)) {
2558 if (ra != 31) {
2559 TCGv_i32 tmp = tcg_temp_new_i32();
2560 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2561 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2562 tcg_temp_free_i32(tmp);
2563 } else
2564 tcg_gen_movi_i64(cpu_fir[rc], 0);
2565 }
2566 break;
2567 case 0x24:
2568 /* ITOFT */
2569 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2570 goto invalid_opc;
2571 }
2572 if (likely(rc != 31)) {
2573 if (ra != 31)
2574 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2575 else
2576 tcg_gen_movi_i64(cpu_fir[rc], 0);
2577 }
2578 break;
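 /* ITOFS/ITOFF/ITOFT move raw bits from an integer register into a
    floating-point register without value conversion: ITOFT is a plain
    64-bit move, while ITOFS/ITOFF repack the 32-bit memory format
    into register format via the memory_to_s/memory_to_f helpers.
    All three belong to the FIX extension, hence the AMASK_FIX checks.  */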
2579 case 0x2A:
2580 /* SQRTG */
2581 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2582 gen_fsqrtg(rb, rc);
2583 break;
2584 }
2585 goto invalid_opc;
2586 case 0x2B:
2587 /* SQRTT */
2588 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2589 gen_fsqrtt(ctx, rb, rc, fn11);
2590 break;
2591 }
2592 goto invalid_opc;
2593 default:
2594 goto invalid_opc;
2595 }
2596 break;
2597 case 0x15:
2598 /* VAX floating point */
2599 /* XXX: rounding mode and trapping qualifiers are ignored (!) */
2600 switch (fpfn) { /* fn11 & 0x3F */
2601 case 0x00:
2602 /* ADDF */
2603 gen_faddf(ra, rb, rc);
2604 break;
2605 case 0x01:
2606 /* SUBF */
2607 gen_fsubf(ra, rb, rc);
2608 break;
2609 case 0x02:
2610 /* MULF */
2611 gen_fmulf(ra, rb, rc);
2612 break;
2613 case 0x03:
2614 /* DIVF */
2615 gen_fdivf(ra, rb, rc);
2616 break;
2617 case 0x1E:
2618 /* CVTDG */
2619#if 0 /* TODO */
2620 gen_fcvtdg(rb, rc);
2621#else
2622 goto invalid_opc;
2623#endif
2624 break;
2625 case 0x20:
2626 /* ADDG */
2627 gen_faddg(ra, rb, rc);
2628 break;
2629 case 0x21:
2630 /* SUBG */
2631 gen_fsubg(ra, rb, rc);
2632 break;
2633 case 0x22:
2634 /* MULG */
2635 gen_fmulg(ra, rb, rc);
2636 break;
2637 case 0x23:
2638 /* DIVG */
2639 gen_fdivg(ra, rb, rc);
2640 break;
2641 case 0x25:
2642 /* CMPGEQ */
2643 gen_fcmpgeq(ra, rb, rc);
2644 break;
2645 case 0x26:
2646 /* CMPGLT */
2647 gen_fcmpglt(ra, rb, rc);
2648 break;
2649 case 0x27:
2650 /* CMPGLE */
2651 gen_fcmpgle(ra, rb, rc);
2652 break;
2653 case 0x2C:
2654 /* CVTGF */
2655 gen_fcvtgf(rb, rc);
2656 break;
2657 case 0x2D:
2658 /* CVTGD */
2659#if 0 /* TODO */
2660 gen_fcvtgd(rb, rc);
2661#else
2662 goto invalid_opc;
2663#endif
2664 break;
2665 case 0x2F:
2666 /* CVTGQ */
2667 gen_fcvtgq(rb, rc);
2668 break;
2669 case 0x3C:
2670 /* CVTQF */
2671 gen_fcvtqf(rb, rc);
2672 break;
2673 case 0x3E:
2674 /* CVTQG */
2675 gen_fcvtqg(rb, rc);
2676 break;
2677 default:
2678 goto invalid_opc;
2679 }
2680 break;
2681 case 0x16:
2682 /* IEEE floating-point */
2683 switch (fpfn) { /* fn11 & 0x3F */
2684 case 0x00:
2685 /* ADDS */
2686 gen_fadds(ctx, ra, rb, rc, fn11);
2687 break;
2688 case 0x01:
2689 /* SUBS */
2690 gen_fsubs(ctx, ra, rb, rc, fn11);
2691 break;
2692 case 0x02:
2693 /* MULS */
2694 gen_fmuls(ctx, ra, rb, rc, fn11);
2695 break;
2696 case 0x03:
2697 /* DIVS */
2698 gen_fdivs(ctx, ra, rb, rc, fn11);
2699 break;
2700 case 0x20:
2701 /* ADDT */
2702 gen_faddt(ctx, ra, rb, rc, fn11);
2703 break;
2704 case 0x21:
2705 /* SUBT */
2706 gen_fsubt(ctx, ra, rb, rc, fn11);
2707 break;
2708 case 0x22:
2709 /* MULT */
2710 gen_fmult(ctx, ra, rb, rc, fn11);
2711 break;
2712 case 0x23:
2713 /* DIVT */
2714 gen_fdivt(ctx, ra, rb, rc, fn11);
2715 break;
2716 case 0x24:
2717 /* CMPTUN */
2718 gen_fcmptun(ctx, ra, rb, rc, fn11);
2719 break;
2720 case 0x25:
2721 /* CMPTEQ */
2722 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2723 break;
2724 case 0x26:
2725 /* CMPTLT */
2726 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2727 break;
2728 case 0x27:
2729 /* CMPTLE */
2730 gen_fcmptle(ctx, ra, rb, rc, fn11);
2731 break;
2732 case 0x2C:
2733 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2734 /* CVTST */
2735 gen_fcvtst(ctx, rb, rc, fn11);
2736 } else {
2737 /* CVTTS */
2738 gen_fcvtts(ctx, rb, rc, fn11);
2739 }
2740 break;
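 /* For the IEEE operations, fn11 carries the rounding and trapping
    qualifiers while fpfn (fn11 & 0x3f) selects the base operation.
    Function value 0x2C is overloaded: the full encodings 0x2AC and
    0x6AC denote CVTST (and CVTST/S); every other qualifier pattern
    is a CVTTS variant.  */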
2741 case 0x2F:
2742 /* CVTTQ */
2743 gen_fcvttq(ctx, rb, rc, fn11);
2744 break;
2745 case 0x3C:
2746 /* CVTQS */
2747 gen_fcvtqs(ctx, rb, rc, fn11);
2748 break;
2749 case 0x3E:
2750 /* CVTQT */
2751 gen_fcvtqt(ctx, rb, rc, fn11);
2752 break;
2753 default:
2754 goto invalid_opc;
2755 }
2756 break;
2757 case 0x17:
2758 switch (fn11) {
2759 case 0x010:
2760 /* CVTLQ */
2761 gen_fcvtlq(rb, rc);
2762 break;
2763 case 0x020:
2764 if (likely(rc != 31)) {
2765 if (ra == rb) {
2766 /* FMOV */
2767 if (ra == 31)
2768 tcg_gen_movi_i64(cpu_fir[rc], 0);
2769 else
2770 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2771 } else {
2772 /* CPYS */
2773 gen_fcpys(ra, rb, rc);
2774 }
2775 }
2776 break;
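 /* CPYS builds its result from the sign of ra and the exponent and
    fraction of rb, so CPYS fx, fx, fy is the canonical FMOV idiom;
    the ra == rb test above catches exactly that case and reduces it
    to a plain register move.  */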
2777 case 0x021:
2778 /* CPYSN */
2779 gen_fcpysn(ra, rb, rc);
2780 break;
2781 case 0x022:
2782 /* CPYSE */
2783 gen_fcpyse(ra, rb, rc);
2784 break;
2785 case 0x024:
2786 /* MT_FPCR */
2787 if (likely(ra != 31))
2788 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
2789 else {
2790 TCGv tmp = tcg_const_i64(0);
2791 gen_helper_store_fpcr(cpu_env, tmp);
2792 tcg_temp_free(tmp);
2793 }
2794 break;
2795 case 0x025:
2796 /* MF_FPCR */
2797 if (likely(ra != 31))
2798 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
2799 break;
2800 case 0x02A:
2801 /* FCMOVEQ */
2802 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2803 break;
2804 case 0x02B:
2805 /* FCMOVNE */
2806 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2807 break;
2808 case 0x02C:
2809 /* FCMOVLT */
2810 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2811 break;
2812 case 0x02D:
2813 /* FCMOVGE */
2814 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2815 break;
2816 case 0x02E:
2817 /* FCMOVLE */
2818 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2819 break;
2820 case 0x02F:
2821 /* FCMOVGT */
2822 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2823 break;
2824 case 0x030:
2825 /* CVTQL */
2826 gen_fcvtql(rb, rc);
2827 break;
2828 case 0x130:
2829 /* CVTQL/V */
2830 case 0x530:
2831 /* CVTQL/SV */
2832 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2833 /v doesn't do. The only thing I can think of is that /sv is a
2834 valid instruction merely for completeness in the ISA. */
2835 gen_fcvtql_v(ctx, rb, rc);
2836 break;
2837 default:
2838 goto invalid_opc;
2839 }
2840 break;
2841 case 0x18:
2842 switch ((uint16_t)disp16) {
2843 case 0x0000:
2844 /* TRAPB */
2845 /* No-op. */
2846 break;
2847 case 0x0400:
2848 /* EXCB */
2849 /* No-op. */
2850 break;
2851 case 0x4000:
2852 /* MB */
2853 /* No-op */
2854 break;
2855 case 0x4400:
2856 /* WMB */
2857 /* No-op */
2858 break;
2859 case 0x8000:
2860 /* FETCH */
2861 /* No-op */
2862 break;
2863 case 0xA000:
2864 /* FETCH_M */
2865 /* No-op */
2866 break;
2867 case 0xC000:
2868 /* RPCC */
2869 if (ra != 31) {
2870 if (use_icount) {
2871 gen_io_start();
2872 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2873 gen_io_end();
2874 ret = EXIT_PC_STALE;
2875 } else {
2876 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2877 }
2878 }
2879 break;
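 /* With icount active, reading the cycle counter is an I/O-like
    operation: it is bracketed with gen_io_start/gen_io_end and the TB
    is ended (EXIT_PC_STALE) so the instruction count stays exact.  */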
2880 case 0xE000:
2881 /* RC */
2882 gen_rx(ra, 0);
2883 break;
2884 case 0xE800:
2885 /* ECB */
2886 break;
2887 case 0xF000:
2888 /* RS */
2889 gen_rx(ra, 1);
2890 break;
2891 case 0xF800:
2892 /* WH64 */
2893 /* No-op */
2894 break;
2895 default:
2896 goto invalid_opc;
2897 }
2898 break;
2899 case 0x19:
2900 /* HW_MFPR (PALcode) */
2901#ifndef CONFIG_USER_ONLY
2902 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2903 return gen_mfpr(ra, insn & 0xffff);
2904 }
2905#endif
2906 goto invalid_opc;
2907 case 0x1A:
2908 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2909 prediction stack action, which of course we don't implement. */
2910 if (rb != 31) {
2911 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2912 } else {
2913 tcg_gen_movi_i64(cpu_pc, 0);
2914 }
2915 if (ra != 31) {
2916 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2917 }
2918 ret = EXIT_PC_UPDATED;
2919 break;
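 /* Indirect jumps: the target in rb is forced to longword alignment
    (instructions are 4 bytes, and the architecture ignores the low
    two bits), and ra receives the address of the next instruction as
    the return address.  */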
2920 case 0x1B:
2921 /* HW_LD (PALcode) */
2922#ifndef CONFIG_USER_ONLY
2923 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2924 TCGv addr;
2925
2926 if (ra == 31) {
2927 break;
2928 }
2929
2930 addr = tcg_temp_new();
2931 if (rb != 31)
2932 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2933 else
2934 tcg_gen_movi_i64(addr, disp12);
2935 switch ((insn >> 12) & 0xF) {
2936 case 0x0:
2937 /* Longword physical access (hw_ldl/p) */
2938 gen_helper_ldl_phys(cpu_ir[ra], addr);
2939 break;
2940 case 0x1:
2941 /* Quadword physical access (hw_ldq/p) */
2942 gen_helper_ldq_phys(cpu_ir[ra], addr);
2943 break;
2944 case 0x2:
2945 /* Longword physical access with lock (hw_ldl_l/p) */
2946 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
2947 break;
2948 case 0x3:
2949 /* Quadword physical access with lock (hw_ldq_l/p) */
2950 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
2951 break;
2952 case 0x4:
2953 /* Longword virtual PTE fetch (hw_ldl/v) */
2954 goto invalid_opc;
2955 case 0x5:
2956 /* Quadword virtual PTE fetch (hw_ldq/v) */
2957 goto invalid_opc;
2959 case 0x6:
2960 /* Invalid */
2961 goto invalid_opc;
2962 case 0x7:
2963 /* Invalid */
2964 goto invalid_opc;
2965 case 0x8:
2966 /* Longword virtual access (hw_ldl) */
2967 goto invalid_opc;
2968 case 0x9:
2969 /* Quadword virtual access (hw_ldq) */
2970 goto invalid_opc;
2971 case 0xA:
2972 /* Longword virtual access with protection check (hw_ldl/w) */
2973 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2974 break;
2975 case 0xB:
2976 /* Quadword virtual access with protection check (hw_ldq/w) */
2977 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2978 break;
2979 case 0xC:
2980 /* Longword virtual access with alt access mode (hw_ldl/a) */
2981 goto invalid_opc;
2982 case 0xD:
2983 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2984 goto invalid_opc;
2985 case 0xE:
2986 /* Longword virtual access with alternate access mode and
2987 protection checks (hw_ldl/wa) */
2988 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2989 break;
2990 case 0xF:
2991 /* Quadword virtual access with alternate access mode and
2992 protection checks (hw_ldq/wa) */
2993 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2994 break;
2995 }
2996 tcg_temp_free(addr);
2997 break;
2998 }
2999#endif
3000 goto invalid_opc;
3001 case 0x1C:
3002 switch (fn7) {
3003 case 0x00:
3004 /* SEXTB */
3005 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
3006 goto invalid_opc;
3007 }
3008 if (likely(rc != 31)) {
3009 if (islit)
3010 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
3011 else
3012 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
3013 }
3014 break;
3015 case 0x01:
3016 /* SEXTW */
3017 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
3018 if (likely(rc != 31)) {
3019 if (islit) {
3020 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
3021 } else {
3022 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
3023 }
3024 }
3025 break;
3026 }
3027 goto invalid_opc;
3028 case 0x30:
3029 /* CTPOP */
3030 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3031 if (likely(rc != 31)) {
3032 if (islit) {
3033 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
3034 } else {
3035 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
3036 }
3037 }
3038 break;
3039 }
3040 goto invalid_opc;
3041 case 0x31:
3042 /* PERR */
3043 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3044 gen_perr(ra, rb, rc, islit, lit);
3045 break;
3046 }
3047 goto invalid_opc;
3048 case 0x32:
3049 /* CTLZ */
3050 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3051 if (likely(rc != 31)) {
3052 if (islit) {
3053 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3054 } else {
3055 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
3056 }
3057 }
3058 break;
3059 }
3060 goto invalid_opc;
3061 case 0x33:
3062 /* CTTZ */
3063 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3064 if (likely(rc != 31)) {
3065 if (islit) {
3066 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3067 } else {
3068 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3069 }
3070 }
3071 break;
3072 }
3073 goto invalid_opc;
3074 case 0x34:
3075 /* UNPKBW */
3076 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3077 if (real_islit || ra != 31) {
3078 goto invalid_opc;
3079 }
3080 gen_unpkbw(rb, rc);
3081 break;
3082 }
3083 goto invalid_opc;
3084 case 0x35:
3085 /* UNPKBL */
3086 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3087 if (real_islit || ra != 31) {
3088 goto invalid_opc;
3089 }
3090 gen_unpkbl(rb, rc);
3091 break;
3092 }
3093 goto invalid_opc;
3094 case 0x36:
3095 /* PKWB */
3096 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3097 if (real_islit || ra != 31) {
3098 goto invalid_opc;
3099 }
3100 gen_pkwb(rb, rc);
3101 break;
3102 }
3103 goto invalid_opc;
3104 case 0x37:
3105 /* PKLB */
3106 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3107 if (real_islit || ra != 31) {
3108 goto invalid_opc;
3109 }
3110 gen_pklb(rb, rc);
3111 break;
3112 }
3113 goto invalid_opc;
3114 case 0x38:
3115 /* MINSB8 */
3116 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3117 gen_minsb8(ra, rb, rc, islit, lit);
3118 break;
3119 }
3120 goto invalid_opc;
3121 case 0x39:
3122 /* MINSW4 */
3123 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3124 gen_minsw4(ra, rb, rc, islit, lit);
3125 break;
3126 }
3127 goto invalid_opc;
3128 case 0x3A:
3129 /* MINUB8 */
3130 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3131 gen_minub8(ra, rb, rc, islit, lit);
3132 break;
3133 }
3134 goto invalid_opc;
3135 case 0x3B:
3136 /* MINUW4 */
3137 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3138 gen_minuw4(ra, rb, rc, islit, lit);
3139 break;
3140 }
3141 goto invalid_opc;
3142 case 0x3C:
3143 /* MAXUB8 */
3144 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3145 gen_maxub8(ra, rb, rc, islit, lit);
3146 break;
3147 }
3148 goto invalid_opc;
3149 case 0x3D:
3150 /* MAXUW4 */
3151 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3152 gen_maxuw4(ra, rb, rc, islit, lit);
3153 break;
3154 }
3155 goto invalid_opc;
3156 case 0x3E:
3157 /* MAXSB8 */
3158 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3159 gen_maxsb8(ra, rb, rc, islit, lit);
3160 break;
3161 }
3162 goto invalid_opc;
3163 case 0x3F:
3164 /* MAXSW4 */
3165 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3166 gen_maxsw4(ra, rb, rc, islit, lit);
3167 break;
3168 }
3169 goto invalid_opc;
3170 case 0x70:
3171 /* FTOIT */
3172 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3173 goto invalid_opc;
3174 }
3175 if (likely(rc != 31)) {
3176 if (ra != 31)
3177 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3178 else
3179 tcg_gen_movi_i64(cpu_ir[rc], 0);
3180 }
3181 break;
3182 case 0x78:
3183 /* FTOIS */
3184 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3185 goto invalid_opc;
3186 }
3187 if (rc != 31) {
3188 TCGv_i32 tmp1 = tcg_temp_new_i32();
3189 if (ra != 31)
3190 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3191 else {
3192 TCGv tmp2 = tcg_const_i64(0);
3193 gen_helper_s_to_memory(tmp1, tmp2);
3194 tcg_temp_free(tmp2);
3195 }
3196 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3197 tcg_temp_free_i32(tmp1);
3198 }
3199 break;
3200 default:
3201 goto invalid_opc;
3202 }
3203 break;
3204 case 0x1D:
3205 /* HW_MTPR (PALcode) */
3206#ifndef CONFIG_USER_ONLY
3207 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3208 return gen_mtpr(ctx, rb, insn & 0xffff);
3209 }
3210#endif
3211 goto invalid_opc;
3212 case 0x1E:
3213 /* HW_RET (PALcode) */
3214#ifndef CONFIG_USER_ONLY
3215 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3216 if (rb == 31) {
3217 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3218 address from EXC_ADDR. This turns out to be useful for our
3219 emulation PALcode, so continue to accept it. */
3220 TCGv tmp = tcg_temp_new();
3221 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3222 gen_helper_hw_ret(cpu_env, tmp);
3223 tcg_temp_free(tmp);
3224 } else {
3225 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
3226 }
3227 ret = EXIT_PC_UPDATED;
3228 break;
3229 }
3230#endif
3231 goto invalid_opc;
3232 case 0x1F:
3233 /* HW_ST (PALcode) */
3234#ifndef CONFIG_USER_ONLY
3235 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3236 TCGv addr, val;
3237 addr = tcg_temp_new();
3238 if (rb != 31)
3239 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3240 else
3241 tcg_gen_movi_i64(addr, disp12);
3242 if (ra != 31)
3243 val = cpu_ir[ra];
3244 else {
3245 val = tcg_temp_new();
3246 tcg_gen_movi_i64(val, 0);
3247 }
3248 switch ((insn >> 12) & 0xF) {
3249 case 0x0:
3250 /* Longword physical access */
3251 gen_helper_stl_phys(addr, val);
3252 break;
3253 case 0x1:
3254 /* Quadword physical access */
3255 gen_helper_stq_phys(addr, val);
3256 break;
3257 case 0x2:
3258 /* Longword physical access with lock */
3259 gen_helper_stl_c_phys(val, cpu_env, addr, val);
3260 break;
3261 case 0x3:
3262 /* Quadword physical access with lock */
3263 gen_helper_stq_c_phys(val, cpu_env, addr, val);
3264 break;
3265 case 0x4:
3266 /* Longword virtual access */
3267 goto invalid_opc;
3268 case 0x5:
3269 /* Quadword virtual access */
3270 goto invalid_opc;
3271 case 0x6:
3272 /* Invalid */
3273 goto invalid_opc;
3274 case 0x7:
3275 /* Invalid */
3276 goto invalid_opc;
3277 case 0x8:
3278 /* Invalid */
3279 goto invalid_opc;
3280 case 0x9:
3281 /* Invalid */
3282 goto invalid_opc;
3283 case 0xA:
3284 /* Invalid */
3285 goto invalid_opc;
3286 case 0xB:
3287 /* Invalid */
3288 goto invalid_opc;
3289 case 0xC:
3290 /* Longword virtual access with alternate access mode */
3291 goto invalid_opc;
3292 case 0xD:
3293 /* Quadword virtual access with alternate access mode */
3294 goto invalid_opc;
3295 case 0xE:
3296 /* Invalid */
3297 goto invalid_opc;
3298 case 0xF:
3299 /* Invalid */
3300 goto invalid_opc;
3301 }
3302 if (ra == 31)
3303 tcg_temp_free(val);
3304 tcg_temp_free(addr);
3305 break;
3306 }
3307#endif
3308 goto invalid_opc;
3309 case 0x20:
3310 /* LDF */
3311 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3312 break;
3313 case 0x21:
3314 /* LDG */
3315 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3316 break;
3317 case 0x22:
3318 /* LDS */
3319 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3320 break;
3321 case 0x23:
3322 /* LDT */
3323 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3324 break;
3325 case 0x24:
3326 /* STF */
3327 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3328 break;
3329 case 0x25:
3330 /* STG */
3331 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3332 break;
3333 case 0x26:
3334 /* STS */
3335 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3336 break;
3337 case 0x27:
3338 /* STT */
3339 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3340 break;
3341 case 0x28:
3342 /* LDL */
3343 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3344 break;
3345 case 0x29:
3346 /* LDQ */
3347 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3348 break;
3349 case 0x2A:
3350 /* LDL_L */
3351 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3352 break;
3353 case 0x2B:
3354 /* LDQ_L */
3355 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3356 break;
3357 case 0x2C:
3358 /* STL */
3359 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3360 break;
3361 case 0x2D:
3362 /* STQ */
3363 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3364 break;
3365 case 0x2E:
3366 /* STL_C */
3367 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3368 break;
3369 case 0x2F:
3370 /* STQ_C */
3371 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3372 break;
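 /* LDx_L/STx_C form the load-locked/store-conditional pair: the load
    records the locked address and value, and gen_store_conditional
    emits a store that succeeds only if the lock is still intact,
    writing 1 to ra on success and 0 on failure.  A typical guest
    retry loop (illustrative only, not emitted here):
        loop:  ldq_l  t0, 0(a0)
               addq   t0, 1, t0
               stq_c  t0, 0(a0)
               beq    t0, loop   */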
3373 case 0x30:
3374 /* BR */
3375 ret = gen_bdirect(ctx, ra, disp21);
3376 break;
3377 case 0x31: /* FBEQ */
3378 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3379 break;
3380 case 0x32: /* FBLT */
3381 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3382 break;
3383 case 0x33: /* FBLE */
3384 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3385 break;
3386 case 0x34:
3387 /* BSR */
3388 ret = gen_bdirect(ctx, ra, disp21);
3389 break;
3390 case 0x35: /* FBNE */
3391 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3392 break;
3393 case 0x36: /* FBGE */
3394 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3395 break;
3396 case 0x37: /* FBGT */
3397 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3398 break;
3399 case 0x38:
3400 /* BLBC */
3401 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3402 break;
3403 case 0x39:
3404 /* BEQ */
3405 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3406 break;
3407 case 0x3A:
3408 /* BLT */
3409 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3410 break;
3411 case 0x3B:
3412 /* BLE */
3413 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3414 break;
3415 case 0x3C:
3416 /* BLBS */
3417 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3418 break;
3419 case 0x3D:
3420 /* BNE */
3421 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3422 break;
3423 case 0x3E:
3424 /* BGE */
3425 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3426 break;
3427 case 0x3F:
3428 /* BGT */
3429 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3430 break;
3431 invalid_opc:
3432 ret = gen_invalid(ctx);
3433 break;
3434 }
3435
3436 return ret;
3437}
3438
3439static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
3440 TranslationBlock *tb,
3441 bool search_pc)
3442{
3443 CPUState *cs = CPU(cpu);
3444 CPUAlphaState *env = &cpu->env;
3445 DisasContext ctx, *ctxp = &ctx;
3446 target_ulong pc_start;
3447 target_ulong pc_mask;
3448 uint32_t insn;
3449 uint16_t *gen_opc_end;
3450 CPUBreakpoint *bp;
3451 int j, lj = -1;
3452 ExitStatus ret;
3453 int num_insns;
3454 int max_insns;
3455
3456 pc_start = tb->pc;
3457 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3458
3459 ctx.tb = tb;
3460 ctx.pc = pc_start;
3461 ctx.mem_idx = cpu_mmu_index(env);
3462 ctx.implver = env->implver;
3463 ctx.singlestep_enabled = cs->singlestep_enabled;
3464
3465 /* ??? Every TB begins with unset rounding mode, to be initialized on
3466 the first fp insn of the TB. Alternatively we could define a proper
3467 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3468 to reset the FP_STATUS to that default at the end of any TB that
3469 changes the default. We could even (gasp) dynamically figure out
3470 what default would be most efficient given the running program. */
3471 ctx.tb_rm = -1;
3472 /* Similarly for flush-to-zero. */
3473 ctx.tb_ftz = -1;
3474
3475 num_insns = 0;
3476 max_insns = tb->cflags & CF_COUNT_MASK;
3477 if (max_insns == 0) {
3478 max_insns = CF_COUNT_MASK;
3479 }
3480
3481 if (in_superpage(&ctx, pc_start)) {
3482 pc_mask = (1ULL << 41) - 1;
3483 } else {
3484 pc_mask = ~TARGET_PAGE_MASK;
3485 }
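 /* Translation normally stops at a guest page boundary, since a TB is
    only protected against modification of the page it was read from.
    Superpage addresses map linearly onto physical memory, so there a
    wider mask (the 2^41-byte region) can be used, presumably allowing
    a TB to run on past individual page boundaries.  */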
3486
3487 gen_tb_start();
3488 do {
3489 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3490 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3491 if (bp->pc == ctx.pc) {
3492 gen_excp(&ctx, EXCP_DEBUG, 0);
3493 break;
3494 }
3495 }
3496 }
3497 if (search_pc) {
3498 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3499 if (lj < j) {
3500 lj++;
3501 while (lj < j)
3502 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3503 }
3504 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
3505 tcg_ctx.gen_opc_instr_start[lj] = 1;
3506 tcg_ctx.gen_opc_icount[lj] = num_insns;
3507 }
3508 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3509 gen_io_start();
3510 insn = cpu_ldl_code(env, ctx.pc);
3511 num_insns++;
3512
3513 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3514 tcg_gen_debug_insn_start(ctx.pc);
3515 }
3516
3517 ctx.pc += 4;
3518 ret = translate_one(ctxp, insn);
3519
3520 /* If we reach a page boundary, are single-stepping,
3521 or exhaust the instruction count, stop generation. */
3522 if (ret == NO_EXIT
3523 && ((ctx.pc & pc_mask) == 0
3524 || tcg_ctx.gen_opc_ptr >= gen_opc_end
3525 || num_insns >= max_insns
3526 || singlestep
3527 || ctx.singlestep_enabled)) {
3528 ret = EXIT_PC_STALE;
3529 }
3530 } while (ret == NO_EXIT);
3531
3532 if (tb->cflags & CF_LAST_IO) {
3533 gen_io_end();
3534 }
3535
3536 switch (ret) {
3537 case EXIT_GOTO_TB:
3538 case EXIT_NORETURN:
3539 break;
3540 case EXIT_PC_STALE:
3541 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3542 /* FALLTHRU */
3543 case EXIT_PC_UPDATED:
3544 if (ctx.singlestep_enabled) {
3545 gen_excp_1(EXCP_DEBUG, 0);
3546 } else {
3547 tcg_gen_exit_tb(0);
3548 }
3549 break;
3550 default:
3551 abort();
3552 }
3553
3554 gen_tb_end(tb, num_insns);
3555 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3556 if (search_pc) {
3557 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3558 lj++;
3559 while (lj <= j)
3560 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3561 } else {
3562 tb->size = ctx.pc - pc_start;
3563 tb->icount = num_insns;
3564 }
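 /* The search_pc pass re-runs translation purely to rebuild the
    gen_opc_pc/gen_opc_instr_start/gen_opc_icount side tables mapping
    a generated-op index back to a guest PC; restore_state_to_opc
    below consumes gen_opc_pc when an exception unwinds mid-TB.  */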
3565
3566#ifdef DEBUG_DISAS
3567 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3568 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3569 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
3570 qemu_log("\n");
3571 }
3572#endif
3573}
3574
3575void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
3576{
3577 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
3578}
3579
3580void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
3581{
3582 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
3583}
3584
3585void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
3586{
3587 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
3588}