/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = false;
    int i;

    if (done_init) {
        return;
    }
    done_init = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
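
/* For example, decoding "addq $31,$31,$1" makes both source operands the
   shared constant-zero temporary from load_zero, while "addq $1,$2,$31"
   sends its result to the discard temporary from dest_sink; either way
   no architectural state is touched for register 31.  */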

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
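
/* The cpu_lock_addr/cpu_lock_value pair recorded by the two load-locked
   helpers above is consumed by gen_store_conditional below: STL_C/STQ_C
   succeed only if the value re-read from cpu_lock_addr still matches
   cpu_lock_value.  */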

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Various other loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
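
/* Illustration: assuming the usual 43-bit Alpha virtual address space,
   the test above accepts exactly the KSEG superpage range
   0xfffffc0000000000 ... 0xfffffdffffffffff -- negative addresses whose
   bits <42:41> are binary 10 and that are properly sign-extended from
   bit 42.  */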

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
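
/* For example, a BSR at address 0x1000 with disp == 2 reaches here with
   ctx->pc already advanced to 0x1004, so RA receives the return address
   0x1004 and the branch target is 0x1004 + (2 << 2) == 0x100c.  */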

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
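
/* Worked example for the GE/LT case above: an input of 0x8000000000000000
   (IEEE -0.0) makes the setcond produce 0, the negation an all-zero mask,
   and the final AND a clean +0.0.  Any other input yields an all-ones
   mask and passes through unchanged.  */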

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
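
/* As an illustration, a fully-qualified "adds/sui" carries
   QUAL_S | QUAL_U | QUAL_I in its fn11 field, with the rounding mode
   selected by the QUAL_RM bits; plain "adds" encodes just QUAL_RM_N,
   round to nearest even.  */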

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        }
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
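
/* In bit terms: the longword-in-FP-register layout stores the integer's
   bits <31:30> in register bits <63:62> and bits <29:0> in register
   bits <58:29>; the two shift/mask pairs above pull those fields back
   into place, with the sign extension coming from the arithmetic shift.  */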

static void gen_fcvtql(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vb, 0x3fffffff);
    tcg_gen_shli_i64(tmp, tmp, 32);
    tcg_gen_shli_i64(vc, vc, 29);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;
    int ignore = 0;

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(vc, cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(vc, cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
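
/* For example, zapnot_mask(0x0f) == 0x00000000ffffffffull: each set bit
   in the 8-bit literal keeps the corresponding byte of the operand.  */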

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
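
/* Checking the split-shift trick with numbers: for B & 7 == 2 the
   variable part is (~16 & 63) == 47, plus the constant 1 gives the
   intended 48 == 64 - 2*8; for B & 7 == 0 it is 63 + 1 == 64, which
   correctly produces zero without relying on an out-of-range shift.  */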

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
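
/* For example, cpu_pr_data(0) yields the offset of the byte-sized PS
   field tagged with PR_BYTE, which tells gen_mfpr/gen_mtpr below to use
   the ld8u/st8 access width for processor register 0.  */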

static ExitStatus gen_mfpr(TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit;
    TCGv va, vb, vc, tmp;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }
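
    /* Illustration of the operate-format decode above: in "addq $1,12,$3"
       bit 12 of the instruction is set, so islit is true and the second
       operand comes from the 8-bit literal field (12) rather than from
       register rb.  */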

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            gen_helper_cmpbge(vc, va, vb);
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            gen_helper_addlv(vc, cpu_env, va, vb);
            break;
        case 0x49:
            /* SUBL/V */
            gen_helper_sublv(vc, cpu_env, va, vb);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_helper_addqv(vc, cpu_env, va, vb);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_helper_subqv(vc, cpu_env, va, vb);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            gen_helper_mullv(vc, cpu_env, va, vb);
            break;
        case 0x60:
            /* MULQ/V */
            gen_helper_mulqv(vc, cpu_env, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_fcvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_fcvtql(vc, vb);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            REQUIRE_REG_31(ra);
2240 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2241 /v doesn't do. The only thing I can think of is that /sv is a
2242 valid instruction merely for completeness in the ISA. */
2243 vc = dest_fpr(ctx, rc);
2244 vb = load_fpr(ctx, rb);
2245 gen_helper_fcvtql_v_input(cpu_env, vb);
2246 gen_fcvtql(vc, vb);
2247 break;
2248 default:
2249 goto invalid_opc;
2250 }
2251 break;
2252
2253 case 0x18:
2254 switch ((uint16_t)disp16) {
2255 case 0x0000:
2256 /* TRAPB */
2257 /* No-op. */
2258 break;
2259 case 0x0400:
2260 /* EXCB */
2261 /* No-op. */
2262 break;
2263 case 0x4000:
2264 /* MB */
2265 /* No-op. */
2266 break;
2267 case 0x4400:
2268 /* WMB */
2269 /* No-op. */
2270 break;
2271 case 0x8000:
2272 /* FETCH */
2273 /* No-op. */
2274 break;
2275 case 0xA000:
2276 /* FETCH_M */
2277 /* No-op. */
2278 break;
2279 case 0xC000:
2280 /* RPCC */
2281 va = dest_gpr(ctx, ra);
2282 if (use_icount) {
2283 gen_io_start();
2284 gen_helper_load_pcc(va, cpu_env);
2285 gen_io_end();
2286 ret = EXIT_PC_STALE;
2287 } else {
2288 gen_helper_load_pcc(va, cpu_env);
2289 }
2290 break;
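/* With icount active, reading the cycle counter counts as an I/O
   operation: the helper is bracketed with gen_io_start/gen_io_end and the
   TB is ended (EXIT_PC_STALE) so the value the guest sees stays exact. */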
2291 case 0xE000:
2292 /* RC */
2293 gen_rx(ra, 0);
2294 break;
2295 case 0xE800:
2296 /* ECB */
2297 break;
2298 case 0xF000:
2299 /* RS */
2300 gen_rx(ra, 1);
2301 break;
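/* RC and RS read the intr_flag into ra and then clear (RC) or set (RS)
   it; gen_rx's second argument selects which. */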
2302 case 0xF800:
2303 /* WH64 */
2304 /* No-op. */
2305 break;
2306 default:
2307 goto invalid_opc;
2308 }
2309 break;
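/* The barrier and prefetch hints above can be no-ops here: as generated,
   a single CPU's memory operations execute in program order, so there is
   nothing to reorder, and FETCH/FETCH_M are only hints. */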
2310
2311 case 0x19:
2312 /* HW_MFPR (PALcode) */
2313 #ifndef CONFIG_USER_ONLY
2314 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2315 va = dest_gpr(ctx, ra);
2316 ret = gen_mfpr(va, insn & 0xffff);
2317 break;
2318 #else
2319 goto invalid_opc;
2320 #endif
2321
2322 case 0x1A:
2323 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2324 prediction stack action, which of course we don't implement. */
2325 vb = load_gpr(ctx, rb);
2326 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2327 if (ra != 31) {
2328 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2329 }
2330 ret = EXIT_PC_UPDATED;
2331 break;
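/* The architecture ignores the two low bits of the target register, so
   mask them off to keep the PC instruction-aligned; the return address
   (the PC of the following insn) lands in ra unless ra is $31. */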
2332
2333 case 0x1B:
2334 /* HW_LD (PALcode) */
2335 #ifndef CONFIG_USER_ONLY
2336 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2337 {
2338 TCGv addr = tcg_temp_new();
2339 vb = load_gpr(ctx, rb);
2340 va = dest_gpr(ctx, ra);
2341
2342 tcg_gen_addi_i64(addr, vb, disp12);
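/* Bits <15:12> of the insn encode the access type: physical vs virtual,
   with or without lock, and alternate access mode. Combinations our
   emulation PALcode never generates raise invalid_opc. */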
2343 switch ((insn >> 12) & 0xF) {
2344 case 0x0:
2345 /* Longword physical access (hw_ldl/p) */
2346 gen_helper_ldl_phys(va, cpu_env, addr);
2347 break;
2348 case 0x1:
2349 /* Quadword physical access (hw_ldq/p) */
2350 gen_helper_ldq_phys(va, cpu_env, addr);
2351 break;
2352 case 0x2:
2353 /* Longword physical access with lock (hw_ldl_l/p) */
2354 gen_helper_ldl_l_phys(va, cpu_env, addr);
2355 break;
2356 case 0x3:
2357 /* Quadword physical access with lock (hw_ldq_l/p) */
2358 gen_helper_ldq_l_phys(va, cpu_env, addr);
2359 break;
2360 case 0x4:
2361 /* Longword virtual PTE fetch (hw_ldl/v) */
2362 goto invalid_opc;
2363 case 0x5:
2364 /* Quadword virtual PTE fetch (hw_ldq/v) */
2365 goto invalid_opc;
2367 case 0x6:
2368 /* Invalid */
2369 goto invalid_opc;
2370 case 0x7:
2371 /* Invalid */
2372 goto invalid_opc;
2373 case 0x8:
2374 /* Longword virtual access (hw_ldl) */
2375 goto invalid_opc;
2376 case 0x9:
2377 /* Quadword virtual access (hw_ldq) */
2378 goto invalid_opc;
2379 case 0xA:
2380 /* Longword virtual access with protection check (hw_ldl/w) */
2381 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2382 break;
2383 case 0xB:
2384 /* Quadword virtual access with protection check (hw_ldq/w) */
2385 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2386 break;
2387 case 0xC:
2388 /* Longword virtual access with alt access mode (hw_ldl/a) */
2389 goto invalid_opc;
2390 case 0xD:
2391 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2392 goto invalid_opc;
2393 case 0xE:
2394 /* Longword virtual access with alternate access mode and
2395 protection checks (hw_ldl/wa) */
2396 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2397 break;
2398 case 0xF:
2399 /* Quadword virtual access with alternate access mode and
2400 protection checks (hw_ldq/wa) */
2401 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2402 break;
2403 }
2404 tcg_temp_free(addr);
2405 break;
2406 }
2407 #else
2408 goto invalid_opc;
2409 #endif
2410
2411 case 0x1C:
2412 vc = dest_gpr(ctx, rc);
2413 if (fn7 == 0x70) {
2414 /* FTOIT */
2415 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2416 REQUIRE_REG_31(rb);
2417 va = load_fpr(ctx, ra);
2418 tcg_gen_mov_i64(vc, va);
2419 break;
2420 } else if (fn7 == 0x78) {
2421 /* FTOIS */
2422 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2423 REQUIRE_REG_31(rb);
2424 t32 = tcg_temp_new_i32();
2425 va = load_fpr(ctx, ra);
2426 gen_helper_s_to_memory(t32, va);
2427 tcg_gen_ext_i32_i64(vc, t32);
2428 tcg_temp_free_i32(t32);
2429 break;
2430 }
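/* FTOIT and FTOIS move FP register bits to an integer register without
   rounding: FTOIT copies the raw 64-bit T-format image, while FTOIS
   repacks the value into the 32-bit S memory format and sign-extends. */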
2431
2432 vb = load_gpr_lit(ctx, rb, lit, islit);
2433 switch (fn7) {
2434 case 0x00:
2435 /* SEXTB */
2436 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2437 REQUIRE_REG_31(ra);
2438 tcg_gen_ext8s_i64(vc, vb);
2439 break;
2440 case 0x01:
2441 /* SEXTW */
2442 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2443 REQUIRE_REG_31(ra);
2444 tcg_gen_ext16s_i64(vc, vb);
2445 break;
2446 case 0x30:
2447 /* CTPOP */
2448 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2449 REQUIRE_REG_31(ra);
2450 gen_helper_ctpop(vc, vb);
2451 break;
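/* CTPOP counts the set bits of vb. The helper is morally equivalent to
   the classic loop (a sketch, not the helper's actual body):
   for (n = 0; b != 0; b &= b - 1) n++; */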
2452 case 0x31:
2453 /* PERR */
2454 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2455 va = load_gpr(ctx, ra);
2456 gen_helper_perr(vc, va, vb);
2457 break;
2458 case 0x32:
2459 /* CTLZ */
2460 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2461 REQUIRE_REG_31(ra);
2462 gen_helper_ctlz(vc, vb);
2463 break;
2464 case 0x33:
2465 /* CTTZ */
2466 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2467 REQUIRE_REG_31(ra);
2468 gen_helper_cttz(vc, vb);
2469 break;
2470 case 0x34:
2471 /* UNPKBW */
2472 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2473 REQUIRE_REG_31(ra);
2474 gen_helper_unpkbw(vc, vb);
2475 break;
2476 case 0x35:
2477 /* UNPKBL */
2478 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2479 REQUIRE_REG_31(ra);
2480 gen_helper_unpkbl(vc, vb);
2481 break;
2482 case 0x36:
2483 /* PKWB */
2484 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2485 REQUIRE_REG_31(ra);
2486 gen_helper_pkwb(vc, vb);
2487 break;
2488 case 0x37:
2489 /* PKLB */
2490 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2491 REQUIRE_REG_31(ra);
2492 gen_helper_pklb(vc, vb);
2493 break;
2494 case 0x38:
2495 /* MINSB8 */
2496 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2497 va = load_gpr(ctx, ra);
2498 gen_helper_minsb8(vc, va, vb);
2499 break;
2500 case 0x39:
2501 /* MINSW4 */
2502 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2503 va = load_gpr(ctx, ra);
2504 gen_helper_minsw4(vc, va, vb);
2505 break;
2506 case 0x3A:
2507 /* MINUB8 */
2508 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2509 va = load_gpr(ctx, ra);
2510 gen_helper_minub8(vc, va, vb);
2511 break;
2512 case 0x3B:
2513 /* MINUW4 */
2514 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2515 va = load_gpr(ctx, ra);
2516 gen_helper_minuw4(vc, va, vb);
2517 break;
2518 case 0x3C:
2519 /* MAXUB8 */
2520 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2521 va = load_gpr(ctx, ra);
2522 gen_helper_maxub8(vc, va, vb);
2523 break;
2524 case 0x3D:
2525 /* MAXUW4 */
2526 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2527 va = load_gpr(ctx, ra);
2528 gen_helper_maxuw4(vc, va, vb);
2529 break;
2530 case 0x3E:
2531 /* MAXSB8 */
2532 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2533 va = load_gpr(ctx, ra);
2534 gen_helper_maxsb8(vc, va, vb);
2535 break;
2536 case 0x3F:
2537 /* MAXSW4 */
2538 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2539 va = load_gpr(ctx, ra);
2540 gen_helper_maxsw4(vc, va, vb);
2541 break;
2542 default:
2543 goto invalid_opc;
2544 }
2545 break;
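/* Everything in the 0x1C group is gated on an AMASK bit: BWX for the
   byte/word extensions, CIX for the count instructions, MVI for the
   multimedia ones. REQUIRE_TB_FLAG jumps to invalid_opc when the
   emulated CPU does not advertise the extension. */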
2546
2547 case 0x1D:
2548 /* HW_MTPR (PALcode) */
2549 #ifndef CONFIG_USER_ONLY
2550 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2551 vb = load_gpr(ctx, rb);
2552 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2553 break;
2554 #else
2555 goto invalid_opc;
2556 #endif
2557
2558 case 0x1E:
2559 /* HW_RET (PALcode) */
2560 #ifndef CONFIG_USER_ONLY
2561 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2562 if (rb == 31) {
2563 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2564 address from EXC_ADDR. This turns out to be useful for our
2565 emulation PALcode, so continue to accept it. */
2566 tmp = tcg_temp_new();
2567 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2568 gen_helper_hw_ret(cpu_env, tmp);
2569 tcg_temp_free(tmp);
2570 } else {
2571 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
2572 }
2573 ret = EXIT_PC_UPDATED;
2574 break;
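/* In this port, bit 0 of the hw_ret target selects whether the CPU
   remains in PAL mode; the helper performs the mode switch, so from the
   translator's point of view the PC is simply updated. */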
2575 #else
2576 goto invalid_opc;
2577 #endif
2578
2579 case 0x1F:
2580 /* HW_ST (PALcode) */
2581 #ifndef CONFIG_USER_ONLY
2582 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2583 {
2584 TCGv addr = tcg_temp_new();
2585 va = load_gpr(ctx, ra);
2586 vb = load_gpr(ctx, rb);
2587
2588 tcg_gen_addi_i64(addr, vb, disp12);
2589 switch ((insn >> 12) & 0xF) {
2590 case 0x0:
2591 /* Longword physical access */
2592 gen_helper_stl_phys(cpu_env, addr, va);
2593 break;
2594 case 0x1:
2595 /* Quadword physical access */
2596 gen_helper_stq_phys(cpu_env, addr, va);
2597 break;
2598 case 0x2:
2599 /* Longword physical access with lock */
2600 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2601 break;
2602 case 0x3:
2603 /* Quadword physical access with lock */
2604 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2605 break;
2606 case 0x4:
2607 /* Longword virtual access */
2608 goto invalid_opc;
2609 case 0x5:
2610 /* Quadword virtual access */
2611 goto invalid_opc;
2612 case 0x6:
2613 /* Invalid */
2614 goto invalid_opc;
2615 case 0x7:
2616 /* Invalid */
2617 goto invalid_opc;
2618 case 0x8:
2619 /* Invalid */
2620 goto invalid_opc;
2621 case 0x9:
2622 /* Invalid */
2623 goto invalid_opc;
2624 case 0xA:
2625 /* Invalid */
2626 goto invalid_opc;
2627 case 0xB:
2628 /* Invalid */
2629 goto invalid_opc;
2630 case 0xC:
2631 /* Longword virtual access with alternate access mode */
2632 goto invalid_opc;
2633 case 0xD:
2634 /* Quadword virtual access with alternate access mode */
2635 goto invalid_opc;
2636 case 0xE:
2637 /* Invalid */
2638 goto invalid_opc;
2639 case 0xF:
2640 /* Invalid */
2641 goto invalid_opc;
2642 }
2643 tcg_temp_free(addr);
2644 break;
2645 }
2646 #else
2647 goto invalid_opc;
2648 #endif
2649 case 0x20:
2650 /* LDF */
2651 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2652 break;
2653 case 0x21:
2654 /* LDG */
2655 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2656 break;
2657 case 0x22:
2658 /* LDS */
2659 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2660 break;
2661 case 0x23:
2662 /* LDT */
2663 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2664 break;
2665 case 0x24:
2666 /* STF */
2667 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2668 break;
2669 case 0x25:
2670 /* STG */
2671 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2672 break;
2673 case 0x26:
2674 /* STS */
2675 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2676 break;
2677 case 0x27:
2678 /* STT */
2679 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2680 break;
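/* The FP loads and stores above differ only in format handling: LDF/STF
   and LDG/STG convert the VAX F- and G-float memory images via helpers,
   LDS/STS convert between the 32-bit IEEE S memory format and the
   in-register T format, and LDT/STT move the 64-bit image unchanged. */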
2681 case 0x28:
2682 /* LDL */
2683 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2684 break;
2685 case 0x29:
2686 /* LDQ */
2687 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2688 break;
2689 case 0x2A:
2690 /* LDL_L */
2691 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2692 break;
2693 case 0x2B:
2694 /* LDQ_L */
2695 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2696 break;
2697 case 0x2C:
2698 /* STL */
2699 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2700 break;
2701 case 0x2D:
2702 /* STQ */
2703 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2704 break;
2705 case 0x2E:
2706 /* STL_C */
2707 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2708 break;
2709 case 0x2F:
2710 /* STQ_C */
2711 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2712 break;
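/* LDx_L remembers the locked address and loaded value; STx_C succeeds
   only if the lock is still intact and writes the 0/1 outcome back into
   ra. The comparison and branch live in gen_store_conditional. */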
2713 case 0x30:
2714 /* BR */
2715 ret = gen_bdirect(ctx, ra, disp21);
2716 break;
2717 case 0x31: /* FBEQ */
2718 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2719 break;
2720 case 0x32: /* FBLT */
2721 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2722 break;
2723 case 0x33: /* FBLE */
2724 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2725 break;
2726 case 0x34:
2727 /* BSR */
2728 ret = gen_bdirect(ctx, ra, disp21);
2729 break;
2730 case 0x35: /* FBNE */
2731 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2732 break;
2733 case 0x36: /* FBGE */
2734 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2735 break;
2736 case 0x37: /* FBGT */
2737 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2738 break;
2739 case 0x38:
2740 /* BLBC */
2741 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2742 break;
2743 case 0x39:
2744 /* BEQ */
2745 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2746 break;
2747 case 0x3A:
2748 /* BLT */
2749 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2750 break;
2751 case 0x3B:
2752 /* BLE */
2753 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2754 break;
2755 case 0x3C:
2756 /* BLBS */
2757 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2758 break;
2759 case 0x3D:
2760 /* BNE */
2761 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2762 break;
2763 case 0x3E:
2764 /* BGE */
2765 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2766 break;
2767 case 0x3F:
2768 /* BGT */
2769 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2770 break;
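/* BLBC and BLBS pass 1 as the final gen_bcond argument, so only the low
   bit of ra is tested; the other conditional branches pass 0 and compare
   the whole register against zero. */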
2771 invalid_opc:
2772 ret = gen_invalid(ctx);
2773 break;
2774 }
2775
2776 return ret;
2777 }
2778
2779 static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
2780 TranslationBlock *tb,
2781 bool search_pc)
2782 {
2783 CPUState *cs = CPU(cpu);
2784 CPUAlphaState *env = &cpu->env;
2785 DisasContext ctx, *ctxp = &ctx;
2786 target_ulong pc_start;
2787 target_ulong pc_mask;
2788 uint32_t insn;
2789 uint16_t *gen_opc_end;
2790 CPUBreakpoint *bp;
2791 int j, lj = -1;
2792 ExitStatus ret;
2793 int num_insns;
2794 int max_insns;
2795
2796 pc_start = tb->pc;
2797 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2798
2799 ctx.tb = tb;
2800 ctx.pc = pc_start;
2801 ctx.mem_idx = cpu_mmu_index(env);
2802 ctx.implver = env->implver;
2803 ctx.singlestep_enabled = cs->singlestep_enabled;
2804
2805 /* ??? Every TB begins with unset rounding mode, to be initialized on
2806 the first fp insn of the TB. Alternatively we could define a proper
2807 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2808 to reset the FP_STATUS to that default at the end of any TB that
2809 changes the default. We could even (gasp) dynamically figure out
2810 what default would be most efficient given the running program. */
2811 ctx.tb_rm = -1;
2812 /* Similarly for flush-to-zero. */
2813 ctx.tb_ftz = -1;
2814
2815 num_insns = 0;
2816 max_insns = tb->cflags & CF_COUNT_MASK;
2817 if (max_insns == 0) {
2818 max_insns = CF_COUNT_MASK;
2819 }
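/* A zero count means the TB carries no icount bound, so allow the
   maximum number of insns. */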
2820
2821 if (in_superpage(&ctx, pc_start)) {
2822 pc_mask = (1ULL << 41) - 1;
2823 } else {
2824 pc_mask = ~TARGET_PAGE_MASK;
2825 }
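/* A TB inside the superpage region stays valid across ordinary page
   boundaries, so let it run to the end of the 41-bit superpage;
   otherwise stop at the end of the current target page. */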
2826
2827 gen_tb_start();
2828 do {
2829 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2830 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
2831 if (bp->pc == ctx.pc) {
2832 gen_excp(&ctx, EXCP_DEBUG, 0);
2833 break;
2834 }
2835 }
2836 }
2837 if (search_pc) {
2838 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2839 if (lj < j) {
2840 lj++;
2841 while (lj < j)
2842 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2843 }
2844 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
2845 tcg_ctx.gen_opc_instr_start[lj] = 1;
2846 tcg_ctx.gen_opc_icount[lj] = num_insns;
2847 }
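/* The gen_opc_* arrays record, per opcode index, the guest PC and the
   insn count, letting restore_state_to_opc below rebuild the CPU state
   after a fault in the middle of a TB. */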
2848 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2849 gen_io_start();
2850 }
2851 insn = cpu_ldl_code(env, ctx.pc);
2852 num_insns++;
2853
2854 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2855 tcg_gen_debug_insn_start(ctx.pc);
2856 }
2857
2858 TCGV_UNUSED_I64(ctx.zero);
2859 TCGV_UNUSED_I64(ctx.sink);
2860 TCGV_UNUSED_I64(ctx.lit);
2861
2862 ctx.pc += 4;
2863 ret = translate_one(ctxp, insn);
2864
2865 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2866 tcg_gen_discard_i64(ctx.sink);
2867 tcg_temp_free(ctx.sink);
2868 }
2869 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2870 tcg_temp_free(ctx.zero);
2871 }
2872 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2873 tcg_temp_free(ctx.lit);
2874 }
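/* zero, sink and lit are allocated on demand while translating a single
   insn; the discard marks the sink's value dead before its temp is
   freed. */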
2875
2876 /* If we reach a page boundary, run out of opcode buffer, exhaust
2877 the instruction count, or are single-stepping, stop generation. */
2878 if (ret == NO_EXIT
2879 && ((ctx.pc & pc_mask) == 0
2880 || tcg_ctx.gen_opc_ptr >= gen_opc_end
2881 || num_insns >= max_insns
2882 || singlestep
2883 || ctx.singlestep_enabled)) {
2884 ret = EXIT_PC_STALE;
2885 }
2886 } while (ret == NO_EXIT);
2887
2888 if (tb->cflags & CF_LAST_IO) {
2889 gen_io_end();
2890 }
2891
2892 switch (ret) {
2893 case EXIT_GOTO_TB:
2894 case EXIT_NORETURN:
2895 break;
2896 case EXIT_PC_STALE:
2897 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2898 /* FALLTHRU */
2899 case EXIT_PC_UPDATED:
2900 if (ctx.singlestep_enabled) {
2901 gen_excp_1(EXCP_DEBUG, 0);
2902 } else {
2903 tcg_gen_exit_tb(0);
2904 }
2905 break;
2906 default:
2907 abort();
2908 }
2909
2910 gen_tb_end(tb, num_insns);
2911 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2912 if (search_pc) {
2913 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2914 lj++;
2915 while (lj <= j)
2916 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2917 } else {
2918 tb->size = ctx.pc - pc_start;
2919 tb->icount = num_insns;
2920 }
2921
2922 #ifdef DEBUG_DISAS
2923 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2924 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2925 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
2926 qemu_log("\n");
2927 }
2928 #endif
2929 }
2930
2931 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2932 {
2933 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
2934 }
2935
2936 void gen_intermediate_code_pc(CPUAlphaState *env, struct TranslationBlock *tb)
2937 {
2938 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
2939 }
2940
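/* Invoked after a fault inside a TB: the search_pc pass filled
   gen_opc_pc, and pc_pos indexes the faulting insn. */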
2941 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
2942 {
2943 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
2944 }