/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#else
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif
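/*
 * UNALIGN(C) above supplies the alignment requirement for memory ops:
 * in user-only mode it comes from the CPU configuration (zero or
 * MO_UNALN), so unaligned accesses may be permitted to succeed; in
 * system mode it is always MO_ALIGN, so a misaligned address faults
 * and the guest kernel's unaligned-access handler runs, as on real
 * hardware.
 */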

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

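/*
 * In PALmode, eight of the integer registers (r8-r14 and r25) are
 * replaced by the PALshadow registers, so cpu_pal_ir[] below starts
 * as a copy of cpu_std_ir[] and then has those eight entries
 * overridden.
 */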
#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

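/*
 * A discard tells TCG that the current value of ctx->sink is dead, so
 * nothing written to the sink (the throw-away destination standing in
 * for $31/$f31) needs to be materialized or kept live across the
 * instruction boundary.
 */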
static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        ctx->sink = NULL;
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

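/*
 * The ENV_FLAG_*_SHIFT constants are bit offsets into the 32-bit
 * env->flags word; each flag occupies its own byte, which lets the
 * accessors below load and store a single byte.  For example, a shift
 * of 8 maps to byte offset 1 on a little-endian host and to byte
 * offset 2 (3 - 8/8) on a big-endian host.
 */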
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}

static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}

static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}

static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops.  */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}

static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}

static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
}

static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}

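/*
 * STL_C/STQ_C are emulated below with a compare-and-swap against the
 * value remembered by the last load-locked: the store succeeds only if
 * memory still holds cpu_lock_value at cpu_lock_addr.  This is slightly
 * stronger than real LL/SC (an ABA sequence that restores the original
 * value still succeeds) but is the standard TCG idiom for it.
 */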
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}

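/*
 * Conditional branches below come in two shapes: when both successors
 * may be chained, emit a two-way goto_tb; otherwise select the
 * successor PC with a movcond and exit, letting the main loop look up
 * the next TB.
 */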
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}

/* Fold -0.0 for comparison with COND.  */

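/*
 * -0.0 is the quadword 0x8000000000000000.  E.g. for EQ/NE, clearing
 * the sign bit makes both +0.0 and -0.0 compare equal to integer zero;
 * for GE/LT, the setcond/neg/and sequence below maps exactly -0.0 to
 * +0.0 and leaves every other value unchanged.
 */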
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
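/* In an IEEE arithmetic instruction's fn11 field, the low 6 bits (fpfn)
   select the operation, bits <7:6> the rounding mode, and bits 8/9/10
   the /U (or /V), /I and /S qualifiers; e.g. an ADDT/SUI has
   QUAL_S | QUAL_U | QUAL_I = 0x700 set on top of its fpfn of 0x20.  */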

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }
}

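/*
 * A longword held in an FP register uses the Alpha "register format":
 * the sign is in bit 63, bit 30 of the value in bit 62, and bits <29:0>
 * in bits <58:29>.  gen_cvtlq below reassembles the canonical 64-bit
 * integer from those pieces.
 */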
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
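/*
 * Worked example: lit = 0x0f selects the low four bytes, so the mask is
 * 0x00000000ffffffff and ZAPNOT degenerates to a 32-bit zero-extension
 * (the 0x0f special case in gen_zapnoti below).
 */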

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
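/* E.g. EXTBL with a literal shift of 5 gives pos = 40 and len = 8 below,
   i.e. a plain extract of byte 5 of Ra into the low byte of Rc.  */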
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);
    }
}

static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}

static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

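        /*
         * PALcode entry points sit at fixed offsets from the PAL base:
         * privileged calls at 0x1000 + index * 64, unprivileged calls
         * at 0x2000 + (index - 0x80) * 64.  E.g. CALL_PAL 0x83
         * (callsys) enters at palbr + 0x20c0.
         */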
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000
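/*
 * CPUAlphaState field offsets are all well below bit 21, so PR_LONG can
 * be OR'ed into an offset returned by cpu_pr_data below to mark
 * processor registers that are stored as 32-bit fields.
 */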

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

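    /* A $31 source operand always reads as zero; treating it as a
       literal zero here saves special-casing rb == 31 below.  */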
1405 if (rb == 31 && !islit) {
1406 islit = true;
1407 lit = 0;
1408 }
1409
1410 ret = DISAS_NEXT;
1411 switch (opc) {
1412 case 0x00:
1413 /* CALL_PAL */
1414 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1415 break;
1416 case 0x01:
1417 /* OPC01 */
1418 goto invalid_opc;
1419 case 0x02:
1420 /* OPC02 */
1421 goto invalid_opc;
1422 case 0x03:
1423 /* OPC03 */
1424 goto invalid_opc;
1425 case 0x04:
1426 /* OPC04 */
1427 goto invalid_opc;
1428 case 0x05:
1429 /* OPC05 */
1430 goto invalid_opc;
1431 case 0x06:
1432 /* OPC06 */
1433 goto invalid_opc;
1434 case 0x07:
1435 /* OPC07 */
1436 goto invalid_opc;
1437
1438 case 0x09:
1439 /* LDAH */
1440 disp16 = (uint32_t)disp16 << 16;
1441 /* fall through */
1442 case 0x08:
1443 /* LDA */
1444 va = dest_gpr(ctx, ra);
1445 /* It's worth special-casing immediate loads. */
1446 if (rb == 31) {
1447 tcg_gen_movi_i64(va, disp16);
1448 } else {
1449 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1450 }
1451 break;
1452
1453 case 0x0A:
1454 /* LDBU */
1455 REQUIRE_AMASK(BWX);
1456 gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
1457 break;
1458 case 0x0B:
1459 /* LDQ_U */
1460 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
1461 break;
1462 case 0x0C:
1463 /* LDWU */
1464 REQUIRE_AMASK(BWX);
1465 gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
1466 break;
1467 case 0x0D:
1468 /* STW */
1469 REQUIRE_AMASK(BWX);
1470 gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
1471 break;
1472 case 0x0E:
1473 /* STB */
1474 REQUIRE_AMASK(BWX);
1475 gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
1476 break;
1477 case 0x0F:
1478 /* STQ_U */
1479 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
1480 break;
1481
1482 case 0x10:
1483 vc = dest_gpr(ctx, rc);
1484 vb = load_gpr_lit(ctx, rb, lit, islit);
1485
1486 if (ra == 31) {
1487 if (fn7 == 0x00) {
1488 /* Special case ADDL as SEXTL. */
1489 tcg_gen_ext32s_i64(vc, vb);
1490 break;
1491 }
1492 if (fn7 == 0x29) {
1493 /* Special case SUBQ as NEGQ. */
1494 tcg_gen_neg_i64(vc, vb);
1495 break;
1496 }
1497 }
1498
1499 va = load_gpr(ctx, ra);
1500 switch (fn7) {
1501 case 0x00:
1502 /* ADDL */
1503 tcg_gen_add_i64(vc, va, vb);
1504 tcg_gen_ext32s_i64(vc, vc);
1505 break;
1506 case 0x02:
1507 /* S4ADDL */
1508 tmp = tcg_temp_new();
1509 tcg_gen_shli_i64(tmp, va, 2);
1510 tcg_gen_add_i64(tmp, tmp, vb);
1511 tcg_gen_ext32s_i64(vc, tmp);
1512 break;
1513 case 0x09:
1514 /* SUBL */
1515 tcg_gen_sub_i64(vc, va, vb);
1516 tcg_gen_ext32s_i64(vc, vc);
1517 break;
1518 case 0x0B:
1519 /* S4SUBL */
1520 tmp = tcg_temp_new();
1521 tcg_gen_shli_i64(tmp, va, 2);
1522 tcg_gen_sub_i64(tmp, tmp, vb);
1523 tcg_gen_ext32s_i64(vc, tmp);
1524 break;
1525 case 0x0F:
1526 /* CMPBGE */
1527 if (ra == 31) {
1528 /* Special case 0 >= X as X == 0. */
1529 gen_helper_cmpbe0(vc, vb);
1530 } else {
1531 gen_helper_cmpbge(vc, va, vb);
1532 }
1533 break;
1534 case 0x12:
1535 /* S8ADDL */
1536 tmp = tcg_temp_new();
1537 tcg_gen_shli_i64(tmp, va, 3);
1538 tcg_gen_add_i64(tmp, tmp, vb);
1539 tcg_gen_ext32s_i64(vc, tmp);
1540 break;
1541 case 0x1B:
1542 /* S8SUBL */
1543 tmp = tcg_temp_new();
1544 tcg_gen_shli_i64(tmp, va, 3);
1545 tcg_gen_sub_i64(tmp, tmp, vb);
1546 tcg_gen_ext32s_i64(vc, tmp);
1547 break;
1548 case 0x1D:
1549 /* CMPULT */
1550 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1551 break;
1552 case 0x20:
1553 /* ADDQ */
1554 tcg_gen_add_i64(vc, va, vb);
1555 break;
1556 case 0x22:
1557 /* S4ADDQ */
1558 tmp = tcg_temp_new();
1559 tcg_gen_shli_i64(tmp, va, 2);
1560 tcg_gen_add_i64(vc, tmp, vb);
1561 break;
1562 case 0x29:
1563 /* SUBQ */
1564 tcg_gen_sub_i64(vc, va, vb);
1565 break;
1566 case 0x2B:
1567 /* S4SUBQ */
1568 tmp = tcg_temp_new();
1569 tcg_gen_shli_i64(tmp, va, 2);
1570 tcg_gen_sub_i64(vc, tmp, vb);
1571 break;
1572 case 0x2D:
1573 /* CMPEQ */
1574 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1575 break;
1576 case 0x32:
1577 /* S8ADDQ */
1578 tmp = tcg_temp_new();
1579 tcg_gen_shli_i64(tmp, va, 3);
1580 tcg_gen_add_i64(vc, tmp, vb);
1581 break;
1582 case 0x3B:
1583 /* S8SUBQ */
1584 tmp = tcg_temp_new();
1585 tcg_gen_shli_i64(tmp, va, 3);
1586 tcg_gen_sub_i64(vc, tmp, vb);
1587 break;
1588 case 0x3D:
1589 /* CMPULE */
1590 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1591 break;
1592 case 0x40:
1593 /* ADDL/V */
1594 tmp = tcg_temp_new();
1595 tcg_gen_ext32s_i64(tmp, va);
1596 tcg_gen_ext32s_i64(vc, vb);
1597 tcg_gen_add_i64(tmp, tmp, vc);
1598 tcg_gen_ext32s_i64(vc, tmp);
1599 gen_helper_check_overflow(cpu_env, vc, tmp);
1600 break;
1601 case 0x49:
1602 /* SUBL/V */
1603 tmp = tcg_temp_new();
1604 tcg_gen_ext32s_i64(tmp, va);
1605 tcg_gen_ext32s_i64(vc, vb);
1606 tcg_gen_sub_i64(tmp, tmp, vc);
1607 tcg_gen_ext32s_i64(vc, tmp);
1608 gen_helper_check_overflow(cpu_env, vc, tmp);
1609 break;
1610 case 0x4D:
1611 /* CMPLT */
1612 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1613 break;
1614 case 0x60:
1615 /* ADDQ/V */
1616 tmp = tcg_temp_new();
1617 tmp2 = tcg_temp_new();
1618 tcg_gen_eqv_i64(tmp, va, vb);
1619 tcg_gen_mov_i64(tmp2, va);
1620 tcg_gen_add_i64(vc, va, vb);
1621 tcg_gen_xor_i64(tmp2, tmp2, vc);
1622 tcg_gen_and_i64(tmp, tmp, tmp2);
1623 tcg_gen_shri_i64(tmp, tmp, 63);
1624 tcg_gen_movi_i64(tmp2, 0);
1625 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1626 break;
1627 case 0x69:
1628 /* SUBQ/V */
1629 tmp = tcg_temp_new();
1630 tmp2 = tcg_temp_new();
1631 tcg_gen_xor_i64(tmp, va, vb);
1632 tcg_gen_mov_i64(tmp2, va);
1633 tcg_gen_sub_i64(vc, va, vb);
1634 tcg_gen_xor_i64(tmp2, tmp2, vc);
1635 tcg_gen_and_i64(tmp, tmp, tmp2);
1636 tcg_gen_shri_i64(tmp, tmp, 63);
1637 tcg_gen_movi_i64(tmp2, 0);
1638 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1639 break;
1640 case 0x6D:
1641 /* CMPLE */
1642 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1643 break;
1644 default:
1645 goto invalid_opc;
1646 }
1647 break;
1648
1649 case 0x11:
1650 if (fn7 == 0x20) {
1651 if (rc == 31) {
1652 /* Special case BIS as NOP. */
1653 break;
1654 }
1655 if (ra == 31) {
1656 /* Special case BIS as MOV. */
1657 vc = dest_gpr(ctx, rc);
1658 if (islit) {
1659 tcg_gen_movi_i64(vc, lit);
1660 } else {
1661 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1662 }
1663 break;
1664 }
1665 }
1666
1667 vc = dest_gpr(ctx, rc);
1668 vb = load_gpr_lit(ctx, rb, lit, islit);
1669
1670 if (fn7 == 0x28 && ra == 31) {
1671 /* Special case ORNOT as NOT. */
1672 tcg_gen_not_i64(vc, vb);
1673 break;
1674 }
1675
1676 va = load_gpr(ctx, ra);
1677 switch (fn7) {
1678 case 0x00:
1679 /* AND */
1680 tcg_gen_and_i64(vc, va, vb);
1681 break;
1682 case 0x08:
1683 /* BIC */
1684 tcg_gen_andc_i64(vc, va, vb);
1685 break;
1686 case 0x14:
1687 /* CMOVLBS */
1688 tmp = tcg_temp_new();
1689 tcg_gen_andi_i64(tmp, va, 1);
1690 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1691 vb, load_gpr(ctx, rc));
1692 break;
1693 case 0x16:
1694 /* CMOVLBC */
1695 tmp = tcg_temp_new();
1696 tcg_gen_andi_i64(tmp, va, 1);
1697 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1698 vb, load_gpr(ctx, rc));
1699 break;
1700 case 0x20:
1701 /* BIS */
1702 tcg_gen_or_i64(vc, va, vb);
1703 break;
1704 case 0x24:
1705 /* CMOVEQ */
1706 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1707 vb, load_gpr(ctx, rc));
1708 break;
1709 case 0x26:
1710 /* CMOVNE */
1711 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1712 vb, load_gpr(ctx, rc));
1713 break;
1714 case 0x28:
1715 /* ORNOT */
1716 tcg_gen_orc_i64(vc, va, vb);
1717 break;
1718 case 0x40:
1719 /* XOR */
1720 tcg_gen_xor_i64(vc, va, vb);
1721 break;
1722 case 0x44:
1723 /* CMOVLT */
1724 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1725 vb, load_gpr(ctx, rc));
1726 break;
1727 case 0x46:
1728 /* CMOVGE */
1729 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1730 vb, load_gpr(ctx, rc));
1731 break;
1732 case 0x48:
1733 /* EQV */
1734 tcg_gen_eqv_i64(vc, va, vb);
1735 break;
1736 case 0x61:
1737 /* AMASK */
1738 REQUIRE_REG_31(ra);
1739 tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1740 break;
1741 case 0x64:
1742 /* CMOVLE */
1743 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1744 vb, load_gpr(ctx, rc));
1745 break;
1746 case 0x66:
1747 /* CMOVGT */
1748 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1749 vb, load_gpr(ctx, rc));
1750 break;
1751 case 0x6C:
1752 /* IMPLVER */
1753 REQUIRE_REG_31(ra);
1754 tcg_gen_movi_i64(vc, ctx->implver);
1755 break;
1756 default:
1757 goto invalid_opc;
1758 }
1759 break;
1760
1761 case 0x12:
1762 vc = dest_gpr(ctx, rc);
1763 va = load_gpr(ctx, ra);
1764 switch (fn7) {
1765 case 0x02:
1766 /* MSKBL */
1767 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1768 break;
1769 case 0x06:
1770 /* EXTBL */
1771 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1772 break;
1773 case 0x0B:
1774 /* INSBL */
1775 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1776 break;
1777 case 0x12:
1778 /* MSKWL */
1779 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1780 break;
1781 case 0x16:
1782 /* EXTWL */
1783 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1784 break;
1785 case 0x1B:
1786 /* INSWL */
1787 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1788 break;
1789 case 0x22:
1790 /* MSKLL */
1791 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1792 break;
1793 case 0x26:
1794 /* EXTLL */
1795 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1796 break;
1797 case 0x2B:
1798 /* INSLL */
1799 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1800 break;
1801 case 0x30:
1802 /* ZAP */
1803 if (islit) {
1804 gen_zapnoti(vc, va, ~lit);
1805 } else {
1806 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1807 }
1808 break;
1809 case 0x31:
1810 /* ZAPNOT */
1811 if (islit) {
1812 gen_zapnoti(vc, va, lit);
1813 } else {
1814 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1815 }
1816 break;
1817 case 0x32:
1818 /* MSKQL */
1819 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1820 break;
1821 case 0x34:
1822 /* SRL */
1823 if (islit) {
1824 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1825 } else {
1826 tmp = tcg_temp_new();
1827 vb = load_gpr(ctx, rb);
1828 tcg_gen_andi_i64(tmp, vb, 0x3f);
1829 tcg_gen_shr_i64(vc, va, tmp);
1830 }
1831 break;
1832 case 0x36:
1833 /* EXTQL */
1834 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1835 break;
1836 case 0x39:
1837 /* SLL */
1838 if (islit) {
1839 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1840 } else {
1841 tmp = tcg_temp_new();
1842 vb = load_gpr(ctx, rb);
1843 tcg_gen_andi_i64(tmp, vb, 0x3f);
1844 tcg_gen_shl_i64(vc, va, tmp);
1845 }
1846 break;
1847 case 0x3B:
1848 /* INSQL */
1849 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1850 break;
1851 case 0x3C:
1852 /* SRA */
1853 if (islit) {
1854 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1855 } else {
1856 tmp = tcg_temp_new();
1857 vb = load_gpr(ctx, rb);
1858 tcg_gen_andi_i64(tmp, vb, 0x3f);
1859 tcg_gen_sar_i64(vc, va, tmp);
1860 }
1861 break;
1862 case 0x52:
1863 /* MSKWH */
1864 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1865 break;
1866 case 0x57:
1867 /* INSWH */
1868 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1869 break;
1870 case 0x5A:
1871 /* EXTWH */
1872 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1873 break;
1874 case 0x62:
1875 /* MSKLH */
1876 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1877 break;
1878 case 0x67:
1879 /* INSLH */
1880 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1881 break;
1882 case 0x6A:
1883 /* EXTLH */
1884 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1885 break;
1886 case 0x72:
1887 /* MSKQH */
1888 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1889 break;
1890 case 0x77:
1891 /* INSQH */
1892 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1893 break;
1894 case 0x7A:
1895 /* EXTQH */
1896 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1897 break;
1898 default:
1899 goto invalid_opc;
1900 }
1901 break;
1902
1903 case 0x13:
1904 vc = dest_gpr(ctx, rc);
1905 vb = load_gpr_lit(ctx, rb, lit, islit);
1906 va = load_gpr(ctx, ra);
1907 switch (fn7) {
1908 case 0x00:
1909 /* MULL */
1910 tcg_gen_mul_i64(vc, va, vb);
1911 tcg_gen_ext32s_i64(vc, vc);
1912 break;
1913 case 0x20:
1914 /* MULQ */
1915 tcg_gen_mul_i64(vc, va, vb);
1916 break;
1917 case 0x30:
1918 /* UMULH */
1919 tmp = tcg_temp_new();
1920 tcg_gen_mulu2_i64(tmp, vc, va, vb);
1921 break;
1922 case 0x40:
1923 /* MULL/V */
1924 tmp = tcg_temp_new();
1925 tcg_gen_ext32s_i64(tmp, va);
1926 tcg_gen_ext32s_i64(vc, vb);
1927 tcg_gen_mul_i64(tmp, tmp, vc);
1928 tcg_gen_ext32s_i64(vc, tmp);
1929 gen_helper_check_overflow(cpu_env, vc, tmp);
1930 break;
1931 case 0x60:
1932 /* MULQ/V */
1933 tmp = tcg_temp_new();
1934 tmp2 = tcg_temp_new();
1935 tcg_gen_muls2_i64(vc, tmp, va, vb);
1936 tcg_gen_sari_i64(tmp2, vc, 63);
1937 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1938 break;
1939 default:
1940 goto invalid_opc;
1941 }
1942 break;
1943
1944 case 0x14:
1945 REQUIRE_AMASK(FIX);
1946 vc = dest_fpr(ctx, rc);
1947 switch (fpfn) { /* fn11 & 0x3F */
1948 case 0x04:
1949 /* ITOFS */
1950 REQUIRE_REG_31(rb);
1951 REQUIRE_FEN;
1952 t32 = tcg_temp_new_i32();
1953 va = load_gpr(ctx, ra);
1954 tcg_gen_extrl_i64_i32(t32, va);
1955 gen_helper_memory_to_s(vc, t32);
1956 break;
1957 case 0x0A:
1958 /* SQRTF */
1959 REQUIRE_REG_31(ra);
1960 REQUIRE_FEN;
1961 vb = load_fpr(ctx, rb);
1962 gen_helper_sqrtf(vc, cpu_env, vb);
1963 break;
1964 case 0x0B:
1965 /* SQRTS */
1966 REQUIRE_REG_31(ra);
1967 REQUIRE_FEN;
1968 gen_sqrts(ctx, rb, rc, fn11);
1969 break;
1970 case 0x14:
1971 /* ITOFF */
1972 REQUIRE_REG_31(rb);
1973 REQUIRE_FEN;
1974 t32 = tcg_temp_new_i32();
1975 va = load_gpr(ctx, ra);
1976 tcg_gen_extrl_i64_i32(t32, va);
1977 gen_helper_memory_to_f(vc, t32);
1978 break;
1979 case 0x24:
1980 /* ITOFT */
1981 REQUIRE_REG_31(rb);
1982 REQUIRE_FEN;
1983 va = load_gpr(ctx, ra);
1984 tcg_gen_mov_i64(vc, va);
1985 break;
1986 case 0x2A:
1987 /* SQRTG */
1988 REQUIRE_REG_31(ra);
1989 REQUIRE_FEN;
1990 vb = load_fpr(ctx, rb);
1991 gen_helper_sqrtg(vc, cpu_env, vb);
1992 break;
1993 case 0x02B:
1994 /* SQRTT */
1995 REQUIRE_REG_31(ra);
1996 REQUIRE_FEN;
1997 gen_sqrtt(ctx, rb, rc, fn11);
1998 break;
1999 default:
2000 goto invalid_opc;
2001 }
2002 break;
2003
2004 case 0x15:
2005 /* VAX floating point */
2006 /* XXX: rounding mode and trap are ignored (!) */
2007 vc = dest_fpr(ctx, rc);
2008 vb = load_fpr(ctx, rb);
2009 va = load_fpr(ctx, ra);
2010 switch (fpfn) { /* fn11 & 0x3F */
2011 case 0x00:
2012 /* ADDF */
2013 REQUIRE_FEN;
2014 gen_helper_addf(vc, cpu_env, va, vb);
2015 break;
2016 case 0x01:
2017 /* SUBF */
2018 REQUIRE_FEN;
2019 gen_helper_subf(vc, cpu_env, va, vb);
2020 break;
2021 case 0x02:
2022 /* MULF */
2023 REQUIRE_FEN;
2024 gen_helper_mulf(vc, cpu_env, va, vb);
2025 break;
2026 case 0x03:
2027 /* DIVF */
2028 REQUIRE_FEN;
2029 gen_helper_divf(vc, cpu_env, va, vb);
2030 break;
2031 case 0x1E:
2032 /* CVTDG -- TODO */
2033 REQUIRE_REG_31(ra);
2034 goto invalid_opc;
2035 case 0x20:
2036 /* ADDG */
2037 REQUIRE_FEN;
2038 gen_helper_addg(vc, cpu_env, va, vb);
2039 break;
2040 case 0x21:
2041 /* SUBG */
2042 REQUIRE_FEN;
2043 gen_helper_subg(vc, cpu_env, va, vb);
2044 break;
2045 case 0x22:
2046 /* MULG */
2047 REQUIRE_FEN;
2048 gen_helper_mulg(vc, cpu_env, va, vb);
2049 break;
2050 case 0x23:
2051 /* DIVG */
2052 REQUIRE_FEN;
2053 gen_helper_divg(vc, cpu_env, va, vb);
2054 break;
2055 case 0x25:
2056 /* CMPGEQ */
2057 REQUIRE_FEN;
2058 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2059 break;
2060 case 0x26:
2061 /* CMPGLT */
2062 REQUIRE_FEN;
2063 gen_helper_cmpglt(vc, cpu_env, va, vb);
2064 break;
2065 case 0x27:
2066 /* CMPGLE */
2067 REQUIRE_FEN;
2068 gen_helper_cmpgle(vc, cpu_env, va, vb);
2069 break;
2070 case 0x2C:
2071 /* CVTGF */
2072 REQUIRE_REG_31(ra);
2073 REQUIRE_FEN;
2074 gen_helper_cvtgf(vc, cpu_env, vb);
2075 break;
2076 case 0x2D:
2077 /* CVTGD -- TODO */
2078 REQUIRE_REG_31(ra);
2079 goto invalid_opc;
2080 case 0x2F:
2081 /* CVTGQ */
2082 REQUIRE_REG_31(ra);
2083 REQUIRE_FEN;
2084 gen_helper_cvtgq(vc, cpu_env, vb);
2085 break;
2086 case 0x3C:
2087 /* CVTQF */
2088 REQUIRE_REG_31(ra);
2089 REQUIRE_FEN;
2090 gen_helper_cvtqf(vc, cpu_env, vb);
2091 break;
2092 case 0x3E:
2093 /* CVTQG */
2094 REQUIRE_REG_31(ra);
2095 REQUIRE_FEN;
2096 gen_helper_cvtqg(vc, cpu_env, vb);
2097 break;
2098 default:
2099 goto invalid_opc;
2100 }
2101 break;
2102
2103 case 0x16:
2104 /* IEEE floating-point */
2105 switch (fpfn) { /* fn11 & 0x3F */
2106 case 0x00:
2107 /* ADDS */
2108 REQUIRE_FEN;
2109 gen_adds(ctx, ra, rb, rc, fn11);
2110 break;
2111 case 0x01:
2112 /* SUBS */
2113 REQUIRE_FEN;
2114 gen_subs(ctx, ra, rb, rc, fn11);
2115 break;
2116 case 0x02:
2117 /* MULS */
2118 REQUIRE_FEN;
2119 gen_muls(ctx, ra, rb, rc, fn11);
2120 break;
2121 case 0x03:
2122 /* DIVS */
2123 REQUIRE_FEN;
2124 gen_divs(ctx, ra, rb, rc, fn11);
2125 break;
2126 case 0x20:
2127 /* ADDT */
2128 REQUIRE_FEN;
2129 gen_addt(ctx, ra, rb, rc, fn11);
2130 break;
2131 case 0x21:
2132 /* SUBT */
2133 REQUIRE_FEN;
2134 gen_subt(ctx, ra, rb, rc, fn11);
2135 break;
2136 case 0x22:
2137 /* MULT */
2138 REQUIRE_FEN;
2139 gen_mult(ctx, ra, rb, rc, fn11);
2140 break;
2141 case 0x23:
2142 /* DIVT */
2143 REQUIRE_FEN;
2144 gen_divt(ctx, ra, rb, rc, fn11);
2145 break;
2146 case 0x24:
2147 /* CMPTUN */
2148 REQUIRE_FEN;
2149 gen_cmptun(ctx, ra, rb, rc, fn11);
2150 break;
2151 case 0x25:
2152 /* CMPTEQ */
2153 REQUIRE_FEN;
2154 gen_cmpteq(ctx, ra, rb, rc, fn11);
2155 break;
2156 case 0x26:
2157 /* CMPTLT */
2158 REQUIRE_FEN;
2159 gen_cmptlt(ctx, ra, rb, rc, fn11);
2160 break;
2161 case 0x27:
2162 /* CMPTLE */
2163 REQUIRE_FEN;
2164 gen_cmptle(ctx, ra, rb, rc, fn11);
2165 break;
2166 case 0x2C:
2167 REQUIRE_REG_31(ra);
2168 REQUIRE_FEN;
2169 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2170 /* CVTST */
2171 gen_cvtst(ctx, rb, rc, fn11);
2172 } else {
2173 /* CVTTS */
2174 gen_cvtts(ctx, rb, rc, fn11);
2175 }
2176 break;
2177 case 0x2F:
2178 /* CVTTQ */
2179 REQUIRE_REG_31(ra);
2180 REQUIRE_FEN;
2181 gen_cvttq(ctx, rb, rc, fn11);
2182 break;
2183 case 0x3C:
2184 /* CVTQS */
2185 REQUIRE_REG_31(ra);
2186 REQUIRE_FEN;
2187 gen_cvtqs(ctx, rb, rc, fn11);
2188 break;
2189 case 0x3E:
2190 /* CVTQT */
2191 REQUIRE_REG_31(ra);
2192 REQUIRE_FEN;
2193 gen_cvtqt(ctx, rb, rc, fn11);
2194 break;
2195 default:
2196 goto invalid_opc;
2197 }
2198 break;
2199
2200 case 0x17:
2201 switch (fn11) {
2202 case 0x010:
2203 /* CVTLQ */
2204 REQUIRE_REG_31(ra);
2205 REQUIRE_FEN;
2206 vc = dest_fpr(ctx, rc);
2207 vb = load_fpr(ctx, rb);
2208 gen_cvtlq(vc, vb);
2209 break;
2210 case 0x020:
2211 /* CPYS */
2212 REQUIRE_FEN;
2213 if (rc == 31) {
2214 /* Special case CPYS as FNOP. */
2215 } else {
2216 vc = dest_fpr(ctx, rc);
2217 va = load_fpr(ctx, ra);
2218 if (ra == rb) {
2219 /* Special case CPYS as FMOV. */
2220 tcg_gen_mov_i64(vc, va);
2221 } else {
2222 vb = load_fpr(ctx, rb);
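/* Bit 63 (the sign) comes from ra, all other bits from rb. */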
2223 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2224 }
2225 }
2226 break;
2227 case 0x021:
2228 /* CPYSN */
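/* As CPYS, but the sign bit taken from ra is complemented. */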
2229 REQUIRE_FEN;
2230 vc = dest_fpr(ctx, rc);
2231 vb = load_fpr(ctx, rb);
2232 va = load_fpr(ctx, ra);
2233 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2234 break;
2235 case 0x022:
2236 /* CPYSE */
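/* Sign and exponent (top twelve bits) from ra, fraction from rb. */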
2237 REQUIRE_FEN;
2238 vc = dest_fpr(ctx, rc);
2239 vb = load_fpr(ctx, rb);
2240 va = load_fpr(ctx, ra);
2241 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2242 break;
2243 case 0x024:
2244 /* MT_FPCR */
2245 REQUIRE_FEN;
2246 va = load_fpr(ctx, ra);
2247 gen_helper_store_fpcr(cpu_env, va);
2248 if (ctx->tb_rm == QUAL_RM_D) {
2249 /* Force the rounding mode to be re-copied to fp_status
2250 the next time dynamic rounding is used. */
2251 ctx->tb_rm = -1;
2252 }
2253 break;
2254 case 0x025:
2255 /* MF_FPCR */
2256 REQUIRE_FEN;
2257 va = dest_fpr(ctx, ra);
2258 gen_helper_load_fpcr(va, cpu_env);
2259 break;
2260 case 0x02A:
2261 /* FCMOVEQ */
2262 REQUIRE_FEN;
2263 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2264 break;
2265 case 0x02B:
2266 /* FCMOVNE */
2267 REQUIRE_FEN;
2268 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2269 break;
2270 case 0x02C:
2271 /* FCMOVLT */
2272 REQUIRE_FEN;
2273 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2274 break;
2275 case 0x02D:
2276 /* FCMOVGE */
2277 REQUIRE_FEN;
2278 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2279 break;
2280 case 0x02E:
2281 /* FCMOVLE */
2282 REQUIRE_FEN;
2283 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2284 break;
2285 case 0x02F:
2286 /* FCMOVGT */
2287 REQUIRE_FEN;
2288 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2289 break;
2290 case 0x030: /* CVTQL */
2291 case 0x130: /* CVTQL/V */
2292 case 0x530: /* CVTQL/SV */
2293 REQUIRE_REG_31(ra);
2294 REQUIRE_FEN;
2295 vc = dest_fpr(ctx, rc);
2296 vb = load_fpr(ctx, rb);
2297 gen_helper_cvtql(vc, cpu_env, vb);
2298 gen_fp_exc_raise(rc, fn11);
2299 break;
2300 default:
2301 goto invalid_opc;
2302 }
2303 break;
2304
2305 case 0x18:
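/* Misc format: the function code lives in the 16-bit displacement field. */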
2306 switch ((uint16_t)disp16) {
2307 case 0x0000:
2308 /* TRAPB */
2309 /* No-op */
2310 break;
2311 case 0x0400:
2312 /* EXCB */
2313 /* No-op */
2314 break;
2315 case 0x4000:
2316 /* MB */
2317 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2318 break;
2319 case 0x4400:
2320 /* WMB */
2321 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2322 break;
2323 case 0x8000:
2324 /* FETCH */
2325 /* No-op */
2326 break;
2327 case 0xA000:
2328 /* FETCH_M */
2329 /* No-op */
2330 break;
2331 case 0xC000:
2332 /* RPCC */
2333 va = dest_gpr(ctx, ra);
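/* Under icount, reading the cycle counter is an I/O operation; if so, the TB must end after this insn. */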
2334 if (translator_io_start(&ctx->base)) {
2335 ret = DISAS_PC_STALE;
2336 }
2337 gen_helper_load_pcc(va, cpu_env);
2338 break;
2339 case 0xE000:
2340 /* RC */
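/* Read the RX flag into ra, then clear it. */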
2341 gen_rx(ctx, ra, 0);
2342 break;
2343 case 0xE800:
2344 /* ECB */
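/* No-op */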
2345 break;
2346 case 0xF000:
2347 /* RS */
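/* Read the RX flag into ra, then set it. */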
2348 gen_rx(ctx, ra, 1);
2349 break;
2350 case 0xF800:
2351 /* WH64 */
2352 /* No-op */
2353 break;
2354 case 0xFC00:
2355 /* WH64EN */
2356 /* No-op */
2357 break;
2358 default:
2359 goto invalid_opc;
2360 }
2361 break;
2362
2363 case 0x19:
2364 /* HW_MFPR (PALcode) */
2365 #ifndef CONFIG_USER_ONLY
2366 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2367 va = dest_gpr(ctx, ra);
2368 ret = gen_mfpr(ctx, va, insn & 0xffff);
2369 break;
2370 #else
2371 goto invalid_opc;
2372 #endif
2373
2374 case 0x1A:
2375 /* JMP, JSR, RET, JSR_COROUTINE. These differ only in the branch
2376 prediction stack action, which of course we don't implement. */
2377 vb = load_gpr(ctx, rb);
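/* The low two bits of the target address are ignored; the PC stays longword-aligned. */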
2378 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2379 if (ra != 31) {
2380 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2381 }
2382 ret = DISAS_PC_UPDATED;
2383 break;
2384
2385 case 0x1B:
2386 /* HW_LD (PALcode) */
2387 #ifndef CONFIG_USER_ONLY
2388 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2389 {
2390 TCGv addr = tcg_temp_new();
2391 vb = load_gpr(ctx, rb);
2392 va = dest_gpr(ctx, ra);
2393
2394 tcg_gen_addi_i64(addr, vb, disp12);
2395 switch ((insn >> 12) & 0xF) {
2396 case 0x0:
2397 /* Longword physical access (hw_ldl/p) */
2398 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2399 break;
2400 case 0x1:
2401 /* Quadword physical access (hw_ldq/p) */
2402 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2403 break;
2404 case 0x2:
2405 /* Longword physical access with lock (hw_ldl_l/p) */
2406 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2407 tcg_gen_mov_i64(cpu_lock_addr, addr);
2408 tcg_gen_mov_i64(cpu_lock_value, va);
2409 break;
2410 case 0x3:
2411 /* Quadword physical access with lock (hw_ldq_l/p) */
2412 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2413 tcg_gen_mov_i64(cpu_lock_addr, addr);
2414 tcg_gen_mov_i64(cpu_lock_value, va);
2415 break;
2416 case 0x4:
2417 /* Longword virtual PTE fetch (hw_ldl/v) */
2418 goto invalid_opc;
2419 case 0x5:
2420 /* Quadword virtual PTE fetch (hw_ldq/v) */
2421 goto invalid_opc;
2423 case 0x6:
2424 /* Invalid */
2425 goto invalid_opc;
2426 case 0x7:
2427 /* Invalid */
2428 goto invalid_opc;
2429 case 0x8:
2430 /* Longword virtual access (hw_ldl) */
2431 goto invalid_opc;
2432 case 0x9:
2433 /* Quadword virtual access (hw_ldq) */
2434 goto invalid_opc;
2435 case 0xA:
2436 /* Longword virtual access with protection check (hw_ldl/w) */
2437 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2438 MO_LESL | MO_ALIGN);
2439 break;
2440 case 0xB:
2441 /* Quadword virtual access with protection check (hw_ldq/w) */
2442 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2443 MO_LEUQ | MO_ALIGN);
2444 break;
2445 case 0xC:
2446 /* Longword virtual access with alt access mode (hw_ldl/a) */
2447 goto invalid_opc;
2448 case 0xD:
2449 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2450 goto invalid_opc;
2451 case 0xE:
2452 /* Longword virtual access with alternate access mode and
2453 protection checks (hw_ldl/wa) */
2454 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2455 MO_LESL | MO_ALIGN);
2456 break;
2457 case 0xF:
2458 /* Quadword virtual access with alternate access mode and
2459 protection checks (hw_ldq/wa) */
2460 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2461 MO_LEUQ | MO_ALIGN);
2462 break;
2463 }
2464 break;
2465 }
2466 #else
2467 goto invalid_opc;
2468 #endif
2469
2470 case 0x1C:
2471 vc = dest_gpr(ctx, rc);
2472 if (fn7 == 0x70) {
2473 /* FTOIT */
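/* The FPR already holds the raw 64-bit bit pattern, so this is a plain register-to-register move. */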
2474 REQUIRE_AMASK(FIX);
2475 REQUIRE_REG_31(rb);
2476 va = load_fpr(ctx, ra);
2477 tcg_gen_mov_i64(vc, va);
2478 break;
2479 } else if (fn7 == 0x78) {
2480 /* FTOIS */
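/* Repack the register-format value into the 32-bit S memory format, then sign-extend it into the integer register. */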
2481 REQUIRE_AMASK(FIX);
2482 REQUIRE_REG_31(rb);
2483 t32 = tcg_temp_new_i32();
2484 va = load_fpr(ctx, ra);
2485 gen_helper_s_to_memory(t32, va);
2486 tcg_gen_ext_i32_i64(vc, t32);
2487 break;
2488 }
2489
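/* Operate format: operand b is either a register or an 8-bit zero-extended literal. */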
2490 vb = load_gpr_lit(ctx, rb, lit, islit);
2491 switch (fn7) {
2492 case 0x00:
2493 /* SEXTB */
2494 REQUIRE_AMASK(BWX);
2495 REQUIRE_REG_31(ra);
2496 tcg_gen_ext8s_i64(vc, vb);
2497 break;
2498 case 0x01:
2499 /* SEXTW */
2500 REQUIRE_AMASK(BWX);
2501 REQUIRE_REG_31(ra);
2502 tcg_gen_ext16s_i64(vc, vb);
2503 break;
2504 case 0x30:
2505 /* CTPOP */
2506 REQUIRE_AMASK(CIX);
2507 REQUIRE_REG_31(ra);
2508 REQUIRE_NO_LIT;
2509 tcg_gen_ctpop_i64(vc, vb);
2510 break;
2511 case 0x31:
2512 /* PERR */
2513 REQUIRE_AMASK(MVI);
2514 REQUIRE_NO_LIT;
2515 va = load_gpr(ctx, ra);
2516 gen_helper_perr(vc, va, vb);
2517 break;
2518 case 0x32:
2519 /* CTLZ */
2520 REQUIRE_AMASK(CIX);
2521 REQUIRE_REG_31(ra);
2522 REQUIRE_NO_LIT;
2523 tcg_gen_clzi_i64(vc, vb, 64);
2524 break;
2525 case 0x33:
2526 /* CTTZ */
2527 REQUIRE_AMASK(CIX);
2528 REQUIRE_REG_31(ra);
2529 REQUIRE_NO_LIT;
2530 tcg_gen_ctzi_i64(vc, vb, 64);
2531 break;
2532 case 0x34:
2533 /* UNPKBW */
2534 REQUIRE_AMASK(MVI);
2535 REQUIRE_REG_31(ra);
2536 REQUIRE_NO_LIT;
2537 gen_helper_unpkbw(vc, vb);
2538 break;
2539 case 0x35:
2540 /* UNPKBL */
2541 REQUIRE_AMASK(MVI);
2542 REQUIRE_REG_31(ra);
2543 REQUIRE_NO_LIT;
2544 gen_helper_unpkbl(vc, vb);
2545 break;
2546 case 0x36:
2547 /* PKWB */
2548 REQUIRE_AMASK(MVI);
2549 REQUIRE_REG_31(ra);
2550 REQUIRE_NO_LIT;
2551 gen_helper_pkwb(vc, vb);
2552 break;
2553 case 0x37:
2554 /* PKLB */
2555 REQUIRE_AMASK(MVI);
2556 REQUIRE_REG_31(ra);
2557 REQUIRE_NO_LIT;
2558 gen_helper_pklb(vc, vb);
2559 break;
2560 case 0x38:
2561 /* MINSB8 */
2562 REQUIRE_AMASK(MVI);
2563 va = load_gpr(ctx, ra);
2564 gen_helper_minsb8(vc, va, vb);
2565 break;
2566 case 0x39:
2567 /* MINSW4 */
2568 REQUIRE_AMASK(MVI);
2569 va = load_gpr(ctx, ra);
2570 gen_helper_minsw4(vc, va, vb);
2571 break;
2572 case 0x3A:
2573 /* MINUB8 */
2574 REQUIRE_AMASK(MVI);
2575 va = load_gpr(ctx, ra);
2576 gen_helper_minub8(vc, va, vb);
2577 break;
2578 case 0x3B:
2579 /* MINUW4 */
2580 REQUIRE_AMASK(MVI);
2581 va = load_gpr(ctx, ra);
2582 gen_helper_minuw4(vc, va, vb);
2583 break;
2584 case 0x3C:
2585 /* MAXUB8 */
2586 REQUIRE_AMASK(MVI);
2587 va = load_gpr(ctx, ra);
2588 gen_helper_maxub8(vc, va, vb);
2589 break;
2590 case 0x3D:
2591 /* MAXUW4 */
2592 REQUIRE_AMASK(MVI);
2593 va = load_gpr(ctx, ra);
2594 gen_helper_maxuw4(vc, va, vb);
2595 break;
2596 case 0x3E:
2597 /* MAXSB8 */
2598 REQUIRE_AMASK(MVI);
2599 va = load_gpr(ctx, ra);
2600 gen_helper_maxsb8(vc, va, vb);
2601 break;
2602 case 0x3F:
2603 /* MAXSW4 */
2604 REQUIRE_AMASK(MVI);
2605 va = load_gpr(ctx, ra);
2606 gen_helper_maxsw4(vc, va, vb);
2607 break;
2608 default:
2609 goto invalid_opc;
2610 }
2611 break;
2612
2613 case 0x1D:
2614 /* HW_MTPR (PALcode) */
2615 #ifndef CONFIG_USER_ONLY
2616 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2617 vb = load_gpr(ctx, rb);
2618 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2619 break;
2620 #else
2621 goto invalid_opc;
2622 #endif
2623
2624 case 0x1E:
2625 /* HW_RET (PALcode) */
2626 #ifndef CONFIG_USER_ONLY
2627 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2628 if (rb == 31) {
2629 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2630 address from EXC_ADDR. This turns out to be useful for our
2631 emulation PALcode, so continue to accept it. */
2632 vb = dest_sink(ctx);
2633 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2634 } else {
2635 vb = load_gpr(ctx, rb);
2636 }
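/* Invalidate any outstanding lock, clear the RX flag, and take the new PALmode state from bit 0 of the target address. */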
2637 tcg_gen_movi_i64(cpu_lock_addr, -1);
2638 st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
2639 tmp = tcg_temp_new();
2640 tcg_gen_andi_i64(tmp, vb, 1);
2641 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2642 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2643 /* Allow interrupts to be recognized right away. */
2644 ret = DISAS_PC_UPDATED_NOCHAIN;
2645 break;
2646 #else
2647 goto invalid_opc;
2648 #endif
2649
2650 case 0x1F:
2651 /* HW_ST (PALcode) */
2652 #ifndef CONFIG_USER_ONLY
2653 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2654 {
2655 switch ((insn >> 12) & 0xF) {
2656 case 0x0:
2657 /* Longword physical access */
2658 va = load_gpr(ctx, ra);
2659 vb = load_gpr(ctx, rb);
2660 tmp = tcg_temp_new();
2661 tcg_gen_addi_i64(tmp, vb, disp12);
2662 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2663 break;
2664 case 0x1:
2665 /* Quadword physical access */
2666 va = load_gpr(ctx, ra);
2667 vb = load_gpr(ctx, rb);
2668 tmp = tcg_temp_new();
2669 tcg_gen_addi_i64(tmp, vb, disp12);
2670 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2671 break;
2672 case 0x2:
2673 /* Longword physical access with lock */
2674 ret = gen_store_conditional(ctx, ra, rb, disp12,
2675 MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2676 break;
2677 case 0x3:
2678 /* Quadword physical access with lock */
2679 ret = gen_store_conditional(ctx, ra, rb, disp12,
2680 MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2681 break;
2682 case 0x4:
2683 /* Longword virtual access */
2684 goto invalid_opc;
2685 case 0x5:
2686 /* Quadword virtual access */
2687 goto invalid_opc;
2688 case 0x6:
2689 /* Invalid */
2690 goto invalid_opc;
2691 case 0x7:
2692 /* Invalid */
2693 goto invalid_opc;
2694 case 0x8:
2695 /* Invalid */
2696 goto invalid_opc;
2697 case 0x9:
2698 /* Invalid */
2699 goto invalid_opc;
2700 case 0xA:
2701 /* Invalid */
2702 goto invalid_opc;
2703 case 0xB:
2704 /* Invalid */
2705 goto invalid_opc;
2706 case 0xC:
2707 /* Longword virtual access with alternate access mode */
2708 goto invalid_opc;
2709 case 0xD:
2710 /* Quadword virtual access with alternate access mode */
2711 goto invalid_opc;
2712 case 0xE:
2713 /* Invalid */
2714 goto invalid_opc;
2715 case 0xF:
2716 /* Invalid */
2717 goto invalid_opc;
2718 }
2719 break;
2720 }
2721 #else
2722 goto invalid_opc;
2723 #endif
2724 case 0x20:
2725 /* LDF */
2726 REQUIRE_FEN;
2727 gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
2728 break;
2729 case 0x21:
2730 /* LDG */
2731 REQUIRE_FEN;
2732 gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
2733 break;
2734 case 0x22:
2735 /* LDS */
2736 REQUIRE_FEN;
2737 gen_load_fp(ctx, ra, rb, disp16, gen_lds);
2738 break;
2739 case 0x23:
2740 /* LDT */
2741 REQUIRE_FEN;
2742 gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
2743 break;
2744 case 0x24:
2745 /* STF */
2746 REQUIRE_FEN;
2747 gen_store_fp(ctx, ra, rb, disp16, gen_stf);
2748 break;
2749 case 0x25:
2750 /* STG */
2751 REQUIRE_FEN;
2752 gen_store_fp(ctx, ra, rb, disp16, gen_stg);
2753 break;
2754 case 0x26:
2755 /* STS */
2756 REQUIRE_FEN;
2757 gen_store_fp(ctx, ra, rb, disp16, gen_sts);
2758 break;
2759 case 0x27:
2760 /* STT */
2761 REQUIRE_FEN;
2762 gen_store_fp(ctx, ra, rb, disp16, gen_stt);
2763 break;
2764 case 0x28:
2765 /* LDL */
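/* The trailing gen_load_int arguments select low-address-bit clearing (LDx_U) and load-locked behavior. */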
2766 gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
2767 break;
2768 case 0x29:
2769 /* LDQ */
2770 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
2771 break;
2772 case 0x2A:
2773 /* LDL_L */
2774 gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
2775 break;
2776 case 0x2B:
2777 /* LDQ_L */
2778 gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
2779 break;
2780 case 0x2C:
2781 /* STL */
2782 gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
2783 break;
2784 case 0x2D:
2785 /* STQ */
2786 gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
2787 break;
2788 case 0x2E:
2789 /* STL_C */
2790 ret = gen_store_conditional(ctx, ra, rb, disp16,
2791 ctx->mem_idx, MO_LESL | MO_ALIGN);
2792 break;
2793 case 0x2F:
2794 /* STQ_C */
2795 ret = gen_store_conditional(ctx, ra, rb, disp16,
2796 ctx->mem_idx, MO_LEUQ | MO_ALIGN);
2797 break;
2798 case 0x30:
2799 /* BR */
2800 ret = gen_bdirect(ctx, ra, disp21);
2801 break;
2802 case 0x31: /* FBEQ */
2803 REQUIRE_FEN;
2804 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2805 break;
2806 case 0x32: /* FBLT */
2807 REQUIRE_FEN;
2808 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2809 break;
2810 case 0x33: /* FBLE */
2811 REQUIRE_FEN;
2812 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2813 break;
2814 case 0x34:
2815 /* BSR */
2816 ret = gen_bdirect(ctx, ra, disp21);
2817 break;
2818 case 0x35: /* FBNE */
2819 REQUIRE_FEN;
2820 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2821 break;
2822 case 0x36: /* FBGE */
2823 REQUIRE_FEN;
2824 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2825 break;
2826 case 0x37: /* FBGT */
2827 REQUIRE_FEN;
2828 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2829 break;
2830 case 0x38:
2831 /* BLBC */
2832 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2833 break;
2834 case 0x39:
2835 /* BEQ */
2836 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2837 break;
2838 case 0x3A:
2839 /* BLT */
2840 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2841 break;
2842 case 0x3B:
2843 /* BLE */
2844 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2845 break;
2846 case 0x3C:
2847 /* BLBS */
2848 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2849 break;
2850 case 0x3D:
2851 /* BNE */
2852 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2853 break;
2854 case 0x3E:
2855 /* BGE */
2856 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2857 break;
2858 case 0x3F:
2859 /* BGT */
2860 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2861 break;
2862 invalid_opc:
2863 ret = gen_invalid(ctx);
2864 break;
2865 raise_fen:
2866 ret = gen_excp(ctx, EXCP_FEN, 0);
2867 break;
2868 }
2869
2870 return ret;
2871 }
2872
2873 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2874 {
2875 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2876 CPUAlphaState *env = cpu->env_ptr;
2877 int64_t bound;
2878
2879 ctx->tbflags = ctx->base.tb->flags;
2880 ctx->mem_idx = cpu_mmu_index(env, false);
2881 ctx->implver = env->implver;
2882 ctx->amask = env->amask;
2883
2884 #ifdef CONFIG_USER_ONLY
2885 ctx->ir = cpu_std_ir;
2886 ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
2887 #else
2888 ctx->palbr = env->palbr;
2889 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2890 #endif
2891
2892 /* ??? Every TB begins with unset rounding mode, to be initialized on
2893 the first fp insn of the TB. Alternatively we could define a proper
2894 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2895 to reset the FP_STATUS to that default at the end of any TB that
2896 changes the default. We could even (gasp) dynamically figure out
2897 what default would be most efficient given the running program. */
2898 ctx->tb_rm = -1;
2899 /* Similarly for flush-to-zero. */
2900 ctx->tb_ftz = -1;
2901
2902 ctx->zero = NULL;
2903 ctx->sink = NULL;
2904
2905 /* Bound the number of insns to execute to those left on the page. */
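/* TARGET_PAGE_MASK is negative, so pc_first | TARGET_PAGE_MASK is minus the number of bytes left on the page; negate and divide by the 4-byte insn size. */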
2906 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
2907 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2908 }
2909
2910 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
2911 {
2912 }
2913
2914 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
2915 {
2916 tcg_gen_insn_start(dcbase->pc_next);
2917 }
2918
2919 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2920 {
2921 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2922 CPUAlphaState *env = cpu->env_ptr;
2923 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
2924
2925 ctx->base.pc_next += 4;
2926 ctx->base.is_jmp = translate_one(ctx, insn);
2927
2928 free_context_temps(ctx);
2929 }
2930
2931 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
2932 {
2933 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2934
2935 switch (ctx->base.is_jmp) {
2936 case DISAS_NORETURN:
2937 break;
2938 case DISAS_TOO_MANY:
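/* Chain directly to the next TB when the branch target stays on the same page. */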
2939 if (use_goto_tb(ctx, ctx->base.pc_next)) {
2940 tcg_gen_goto_tb(0);
2941 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
2942 tcg_gen_exit_tb(ctx->base.tb, 0);
2943 }
2944 /* FALLTHRU */
2945 case DISAS_PC_STALE:
2946 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
2947 /* FALLTHRU */
2948 case DISAS_PC_UPDATED:
2949 tcg_gen_lookup_and_goto_ptr();
2950 break;
2951 case DISAS_PC_UPDATED_NOCHAIN:
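/* Exit to the main loop without chaining so that pending interrupts are recognized immediately. */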
2952 tcg_gen_exit_tb(NULL, 0);
2953 break;
2954 default:
2955 g_assert_not_reached();
2956 }
2957 }
2958
2959 static void alpha_tr_disas_log(const DisasContextBase *dcbase,
2960 CPUState *cpu, FILE *logfile)
2961 {
2962 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2963 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
2964 }
2965
2966 static const TranslatorOps alpha_tr_ops = {
2967 .init_disas_context = alpha_tr_init_disas_context,
2968 .tb_start = alpha_tr_tb_start,
2969 .insn_start = alpha_tr_insn_start,
2970 .translate_insn = alpha_tr_translate_insn,
2971 .tb_stop = alpha_tr_tb_stop,
2972 .disas_log = alpha_tr_disas_log,
2973 };
2974
2975 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
2976 target_ulong pc, void *host_pc)
2977 {
2978 DisasContext dc;
2979 translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
2980 }