/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    /* The set of registers active in the current context. */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = false;
    int i;

    if (done_init) {
        return;
    }
    done_init = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Various other loads with ra $31 are
       forms of prefetch, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
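
/* For example, the canonical KSEG base 0xfffffc0000000000 passes all three
   tests above: it is negative, bits <42:41> are 10b, and it remains properly
   sign-extended above the implemented virtual address space; an ordinary
   user-space text address such as 0x120001000 already fails the sign test.  */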

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return NO_EXIT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
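
/* A worked example for the GE/LT case above: src == -0.0, i.e.
   0x8000000000000000, fails the NE comparison, so DEST becomes 0 & src
   == +0.0; any other value sets DEST to -1 & src == src, unchanged.  */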

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
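
/* Concretely: the longword's bits <31:30> live at register bits <63:62> and
   bits <29:0> at <58:29>, so the value assembled here is
   ((vb >> 32) & ~(uint64_t)0x3fffffff) | ((vb >> 29) & 0x3fffffff), with the
   arithmetic shift replicating the sign into the upper 32 bits.  */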

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use chopped rounding;
       special-case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    helper(vc, cpu_env, vb);
    if (fn11 & QUAL_I) {
        gen_fp_exc_raise(rc, fn11);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
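
/* For instance, zapnot_mask(0x0f) == 0x00000000ffffffffull (keep the low
   four bytes) and zapnot_mask(0x80) == 0xff00000000000000ull (keep only the
   high byte); these expansions are what justify the ext8u/ext16u/ext32u
   special cases in gen_zapnoti below.  */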

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count - 1
           and 1.  Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
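
/* Tracing the variable-shift path above with B & 7 == 1: ~(1 * 8) & 63 == 55,
   so the value is shifted right by 55 and then by 1 more, 56 bits in total,
   which is exactly 64 - 1 * 8.  With B & 7 == 0 the two parts sum to
   63 + 1 == 64, producing the required zero without relying on a 64-bit
   shift count, which TCG does not define.  */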

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
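
/* The same split-shift trick applies to the expanded byte mask here: for an
   input with <2:0> == 0 the mask is shifted right 63 + 1 == 64 bits and
   becomes zero, so e.g. MSKQH with a zero byte offset correctly leaves va
   untouched.  */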

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);
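
        /* For example, the unprivileged CALL_PAL 0x83 (callsys) vectors to
           palbr + 0x2000 + 3 * 64, while a privileged CALL_PAL 0x02 (draina),
           had it not already been handled as a no-op above, would vector to
           palbr + 0x1000 + 2 * 64.  */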

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
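
/* Both flags sit well above any field offset within CPUAlphaState, so
   cpu_pr_data() below can encode "offset | width flag" in a single int;
   a PR_BYTE-tagged return value of 0x100008 (the 8 is illustrative, not a
   real offset) would decode to an 8-bit field at offset 8.  */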

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
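
    /* For the operate format this corresponds to the layout
       opc<31:26> ra<25:21> rb<20:16> fn7<11:5> rc<4:0>, where bit 12
       (islit) selects an 8-bit literal in <20:13> in place of rb;
       branch-format instructions instead use <20:0> as disp21, and
       memory-format instructions <15:0> as disp16.  */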

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
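            /* Signed overflow occurred iff the operands have the same sign
               and the sum's sign differs: bit 63 of ~(va ^ vb) & (va ^ vc).
               That bit is moved to bit 0 and compared against zero by the
               helper, which traps on a mismatch.  */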
1670 tmp = tcg_temp_new();
1671 tmp2 = tcg_temp_new();
1672 tcg_gen_eqv_i64(tmp, va, vb);
1673 tcg_gen_mov_i64(tmp2, va);
1674 tcg_gen_add_i64(vc, va, vb);
1675 tcg_gen_xor_i64(tmp2, tmp2, vc);
1676 tcg_gen_and_i64(tmp, tmp, tmp2);
1677 tcg_gen_shri_i64(tmp, tmp, 63);
1678 tcg_gen_movi_i64(tmp2, 0);
1679 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1680 tcg_temp_free(tmp);
1681 tcg_temp_free(tmp2);
1682 break;
1683 case 0x69:
1684 /* SUBQ/V */
1685 tmp = tcg_temp_new();
1686 tmp2 = tcg_temp_new();
1687 tcg_gen_xor_i64(tmp, va, vb);
1688 tcg_gen_mov_i64(tmp2, va);
1689 tcg_gen_sub_i64(vc, va, vb);
1690 tcg_gen_xor_i64(tmp2, tmp2, vc);
1691 tcg_gen_and_i64(tmp, tmp, tmp2);
1692 tcg_gen_shri_i64(tmp, tmp, 63);
1693 tcg_gen_movi_i64(tmp2, 0);
1694 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1695 tcg_temp_free(tmp);
1696 tcg_temp_free(tmp2);
1697 break;
1698 case 0x6D:
1699 /* CMPLE */
1700 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1701 break;
1702 default:
1703 goto invalid_opc;
1704 }
1705 break;
1706
1707 case 0x11:
1708 if (fn7 == 0x20) {
1709 if (rc == 31) {
1710 /* Special case BIS as NOP. */
1711 break;
1712 }
1713 if (ra == 31) {
1714 /* Special case BIS as MOV. */
1715 vc = dest_gpr(ctx, rc);
1716 if (islit) {
1717 tcg_gen_movi_i64(vc, lit);
1718 } else {
1719 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1720 }
1721 break;
1722 }
1723 }
1724
1725 vc = dest_gpr(ctx, rc);
1726 vb = load_gpr_lit(ctx, rb, lit, islit);
1727
1728 if (fn7 == 0x28 && ra == 31) {
1729 /* Special case ORNOT as NOT. */
1730 tcg_gen_not_i64(vc, vb);
1731 break;
1732 }
1733
1734 va = load_gpr(ctx, ra);
1735 switch (fn7) {
1736 case 0x00:
1737 /* AND */
1738 tcg_gen_and_i64(vc, va, vb);
1739 break;
1740 case 0x08:
1741 /* BIC */
1742 tcg_gen_andc_i64(vc, va, vb);
1743 break;
1744 case 0x14:
1745 /* CMOVLBS */
1746 tmp = tcg_temp_new();
1747 tcg_gen_andi_i64(tmp, va, 1);
1748 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1749 vb, load_gpr(ctx, rc));
1750 tcg_temp_free(tmp);
1751 break;
1752 case 0x16:
1753 /* CMOVLBC */
1754 tmp = tcg_temp_new();
1755 tcg_gen_andi_i64(tmp, va, 1);
1756 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1757 vb, load_gpr(ctx, rc));
1758 tcg_temp_free(tmp);
1759 break;
1760 case 0x20:
1761 /* BIS */
1762 tcg_gen_or_i64(vc, va, vb);
1763 break;
1764 case 0x24:
1765 /* CMOVEQ */
1766 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1767 vb, load_gpr(ctx, rc));
1768 break;
1769 case 0x26:
1770 /* CMOVNE */
1771 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1772 vb, load_gpr(ctx, rc));
1773 break;
1774 case 0x28:
1775 /* ORNOT */
1776 tcg_gen_orc_i64(vc, va, vb);
1777 break;
1778 case 0x40:
1779 /* XOR */
1780 tcg_gen_xor_i64(vc, va, vb);
1781 break;
1782 case 0x44:
1783 /* CMOVLT */
1784 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1785 vb, load_gpr(ctx, rc));
1786 break;
1787 case 0x46:
1788 /* CMOVGE */
1789 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1790 vb, load_gpr(ctx, rc));
1791 break;
1792 case 0x48:
1793 /* EQV */
1794 tcg_gen_eqv_i64(vc, va, vb);
1795 break;
1796 case 0x61:
1797 /* AMASK */
1798 REQUIRE_REG_31(ra);
1799 {
1800 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
1801 tcg_gen_andi_i64(vc, vb, ~amask);
1802 }
1803 break;
1804 case 0x64:
1805 /* CMOVLE */
1806 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1807 vb, load_gpr(ctx, rc));
1808 break;
1809 case 0x66:
1810 /* CMOVGT */
1811 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1812 vb, load_gpr(ctx, rc));
1813 break;
1814 case 0x6C:
1815 /* IMPLVER */
1816 REQUIRE_REG_31(ra);
1817 tcg_gen_movi_i64(vc, ctx->implver);
1818 break;
1819 default:
1820 goto invalid_opc;
1821 }
1822 break;
1823
1824 case 0x12:
1825 vc = dest_gpr(ctx, rc);
1826 va = load_gpr(ctx, ra);
1827 switch (fn7) {
1828 case 0x02:
1829 /* MSKBL */
1830 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1831 break;
1832 case 0x06:
1833 /* EXTBL */
1834 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1835 break;
1836 case 0x0B:
1837 /* INSBL */
1838 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1839 break;
1840 case 0x12:
1841 /* MSKWL */
1842 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1843 break;
1844 case 0x16:
1845 /* EXTWL */
1846 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1847 break;
1848 case 0x1B:
1849 /* INSWL */
1850 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1851 break;
1852 case 0x22:
1853 /* MSKLL */
1854 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1855 break;
1856 case 0x26:
1857 /* EXTLL */
1858 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1859 break;
1860 case 0x2B:
1861 /* INSLL */
1862 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1863 break;
1864 case 0x30:
1865 /* ZAP */
1866 if (islit) {
1867 gen_zapnoti(vc, va, ~lit);
1868 } else {
1869 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1870 }
1871 break;
1872 case 0x31:
1873 /* ZAPNOT */
1874 if (islit) {
1875 gen_zapnoti(vc, va, lit);
1876 } else {
1877 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1878 }
1879 break;
1880 case 0x32:
1881 /* MSKQL */
1882 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1883 break;
1884 case 0x34:
1885 /* SRL */
1886 if (islit) {
1887 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1888 } else {
1889 tmp = tcg_temp_new();
1890 vb = load_gpr(ctx, rb);
1891 tcg_gen_andi_i64(tmp, vb, 0x3f);
1892 tcg_gen_shr_i64(vc, va, tmp);
1893 tcg_temp_free(tmp);
1894 }
1895 break;
1896 case 0x36:
1897 /* EXTQL */
1898 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1899 break;
1900 case 0x39:
1901 /* SLL */
1902 if (islit) {
1903 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1904 } else {
1905 tmp = tcg_temp_new();
1906 vb = load_gpr(ctx, rb);
1907 tcg_gen_andi_i64(tmp, vb, 0x3f);
1908 tcg_gen_shl_i64(vc, va, tmp);
1909 tcg_temp_free(tmp);
1910 }
1911 break;
1912 case 0x3B:
1913 /* INSQL */
1914 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1915 break;
1916 case 0x3C:
1917 /* SRA */
1918 if (islit) {
1919 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1920 } else {
1921 tmp = tcg_temp_new();
1922 vb = load_gpr(ctx, rb);
1923 tcg_gen_andi_i64(tmp, vb, 0x3f);
1924 tcg_gen_sar_i64(vc, va, tmp);
1925 tcg_temp_free(tmp);
1926 }
1927 break;
1928 case 0x52:
1929 /* MSKWH */
1930 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1931 break;
1932 case 0x57:
1933 /* INSWH */
1934 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1935 break;
1936 case 0x5A:
1937 /* EXTWH */
1938 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1939 break;
1940 case 0x62:
1941 /* MSKLH */
1942 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1943 break;
1944 case 0x67:
1945 /* INSLH */
1946 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1947 break;
1948 case 0x6A:
1949 /* EXTLH */
1950 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1951 break;
1952 case 0x72:
1953 /* MSKQH */
1954 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1955 break;
1956 case 0x77:
1957 /* INSQH */
1958 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1959 break;
1960 case 0x7A:
1961 /* EXTQH */
1962 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1963 break;
1964 default:
1965 goto invalid_opc;
1966 }
1967 break;
1968
1969 case 0x13:
1970 vc = dest_gpr(ctx, rc);
1971 vb = load_gpr_lit(ctx, rb, lit, islit);
1972 va = load_gpr(ctx, ra);
1973 switch (fn7) {
1974 case 0x00:
1975 /* MULL */
1976 tcg_gen_mul_i64(vc, va, vb);
1977 tcg_gen_ext32s_i64(vc, vc);
1978 break;
1979 case 0x20:
1980 /* MULQ */
1981 tcg_gen_mul_i64(vc, va, vb);
1982 break;
1983 case 0x30:
1984 /* UMULH */
1985 tmp = tcg_temp_new();
1986 tcg_gen_mulu2_i64(tmp, vc, va, vb);
1987 tcg_temp_free(tmp);
1988 break;
1989 case 0x40:
1990 /* MULL/V */
1991 tmp = tcg_temp_new();
1992 tcg_gen_ext32s_i64(tmp, va);
1993 tcg_gen_ext32s_i64(vc, vb);
1994 tcg_gen_mul_i64(tmp, tmp, vc);
1995 tcg_gen_ext32s_i64(vc, tmp);
1996 gen_helper_check_overflow(cpu_env, vc, tmp);
1997 tcg_temp_free(tmp);
1998 break;
1999 case 0x60:
2000 /* MULQ/V */
2001 tmp = tcg_temp_new();
2002 tmp2 = tcg_temp_new();
2003 tcg_gen_muls2_i64(vc, tmp, va, vb);
2004 tcg_gen_sari_i64(tmp2, vc, 63);
2005 gen_helper_check_overflow(cpu_env, tmp, tmp2);
2006 tcg_temp_free(tmp);
2007 tcg_temp_free(tmp2);
2008 break;
2009 default:
2010 goto invalid_opc;
2011 }
2012 break;
2013
2014 case 0x14:
2015 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2016 vc = dest_fpr(ctx, rc);
2017 switch (fpfn) { /* fn11 & 0x3F */
2018 case 0x04:
2019 /* ITOFS */
2020 REQUIRE_REG_31(rb);
2021 t32 = tcg_temp_new_i32();
2022 va = load_gpr(ctx, ra);
2023 tcg_gen_extrl_i64_i32(t32, va);
2024 gen_helper_memory_to_s(vc, t32);
2025 tcg_temp_free_i32(t32);
2026 break;
2027 case 0x0A:
2028 /* SQRTF */
2029 REQUIRE_REG_31(ra);
2030 vb = load_fpr(ctx, rb);
2031 gen_helper_sqrtf(vc, cpu_env, vb);
2032 break;
2033 case 0x0B:
2034 /* SQRTS */
2035 REQUIRE_REG_31(ra);
2036 gen_sqrts(ctx, rb, rc, fn11);
2037 break;
2038 case 0x14:
2039 /* ITOFF */
2040 REQUIRE_REG_31(rb);
2041 t32 = tcg_temp_new_i32();
2042 va = load_gpr(ctx, ra);
2043 tcg_gen_extrl_i64_i32(t32, va);
2044 gen_helper_memory_to_f(vc, t32);
2045 tcg_temp_free_i32(t32);
2046 break;
2047 case 0x24:
2048 /* ITOFT */
2049 REQUIRE_REG_31(rb);
2050 va = load_gpr(ctx, ra);
2051 tcg_gen_mov_i64(vc, va);
2052 break;
2053 case 0x2A:
2054 /* SQRTG */
2055 REQUIRE_REG_31(ra);
2056 vb = load_fpr(ctx, rb);
2057 gen_helper_sqrtg(vc, cpu_env, vb);
2058 break;
2059 case 0x02B:
2060 /* SQRTT */
2061 REQUIRE_REG_31(ra);
2062 gen_sqrtt(ctx, rb, rc, fn11);
2063 break;
2064 default:
2065 goto invalid_opc;
2066 }
2067 break;
2068
2069 case 0x15:
2070 /* VAX floating point */
2071 /* XXX: rounding mode and trap are ignored (!) */
2072 vc = dest_fpr(ctx, rc);
2073 vb = load_fpr(ctx, rb);
2074 va = load_fpr(ctx, ra);
2075 switch (fpfn) { /* fn11 & 0x3F */
2076 case 0x00:
2077 /* ADDF */
2078 gen_helper_addf(vc, cpu_env, va, vb);
2079 break;
2080 case 0x01:
2081 /* SUBF */
2082 gen_helper_subf(vc, cpu_env, va, vb);
2083 break;
2084 case 0x02:
2085 /* MULF */
2086 gen_helper_mulf(vc, cpu_env, va, vb);
2087 break;
2088 case 0x03:
2089 /* DIVF */
2090 gen_helper_divf(vc, cpu_env, va, vb);
2091 break;
2092 case 0x1E:
2093 /* CVTDG -- TODO */
2094 REQUIRE_REG_31(ra);
2095 goto invalid_opc;
2096 case 0x20:
2097 /* ADDG */
2098 gen_helper_addg(vc, cpu_env, va, vb);
2099 break;
2100 case 0x21:
2101 /* SUBG */
2102 gen_helper_subg(vc, cpu_env, va, vb);
2103 break;
2104 case 0x22:
2105 /* MULG */
2106 gen_helper_mulg(vc, cpu_env, va, vb);
2107 break;
2108 case 0x23:
2109 /* DIVG */
2110 gen_helper_divg(vc, cpu_env, va, vb);
2111 break;
2112 case 0x25:
2113 /* CMPGEQ */
2114 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2115 break;
2116 case 0x26:
2117 /* CMPGLT */
2118 gen_helper_cmpglt(vc, cpu_env, va, vb);
2119 break;
2120 case 0x27:
2121 /* CMPGLE */
2122 gen_helper_cmpgle(vc, cpu_env, va, vb);
2123 break;
2124 case 0x2C:
2125 /* CVTGF */
2126 REQUIRE_REG_31(ra);
2127 gen_helper_cvtgf(vc, cpu_env, vb);
2128 break;
2129 case 0x2D:
2130 /* CVTGD -- TODO */
2131 REQUIRE_REG_31(ra);
2132 goto invalid_opc;
2133 case 0x2F:
2134 /* CVTGQ */
2135 REQUIRE_REG_31(ra);
2136 gen_helper_cvtgq(vc, cpu_env, vb);
2137 break;
2138 case 0x3C:
2139 /* CVTQF */
2140 REQUIRE_REG_31(ra);
2141 gen_helper_cvtqf(vc, cpu_env, vb);
2142 break;
2143 case 0x3E:
2144 /* CVTQG */
2145 REQUIRE_REG_31(ra);
2146 gen_helper_cvtqg(vc, cpu_env, vb);
2147 break;
2148 default:
2149 goto invalid_opc;
2150 }
2151 break;
2152
2153 case 0x16:
2154 /* IEEE floating-point */
2155 switch (fpfn) { /* fn11 & 0x3F */
2156 case 0x00:
2157 /* ADDS */
2158 gen_adds(ctx, ra, rb, rc, fn11);
2159 break;
2160 case 0x01:
2161 /* SUBS */
2162 gen_subs(ctx, ra, rb, rc, fn11);
2163 break;
2164 case 0x02:
2165 /* MULS */
2166 gen_muls(ctx, ra, rb, rc, fn11);
2167 break;
2168 case 0x03:
2169 /* DIVS */
2170 gen_divs(ctx, ra, rb, rc, fn11);
2171 break;
2172 case 0x20:
2173 /* ADDT */
2174 gen_addt(ctx, ra, rb, rc, fn11);
2175 break;
2176 case 0x21:
2177 /* SUBT */
2178 gen_subt(ctx, ra, rb, rc, fn11);
2179 break;
2180 case 0x22:
2181 /* MULT */
2182 gen_mult(ctx, ra, rb, rc, fn11);
2183 break;
2184 case 0x23:
2185 /* DIVT */
2186 gen_divt(ctx, ra, rb, rc, fn11);
2187 break;
2188 case 0x24:
2189 /* CMPTUN */
2190 gen_cmptun(ctx, ra, rb, rc, fn11);
2191 break;
2192 case 0x25:
2193 /* CMPTEQ */
2194 gen_cmpteq(ctx, ra, rb, rc, fn11);
2195 break;
2196 case 0x26:
2197 /* CMPTLT */
2198 gen_cmptlt(ctx, ra, rb, rc, fn11);
2199 break;
2200 case 0x27:
2201 /* CMPTLE */
2202 gen_cmptle(ctx, ra, rb, rc, fn11);
2203 break;
2204 case 0x2C:
2205 REQUIRE_REG_31(ra);
2206 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2207 /* CVTST */
2208 gen_cvtst(ctx, rb, rc, fn11);
2209 } else {
2210 /* CVTTS */
2211 gen_cvtts(ctx, rb, rc, fn11);
2212 }
2213 break;
2214 case 0x2F:
2215 /* CVTTQ */
2216 REQUIRE_REG_31(ra);
2217 gen_cvttq(ctx, rb, rc, fn11);
2218 break;
2219 case 0x3C:
2220 /* CVTQS */
2221 REQUIRE_REG_31(ra);
2222 gen_cvtqs(ctx, rb, rc, fn11);
2223 break;
2224 case 0x3E:
2225 /* CVTQT */
2226 REQUIRE_REG_31(ra);
2227 gen_cvtqt(ctx, rb, rc, fn11);
2228 break;
2229 default:
2230 goto invalid_opc;
2231 }
2232 break;
2233
2234 case 0x17:
2235 switch (fn11) {
2236 case 0x010:
2237 /* CVTLQ */
2238 REQUIRE_REG_31(ra);
2239 vc = dest_fpr(ctx, rc);
2240 vb = load_fpr(ctx, rb);
2241 gen_cvtlq(vc, vb);
2242 break;
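/* The three copy-sign forms below differ only in the mask passed to
   gen_cpy_mask() (defined earlier in this file), which is assumed to
   compute, roughly:
       vc = (vb & ~mask) | ((inv ? ~va : va) & mask)
   so CPYS moves just the sign bit from Fa, CPYSN moves the inverted
   sign bit, and CPYSE moves the sign and exponent field. */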
2243 case 0x020:
2244 /* CPYS */
2245 if (rc == 31) {
2246 /* Special case CPYS as FNOP. */
2247 } else {
2248 vc = dest_fpr(ctx, rc);
2249 va = load_fpr(ctx, ra);
2250 if (ra == rb) {
2251 /* Special case CPYS as FMOV. */
2252 tcg_gen_mov_i64(vc, va);
2253 } else {
2254 vb = load_fpr(ctx, rb);
2255 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2256 }
2257 }
2258 break;
2259 case 0x021:
2260 /* CPYSN */
2261 vc = dest_fpr(ctx, rc);
2262 vb = load_fpr(ctx, rb);
2263 va = load_fpr(ctx, ra);
2264 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2265 break;
2266 case 0x022:
2267 /* CPYSE */
2268 vc = dest_fpr(ctx, rc);
2269 vb = load_fpr(ctx, rb);
2270 va = load_fpr(ctx, ra);
2271 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2272 break;
2273 case 0x024:
2274 /* MT_FPCR */
2275 va = load_fpr(ctx, ra);
2276 gen_helper_store_fpcr(cpu_env, va);
2277 if (ctx->tb_rm == QUAL_RM_D) {
2278 /* Re-do the copy of the rounding mode to fp_status
2279 the next time we use dynamic rounding. */
2280 ctx->tb_rm = -1;
2281 }
2282 break;
2283 case 0x025:
2284 /* MF_FPCR */
2285 va = dest_fpr(ctx, ra);
2286 gen_helper_load_fpcr(va, cpu_env);
2287 break;
2288 case 0x02A:
2289 /* FCMOVEQ */
2290 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2291 break;
2292 case 0x02B:
2293 /* FCMOVNE */
2294 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2295 break;
2296 case 0x02C:
2297 /* FCMOVLT */
2298 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2299 break;
2300 case 0x02D:
2301 /* FCMOVGE */
2302 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2303 break;
2304 case 0x02E:
2305 /* FCMOVLE */
2306 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2307 break;
2308 case 0x02F:
2309 /* FCMOVGT */
2310 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2311 break;
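/* CVTQL has three encodings that share one body: the /V and /SV
   qualifier bits in fn11 change only which exceptions are raised,
   and that is handled afterwards by gen_fp_exc_raise(rc, fn11). */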
2312 case 0x030: /* CVTQL */
2313 case 0x130: /* CVTQL/V */
2314 case 0x530: /* CVTQL/SV */
2315 REQUIRE_REG_31(ra);
2316 vc = dest_fpr(ctx, rc);
2317 vb = load_fpr(ctx, rb);
2318 gen_helper_cvtql(vc, cpu_env, vb);
2319 gen_fp_exc_raise(rc, fn11);
2320 break;
2321 default:
2322 goto invalid_opc;
2323 }
2324 break;
2325
2326 case 0x18:
2327 switch ((uint16_t)disp16) {
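/* Most of these miscellaneous instructions are hints or barriers that
   need no generated code here: with TCG executing one instruction
   stream at a time, the TRAPB/EXCB ordering guarantees and the MB/WMB
   memory barriers hold trivially, and the FETCH/WH64 cache hints can
   simply be ignored. */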
2328 case 0x0000:
2329 /* TRAPB */
2330 /* No-op. */
2331 break;
2332 case 0x0400:
2333 /* EXCB */
2334 /* No-op. */
2335 break;
2336 case 0x4000:
2337 /* MB */
2338 /* No-op */
2339 break;
2340 case 0x4400:
2341 /* WMB */
2342 /* No-op */
2343 break;
2344 case 0x8000:
2345 /* FETCH */
2346 /* No-op */
2347 break;
2348 case 0xA000:
2349 /* FETCH_M */
2350 /* No-op */
2351 break;
2352 case 0xC000:
2353 /* RPCC */
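/* With icount enabled, sampling the cycle counter counts as I/O: the
   helper call is bracketed by gen_io_start()/gen_io_end() and the TB
   is ended (EXIT_PC_STALE) so that the instruction count is exact at
   the moment the counter is read. */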
2354 va = dest_gpr(ctx, ra);
2355 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2356 gen_io_start();
2357 gen_helper_load_pcc(va, cpu_env);
2358 gen_io_end();
2359 ret = EXIT_PC_STALE;
2360 } else {
2361 gen_helper_load_pcc(va, cpu_env);
2362 }
2363 break;
2364 case 0xE000:
2365 /* RC */
2366 gen_rx(ctx, ra, 0);
2367 break;
2368 case 0xE800:
2369 /* ECB */
2370 break;
2371 case 0xF000:
2372 /* RS */
2373 gen_rx(ctx, ra, 1);
2374 break;
2375 case 0xF800:
2376 /* WH64 */
2377 /* No-op */
2378 break;
2379 case 0xFC00:
2380 /* WH64EN */
2381 /* No-op */
2382 break;
2383 default:
2384 goto invalid_opc;
2385 }
2386 break;
2387
2388 case 0x19:
2389 /* HW_MFPR (PALcode) */
2390 #ifndef CONFIG_USER_ONLY
2391 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2392 va = dest_gpr(ctx, ra);
2393 ret = gen_mfpr(ctx, va, insn & 0xffff);
2394 break;
2395 #else
2396 goto invalid_opc;
2397 #endif
2398
2399 case 0x1A:
2400 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2401 prediction stack action, which of course we don't implement. */
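/* Alpha PCs are longword-aligned, so the low two bits of the target
   register are discarded (& ~3).  The return address written to ra is
   the already-incremented PC; when ra == 31 the write is skipped,
   since $31 reads as zero anyway. */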
2402 vb = load_gpr(ctx, rb);
2403 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2404 if (ra != 31) {
2405 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
2406 }
2407 ret = EXIT_PC_UPDATED;
2408 break;
2409
2410 case 0x1B:
2411 /* HW_LD (PALcode) */
2412 #ifndef CONFIG_USER_ONLY
2413 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2414 {
2415 TCGv addr = tcg_temp_new();
2416 vb = load_gpr(ctx, rb);
2417 va = dest_gpr(ctx, ra);
2418
2419 tcg_gen_addi_i64(addr, vb, disp12);
2420 switch ((insn >> 12) & 0xF) {
2421 case 0x0:
2422 /* Longword physical access (hw_ldl/p) */
2423 gen_helper_ldl_phys(va, cpu_env, addr);
2424 break;
2425 case 0x1:
2426 /* Quadword physical access (hw_ldq/p) */
2427 gen_helper_ldq_phys(va, cpu_env, addr);
2428 break;
2429 case 0x2:
2430 /* Longword physical access with lock (hw_ldl_l/p) */
2431 gen_helper_ldl_l_phys(va, cpu_env, addr);
2432 break;
2433 case 0x3:
2434 /* Quadword physical access with lock (hw_ldq_l/p) */
2435 gen_helper_ldq_l_phys(va, cpu_env, addr);
2436 break;
2437 case 0x4:
2438 /* Longword virtual PTE fetch (hw_ldl/v) */
2439 goto invalid_opc;
2440 case 0x5:
2441 /* Quadword virtual PTE fetch (hw_ldq/v) */
2442 goto invalid_opc;
2444 case 0x6:
2445 /* Invalid */
2446 goto invalid_opc;
2447 case 0x7:
2448 /* Invalid */
2449 goto invalid_opc;
2450 case 0x8:
2451 /* Longword virtual access (hw_ldl) */
2452 goto invalid_opc;
2453 case 0x9:
2454 /* Quadword virtual access (hw_ldq) */
2455 goto invalid_opc;
2456 case 0xA:
2457 /* Longword virtual access with protection check (hw_ldl/w) */
2458 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2459 break;
2460 case 0xB:
2461 /* Quadword virtual access with protection check (hw_ldq/w) */
2462 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2463 break;
2464 case 0xC:
2465 /* Longword virtual access with alt access mode (hw_ldl/a) */
2466 goto invalid_opc;
2467 case 0xD:
2468 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2469 goto invalid_opc;
2470 case 0xE:
2471 /* Longword virtual access with alternate access mode and
2472 protection checks (hw_ldl/wa) */
2473 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2474 break;
2475 case 0xF:
2476 /* Quadword virtual access with alternate access mode and
2477 protection checks (hw_ldq/wa) */
2478 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2479 break;
2480 }
2481 tcg_temp_free(addr);
2482 break;
2483 }
2484 #else
2485 goto invalid_opc;
2486 #endif
2487
2488 case 0x1C:
2489 vc = dest_gpr(ctx, rc);
2490 if (fn7 == 0x70) {
2491 /* FTOIT */
2492 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2493 REQUIRE_REG_31(rb);
2494 va = load_fpr(ctx, ra);
2495 tcg_gen_mov_i64(vc, va);
2496 break;
2497 } else if (fn7 == 0x78) {
2498 /* FTOIS */
2499 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2500 REQUIRE_REG_31(rb);
2501 t32 = tcg_temp_new_i32();
2502 va = load_fpr(ctx, ra);
2503 gen_helper_s_to_memory(t32, va);
2504 tcg_gen_ext_i32_i64(vc, t32);
2505 tcg_temp_free_i32(t32);
2506 break;
2507 }
2508
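/* Everything below reads an integer operand, so only now load Rb (or
   the literal).  FTOIT/FTOIS were peeled off above because they are
   the odd members of opcode 0x1C that read an FP register (Fa)
   instead. */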
2509 vb = load_gpr_lit(ctx, rb, lit, islit);
2510 switch (fn7) {
2511 case 0x00:
2512 /* SEXTB */
2513 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2514 REQUIRE_REG_31(ra);
2515 tcg_gen_ext8s_i64(vc, vb);
2516 break;
2517 case 0x01:
2518 /* SEXTW */
2519 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2520 REQUIRE_REG_31(ra);
2521 tcg_gen_ext16s_i64(vc, vb);
2522 break;
2523 case 0x30:
2524 /* CTPOP */
2525 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2526 REQUIRE_REG_31(ra);
2527 REQUIRE_NO_LIT;
2528 gen_helper_ctpop(vc, vb);
2529 break;
2530 case 0x31:
2531 /* PERR */
2532 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2533 REQUIRE_NO_LIT;
2534 va = load_gpr(ctx, ra);
2535 gen_helper_perr(vc, va, vb);
2536 break;
2537 case 0x32:
2538 /* CTLZ */
2539 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2540 REQUIRE_REG_31(ra);
2541 REQUIRE_NO_LIT;
2542 gen_helper_ctlz(vc, vb);
2543 break;
2544 case 0x33:
2545 /* CTTZ */
2546 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2547 REQUIRE_REG_31(ra);
2548 REQUIRE_NO_LIT;
2549 gen_helper_cttz(vc, vb);
2550 break;
2551 case 0x34:
2552 /* UNPKBW */
2553 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2554 REQUIRE_REG_31(ra);
2555 REQUIRE_NO_LIT;
2556 gen_helper_unpkbw(vc, vb);
2557 break;
2558 case 0x35:
2559 /* UNPKBL */
2560 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2561 REQUIRE_REG_31(ra);
2562 REQUIRE_NO_LIT;
2563 gen_helper_unpkbl(vc, vb);
2564 break;
2565 case 0x36:
2566 /* PKWB */
2567 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2568 REQUIRE_REG_31(ra);
2569 REQUIRE_NO_LIT;
2570 gen_helper_pkwb(vc, vb);
2571 break;
2572 case 0x37:
2573 /* PKLB */
2574 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2575 REQUIRE_REG_31(ra);
2576 REQUIRE_NO_LIT;
2577 gen_helper_pklb(vc, vb);
2578 break;
2579 case 0x38:
2580 /* MINSB8 */
2581 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2582 va = load_gpr(ctx, ra);
2583 gen_helper_minsb8(vc, va, vb);
2584 break;
2585 case 0x39:
2586 /* MINSW4 */
2587 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2588 va = load_gpr(ctx, ra);
2589 gen_helper_minsw4(vc, va, vb);
2590 break;
2591 case 0x3A:
2592 /* MINUB8 */
2593 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2594 va = load_gpr(ctx, ra);
2595 gen_helper_minub8(vc, va, vb);
2596 break;
2597 case 0x3B:
2598 /* MINUW4 */
2599 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2600 va = load_gpr(ctx, ra);
2601 gen_helper_minuw4(vc, va, vb);
2602 break;
2603 case 0x3C:
2604 /* MAXUB8 */
2605 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2606 va = load_gpr(ctx, ra);
2607 gen_helper_maxub8(vc, va, vb);
2608 break;
2609 case 0x3D:
2610 /* MAXUW4 */
2611 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2612 va = load_gpr(ctx, ra);
2613 gen_helper_maxuw4(vc, va, vb);
2614 break;
2615 case 0x3E:
2616 /* MAXSB8 */
2617 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2618 va = load_gpr(ctx, ra);
2619 gen_helper_maxsb8(vc, va, vb);
2620 break;
2621 case 0x3F:
2622 /* MAXSW4 */
2623 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2624 va = load_gpr(ctx, ra);
2625 gen_helper_maxsw4(vc, va, vb);
2626 break;
2627 default:
2628 goto invalid_opc;
2629 }
2630 break;
2631
2632 case 0x1D:
2633 /* HW_MTPR (PALcode) */
2634 #ifndef CONFIG_USER_ONLY
2635 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2636 vb = load_gpr(ctx, rb);
2637 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2638 break;
2639 #else
2640 goto invalid_opc;
2641 #endif
2642
2643 case 0x1E:
2644 /* HW_RET (PALcode) */
2645 #ifndef CONFIG_USER_ONLY
2646 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2647 if (rb == 31) {
2648 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2649 address from EXC_ADDR. This turns out to be useful for our
2650 emulation PALcode, so continue to accept it. */
2651 ctx->lit = vb = tcg_temp_new();
2652 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2653 } else {
2654 vb = load_gpr(ctx, rb);
2655 }
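/* Returning from PALcode: clear intr_flag, drop any load-locked
   reservation (lock_addr = -1), and let bit 0 of the return address
   choose whether we re-enter PALmode, with the low bits then masked
   off the new PC. */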
2656 tmp = tcg_temp_new();
2657 tcg_gen_movi_i64(tmp, 0);
2658 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
2659 tcg_gen_movi_i64(cpu_lock_addr, -1);
2660 tcg_gen_andi_i64(tmp, vb, 1);
2661 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
2662 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2663 ret = EXIT_PC_UPDATED;
2664 break;
2665 #else
2666 goto invalid_opc;
2667 #endif
2668
2669 case 0x1F:
2670 /* HW_ST (PALcode) */
2671 #ifndef CONFIG_USER_ONLY
2672 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2673 {
2674 TCGv addr = tcg_temp_new();
2675 va = load_gpr(ctx, ra);
2676 vb = load_gpr(ctx, rb);
2677
2678 tcg_gen_addi_i64(addr, vb, disp12);
2679 switch ((insn >> 12) & 0xF) {
2680 case 0x0:
2681 /* Longword physical access */
2682 gen_helper_stl_phys(cpu_env, addr, va);
2683 break;
2684 case 0x1:
2685 /* Quadword physical access */
2686 gen_helper_stq_phys(cpu_env, addr, va);
2687 break;
2688 case 0x2:
2689 /* Longword physical access with lock */
2690 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2691 break;
2692 case 0x3:
2693 /* Quadword physical access with lock */
2694 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2695 break;
2696 case 0x4:
2697 /* Longword virtual access */
2698 goto invalid_opc;
2699 case 0x5:
2700 /* Quadword virtual access */
2701 goto invalid_opc;
2702 case 0x6:
2703 /* Invalid */
2704 goto invalid_opc;
2705 case 0x7:
2706 /* Invalid */
2707 goto invalid_opc;
2708 case 0x8:
2709 /* Invalid */
2710 goto invalid_opc;
2711 case 0x9:
2712 /* Invalid */
2713 goto invalid_opc;
2714 case 0xA:
2715 /* Invalid */
2716 goto invalid_opc;
2717 case 0xB:
2718 /* Invalid */
2719 goto invalid_opc;
2720 case 0xC:
2721 /* Longword virtual access with alternate access mode */
2722 goto invalid_opc;
2723 case 0xD:
2724 /* Quadword virtual access with alternate access mode */
2725 goto invalid_opc;
2726 case 0xE:
2727 /* Invalid */
2728 goto invalid_opc;
2729 case 0xF:
2730 /* Invalid */
2731 goto invalid_opc;
2732 }
2733 tcg_temp_free(addr);
2734 break;
2735 }
2736 #else
2737 goto invalid_opc;
2738 #endif
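/* For the loads and stores below, the two trailing arguments to
   gen_load_mem/gen_store_mem appear to flag, respectively, an FP
   register operand and low-bit clearing of the address (the latter
   used only by the unaligned LDQ_U/STQ_U forms elsewhere). */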
2739 case 0x20:
2740 /* LDF */
2741 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2742 break;
2743 case 0x21:
2744 /* LDG */
2745 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2746 break;
2747 case 0x22:
2748 /* LDS */
2749 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2750 break;
2751 case 0x23:
2752 /* LDT */
2753 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2754 break;
2755 case 0x24:
2756 /* STF */
2757 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2758 break;
2759 case 0x25:
2760 /* STG */
2761 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2762 break;
2763 case 0x26:
2764 /* STS */
2765 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2766 break;
2767 case 0x27:
2768 /* STT */
2769 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2770 break;
2771 case 0x28:
2772 /* LDL */
2773 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2774 break;
2775 case 0x29:
2776 /* LDQ */
2777 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2778 break;
2779 case 0x2A:
2780 /* LDL_L */
2781 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2782 break;
2783 case 0x2B:
2784 /* LDQ_L */
2785 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2786 break;
2787 case 0x2C:
2788 /* STL */
2789 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2790 break;
2791 case 0x2D:
2792 /* STQ */
2793 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2794 break;
2795 case 0x2E:
2796 /* STL_C */
2797 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2798 break;
2799 case 0x2F:
2800 /* STQ_C */
2801 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2802 break;
2803 case 0x30:
2804 /* BR */
2805 ret = gen_bdirect(ctx, ra, disp21);
2806 break;
2807 case 0x31: /* FBEQ */
2808 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2809 break;
2810 case 0x32: /* FBLT */
2811 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2812 break;
2813 case 0x33: /* FBLE */
2814 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2815 break;
2816 case 0x34:
2817 /* BSR */
2818 ret = gen_bdirect(ctx, ra, disp21);
2819 break;
2820 case 0x35: /* FBNE */
2821 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2822 break;
2823 case 0x36: /* FBGE */
2824 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2825 break;
2826 case 0x37: /* FBGT */
2827 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2828 break;
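/* In the integer branches below, the trailing argument to gen_bcond
   appears to select a low-bit test: BLBC/BLBS pass 1 and so compare
   only bit 0 of Ra against zero, while BEQ/BNE and the rest pass 0
   and compare the whole register. */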
2829 case 0x38:
2830 /* BLBC */
2831 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2832 break;
2833 case 0x39:
2834 /* BEQ */
2835 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2836 break;
2837 case 0x3A:
2838 /* BLT */
2839 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2840 break;
2841 case 0x3B:
2842 /* BLE */
2843 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2844 break;
2845 case 0x3C:
2846 /* BLBS */
2847 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2848 break;
2849 case 0x3D:
2850 /* BNE */
2851 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2852 break;
2853 case 0x3E:
2854 /* BGE */
2855 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2856 break;
2857 case 0x3F:
2858 /* BGT */
2859 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2860 break;
2861 invalid_opc:
2862 ret = gen_invalid(ctx);
2863 break;
2864 }
2865
2866 return ret;
2867 }
2868
2869 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2870 {
2871 AlphaCPU *cpu = alpha_env_get_cpu(env);
2872 CPUState *cs = CPU(cpu);
2873 DisasContext ctx, *ctxp = &ctx;
2874 target_ulong pc_start;
2875 target_ulong pc_mask;
2876 uint32_t insn;
2877 ExitStatus ret;
2878 int num_insns;
2879 int max_insns;
2880
2881 pc_start = tb->pc;
2882
2883 ctx.tb = tb;
2884 ctx.pc = pc_start;
2885 ctx.mem_idx = cpu_mmu_index(env, false);
2886 ctx.implver = env->implver;
2887 ctx.singlestep_enabled = cs->singlestep_enabled;
2888
2889 #ifdef CONFIG_USER_ONLY
2890 ctx.ir = cpu_std_ir;
2891 #else
2892 ctx.palbr = env->palbr;
2893 ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2894 #endif
2895
2896 /* ??? Every TB begins with an unset rounding mode, to be initialized on
2897 the first fp insn of the TB. Alternatively we could define a proper
2898 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2899 to reset the FP_STATUS to that default at the end of any TB that
2900 changes the default. We could even (gasp) dynamically figure out
2901 what default would be most efficient given the running program. */
2902 ctx.tb_rm = -1;
2903 /* Similarly for flush-to-zero. */
2904 ctx.tb_ftz = -1;
2905
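/* max_insns comes from the TB's cflags when icount has requested a TB
   of a specific bounded length; otherwise it is effectively unbounded
   and only the TCG_MAX_INSNS cap applies. */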
2906 num_insns = 0;
2907 max_insns = tb->cflags & CF_COUNT_MASK;
2908 if (max_insns == 0) {
2909 max_insns = CF_COUNT_MASK;
2910 }
2911 if (max_insns > TCG_MAX_INSNS) {
2912 max_insns = TCG_MAX_INSNS;
2913 }
2914
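/* pc_mask controls when the page-boundary check in the loop below
   fires.  Translation normally stops at a target page boundary, but
   within the fixed 41-bit kernel superpage mapping it appears safe to
   let a TB run on to the (much larger) superpage boundary instead. */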
2915 if (in_superpage(&ctx, pc_start)) {
2916 pc_mask = (1ULL << 41) - 1;
2917 } else {
2918 pc_mask = ~TARGET_PAGE_MASK;
2919 }
2920
2921 gen_tb_start(tb);
2922 do {
2923 tcg_gen_insn_start(ctx.pc);
2924 num_insns++;
2925
2926 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2927 ret = gen_excp(&ctx, EXCP_DEBUG, 0);
2928 /* The address covered by the breakpoint must be included in
2929 [tb->pc, tb->pc + tb->size) in order for it to be
2930 properly cleared -- thus we increment the PC here so that
2931 the logic setting tb->size below does the right thing. */
2932 ctx.pc += 4;
2933 break;
2934 }
2935 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2936 gen_io_start();
2937 }
2938 insn = cpu_ldl_code(env, ctx.pc);
2939
2940 TCGV_UNUSED_I64(ctx.zero);
2941 TCGV_UNUSED_I64(ctx.sink);
2942 TCGV_UNUSED_I64(ctx.lit);
2943
2944 ctx.pc += 4;
2945 ret = translate_one(ctxp, insn);
2946
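/* Free whatever per-insn temporaries translate_one allocated lazily.
   ctx.sink is discarded before being freed: the discard tells TCG the
   value is dead, so stores into it can be optimized away entirely. */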
2947 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2948 tcg_gen_discard_i64(ctx.sink);
2949 tcg_temp_free(ctx.sink);
2950 }
2951 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2952 tcg_temp_free(ctx.zero);
2953 }
2954 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2955 tcg_temp_free(ctx.lit);
2956 }
2957
2958 /* If we reach a page boundary, are single-stepping,
2959 or exhaust the instruction count, stop generation. */
2960 if (ret == NO_EXIT
2961 && ((ctx.pc & pc_mask) == 0
2962 || tcg_op_buf_full()
2963 || num_insns >= max_insns
2964 || singlestep
2965 || ctx.singlestep_enabled)) {
2966 ret = EXIT_PC_STALE;
2967 }
2968 } while (ret == NO_EXIT);
2969
2970 if (tb->cflags & CF_LAST_IO) {
2971 gen_io_end();
2972 }
2973
2974 switch (ret) {
2975 case EXIT_GOTO_TB:
2976 case EXIT_NORETURN:
2977 break;
2978 case EXIT_PC_STALE:
2979 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2980 /* FALLTHRU */
2981 case EXIT_PC_UPDATED:
2982 if (ctx.singlestep_enabled) {
2983 gen_excp_1(EXCP_DEBUG, 0);
2984 } else {
2985 tcg_gen_exit_tb(0);
2986 }
2987 break;
2988 default:
2989 abort();
2990 }
2991
2992 gen_tb_end(tb, num_insns);
2993
2994 tb->size = ctx.pc - pc_start;
2995 tb->icount = num_insns;
2996
2997 #ifdef DEBUG_DISAS
2998 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2999 && qemu_log_in_addr_range(pc_start)) {
3000 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3001 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
3002 qemu_log("\n");
3003 }
3004 #endif
3005 }
3006
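/* Called when unwinding after a fault: data[] holds the values that
   tcg_gen_insn_start() recorded for the faulting instruction, which
   for this target is just the PC. */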
3007 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3008 target_ulong *data)
3009 {
3010 env->pc = data[0];
3011 }