/*
 * Scraped from gitweb (git.proxmox.com, qemu.git), commit
 * "target-alpha: Fix system store_conditional" — target-alpha/translate.c
 */
1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
37
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
43
typedef struct DisasContext DisasContext;
/* Per-translation-block state threaded through the instruction decoders. */
struct DisasContext {
    struct TranslationBlock *tb;    /* TB being translated */
    CPUAlphaState *env;             /* CPU state (singlestep flag, etc.) */
    uint64_t pc;                    /* PC of the next instruction; used as
                                       branch base and return address */
    int mem_idx;                    /* MMU index for qemu_ld/st operations */
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;                   /* nonzero while executing PALcode */
#endif
    uint32_t amask;                 /* CPU feature mask */

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};
60
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
83
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];         /* integer registers; $31 always reads zero
                                   and has no backing global */
static TCGv cpu_fir[31];        /* fp registers; $f31 likewise */
static TCGv cpu_pc;
static TCGv cpu_lock_addr;      /* address of last LDx_L; -1 = no lock held */
static TCGv cpu_lock_st_addr;   /* user-only: STx_C address for cpu_loop */
static TCGv cpu_lock_value;     /* value loaded by last LDx_L */
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;           /* CPUState 'unique' value (user emulation) */
#endif

/* register names */
/* "ir0".."ir30" = 10*4+21*5 bytes, "fir0".."fir30" = 10*5+21*6 bytes,
   each including the NUL terminator.  */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
100
/* Allocate the fixed TCG globals mapping guest CPU state (integer and
   fp registers, PC, ll/sc lock state) into TCG values.  Idempotent:
   calls after the first return immediately.  */
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;      /* strlen("irN") + NUL */

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;      /* strlen("firN") + NUL */
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
149
150 static void gen_excp_1(int exception, int error_code)
151 {
152 TCGv_i32 tmp1, tmp2;
153
154 tmp1 = tcg_const_i32(exception);
155 tmp2 = tcg_const_i32(error_code);
156 gen_helper_excp(tmp1, tmp2);
157 tcg_temp_free_i32(tmp2);
158 tcg_temp_free_i32(tmp1);
159 }
160
/* Raise EXCEPTION at the current instruction: store the PC so the
   handler sees the faulting address, then emit the helper call.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
167
/* Raise an illegal-instruction (OPCDEC) exception.  */
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
172
/* LDF: load a 32-bit VAX F-float and expand it to the 64-bit register
   format via the memory_to_f helper.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
183
/* LDG: load a 64-bit VAX G-float, converting the memory format to the
   register format via the memory_to_g helper.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
191
/* LDS: load a 32-bit IEEE single and expand it to the 64-bit register
   format via the memory_to_s helper.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
202
/* LDL_L: load-locked longword.  Record the address and loaded value so
   a later STx_C (gen_store_conditional) can validate the lock.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
209
/* LDQ_L: load-locked quadword; see gen_qemu_ldl_l.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
216
/* Emit RA <- mem[RB + disp16].  FP selects the fp register file;
   CLEAR implements the LDx_U low-3-bit address masking;
   TCG_GEN_QEMU_LOAD supplies the access size/extension.  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* $31 base reads as zero: absolute (displacement-only) address.  */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
250
/* STF: convert register format to 32-bit VAX F-float memory format
   via the f_to_memory helper, then store.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
261
/* STG: convert register format to 64-bit VAX G-float memory format
   via the g_to_memory helper, then store.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
269
/* STS: convert register format to 32-bit IEEE single memory format
   via the s_to_memory helper, then store.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
280
/* Emit mem[RB + disp16] <- RA.  FP selects the fp register file;
   CLEAR implements the STx_U low-3-bit address masking;
   TCG_GEN_QEMU_STORE supplies the access size.  RA == $31 stores a
   constant zero from a throw-away temp.  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* $31 base reads as zero: absolute (displacement-only) address.  */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        /* Only the zero constant was allocated here; register TCGvs are
           globals and must not be freed.  */
        tcg_temp_free(va);
    }
}
314
/* STL_C/STQ_C: store RA at RB+disp16 iff the lock established by a prior
   LDx_L is still valid; RA then receives 1 on success, 0 on failure.
   QUAD selects quadword vs longword access.  */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    /* Write the address into the fixed global where cpu_loop expects
       to find it when emulating the compare-and-swap.  */
    addr = cpu_lock_st_addr;
#else
    /* Must be a local temp: its value is needed across the brconds.  */
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if this is not the locked address.  */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        /* Fail if memory no longer holds the value the LDx_L loaded.  */
        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* Any STx_C, successful or not, clears the lock.  */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
381
382 static int use_goto_tb(DisasContext *ctx, uint64_t dest)
383 {
384 /* Check for the dest on the same page as the start of the TB. We
385 also want to suppress goto_tb in the case of single-steping and IO. */
386 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
387 && !ctx->env->singlestep_enabled
388 && !(ctx->tb->cflags & CF_LAST_IO));
389 }
390
391 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
392 {
393 uint64_t dest = ctx->pc + (disp << 2);
394
395 if (ra != 31) {
396 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
397 }
398
399 /* Notice branch-to-next; used to initialize RA with the PC. */
400 if (disp == 0) {
401 return 0;
402 } else if (use_goto_tb(ctx, dest)) {
403 tcg_gen_goto_tb(0);
404 tcg_gen_movi_i64(cpu_pc, dest);
405 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
406 return EXIT_GOTO_TB;
407 } else {
408 tcg_gen_movi_i64(cpu_pc, dest);
409 return EXIT_PC_UPDATED;
410 }
411 }
412
/* Emit a conditional branch on (CMP cond 0) to PC-relative DISP.
   With goto_tb available, both arms become chained exits; otherwise a
   diamond writes cpu_pc and falls out with EXIT_PC_UPDATED.  */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Not-taken arm: chain slot 0 to the next instruction.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        /* Taken arm: chain slot 1 to the branch target.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
457
/* Integer conditional branch: compare RA (or its low bit when MASK is
   set, as for BLBC/BLBS) against zero and branch by DISP.  */
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* $31 reads as zero; compare a constant.  */
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
476
/* Fold -0.0 for comparison with COND.  Transforms SRC into DEST such
   that an integer comparison of DEST against zero gives the IEEE
   result where -0.0 == +0.0.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;    /* bit pattern of -0.0 */

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
508
/* Floating-point conditional branch on F(RA) compared against zero,
   with -0.0 folded so it behaves as +0.0.  */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
524
/* CMOVxx: if (RA cond 0) then RC <- RB (or the literal LIT when ISLIT).
   MASK selects low-bit testing (CMOVLBC/CMOVLBS).  Implemented as a
   branch around the move on the inverted condition.  */
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
557
/* FCMOVxx: if (F(RA) cond 0.0) then F(RC) <- F(RB), with -0.0 folded
   so it compares equal to +0.0.  */
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
584
/* Function-code (fn11) qualifier bits for fp instructions.  */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

/* Note that QUAL_U and QUAL_V are deliberately the same bit: which
   meaning applies depends on whether the result is fp or integer.  */
#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
595
/* Make the softfloat rounding mode match the instruction's /RM
   qualifier.  The setting is tracked per-TB (ctx->tb_rm) so repeated
   instructions with the same mode emit nothing.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic mode: read the FPCR setting at run time.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
634
/* Make the softfloat flush-to-zero setting match the instruction's /U
   qualifier, tracked per-TB (ctx->tb_ftz).  Without /U, denormal
   results are forced to zero; with /U the FPCR decides at run time.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
663
/* Fetch fp register REG as an IEEE operand, via the helper appropriate
   to the /S qualifier (software completion) or comparison context; the
   helpers screen for exceptional input values.  Returns a fresh temp
   the caller must free.  */
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
678
/* Clear the accumulated softfloat exception flags before an fp op.  */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
690
/* After an fp op writing register RC, collect the softfloat exception
   flags, drop those in IGNORE, and call the raise helper (the /S
   variant when software completion is requested) to signal traps.  */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
728
/* Common case of gen_fp_exc_raise_ignore: suppress inexact unless the
   /I qualifier requested it.  */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
733
/* CVTLQ: unpack a longword stored in fp-register format (payload in
   bits <63:62> and <58:29>) into a sign-extended canonical quadword.  */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
755
/* CVTQL: pack the low 32 bits of a quadword into the fp-register
   longword format (bits <31:30> to <63:62>, <29:0> to <58:29>);
   inverse of gen_fcvtlq.  */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
775
/* CVTQL/V: as gen_fcvtql, but raise an integer-overflow arithmetic
   trap when the source quadword does not fit in 32 signed bits.  */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        /* Overflow iff sign-extending the low 32 bits changes the value.  */
        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
790
/* Emit a two-operand VAX fp op F(RC) <- helper(F(RB)), substituting a
   zero constant when RB is $f31 and skipping entirely when RC is.  */
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
813
/* Emit a two-operand IEEE fp op with full qualifier handling:
   rounding mode, flush-to-zero, exception clear/raise.  */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
835
/* Instantiate gen_fNAME wrappers around gen_ieee_arith2.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
846
/* CVTTQ: convert T-float to quadword integer, dispatching on the
   qualifier combination to specialized helpers for the common cases.  */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
886
/* Emit an integer-to-fp conversion (CVTQS/CVTQT).  Exception tracking
   is only emitted when the /I qualifier asks for inexact detection.  */
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
921
/* Instantiate gen_fNAME wrappers around gen_ieee_intcvt.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
930
/* Common body for CPYS/CPYSN/CPYSE: F(RC) <- (F(RA) & MASK, optionally
   inverted) | (F(RB) & ~MASK).  MASK is the sign bit or sign+exponent.
   ZA/ZB track which operand folds to a constant zero so the final OR
   can be simplified.  */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask; reuse the constant directly.  */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    /* Combine, skipping whichever side is known zero.  */
    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
990
/* CPYS: copy sign bit from F(RA), rest from F(RB).  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}
995
/* CPYSN: copy the inverted sign bit from F(RA), rest from F(RB).  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}
1000
/* CPYSE: copy sign and exponent from F(RA), fraction from F(RB).  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1005
/* Emit a three-operand VAX fp op F(RC) <- helper(F(RA), F(RB)), with
   $f31 operands substituted by zero constants.  */
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
1047
/* Emit a three-operand IEEE fp op with full qualifier handling:
   rounding mode, flush-to-zero, exception clear/raise.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1072
/* Instantiate gen_fNAME wrappers around gen_ieee_arith3.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1087
/* Emit an IEEE fp comparison: like gen_ieee_arith3, but comparisons
   need no rounding or flush-to-zero setup, and the operands are
   screened with the comparison-specific input helper.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1110
/* Instantiate gen_fNAME wrappers around gen_ieee_compare.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
1121
/* Expand an 8-bit ZAPNOT byte-select into a 64-bit mask: bit i of LIT
   set means byte i of the result is 0xff.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    /* Build from the most significant byte down, shifting as we go.  */
    for (byte = 7; byte >= 0; --byte) {
        mask <<= 8;
        if (lit & (1u << byte)) {
            mask |= 0xff;
        }
    }
    return mask;
}
1133
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        /* All bytes zapped.  */
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        /* All bytes kept.  */
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
1160
/* ZAPNOT: keep the bytes of RA selected by the literal (fast path via
   gen_zapnoti) or by RB at run time (helper).  */
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
1172
/* ZAP: zero the bytes of RA selected by the literal (complemented mask
   reuses gen_zapnoti) or by RB at run time (helper).  */
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
1184
1185
/* EXTWH, EXTLH, EXTQH: extract the high part of a datum straddling an
   aligned quadword boundary — left-shift RA by (64 - 8*(shift & 7)) %
   64, then zap to BYTE_MASK.  */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            /* Shift count = -(8 * (RB & 7)) mod 64.  */
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1210
/* EXTBL, EXTWL, EXTLL, EXTQL: extract the low part — right-shift RA by
   8*(shift & 7), then zap to BYTE_MASK.  */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1232
/* INSWH, INSLH, INSQH */
/* Insert into the HIGH end: take the bytes of Ra named by BYTE_MASK and
   shift them right by 64 - 8 * offset, so the high part of the field
   lands in the low part of the destination.  A write to R31 is
   discarded; R31 as source, or a literal byte offset of zero, yields
   zero.  */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above. */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63. */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            /* First shift by (count - 1), then by the final 1 bit, so a
               total shift of 64 produces zero instead of undefined
               behavior in the host shift instruction.  */
            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1274
1275 /* INSBL, INSWL, INSLL, INSQL */
1276 static void gen_ins_l(int ra, int rb, int rc, int islit,
1277 uint8_t lit, uint8_t byte_mask)
1278 {
1279 if (unlikely(rc == 31))
1280 return;
1281 else if (unlikely(ra == 31))
1282 tcg_gen_movi_i64(cpu_ir[rc], 0);
1283 else {
1284 TCGv tmp = tcg_temp_new();
1285
1286 /* The instruction description has us left-shift the byte mask
1287 the same number of byte slots as the data and apply the zap
1288 at the end. This is equivalent to simply performing the zap
1289 first and shifting afterward. */
1290 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1291
1292 if (islit) {
1293 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1294 } else {
1295 TCGv shift = tcg_temp_new();
1296 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1297 tcg_gen_shli_i64(shift, shift, 3);
1298 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1299 tcg_temp_free(shift);
1300 }
1301 tcg_temp_free(tmp);
1302 }
1303 }
1304
/* MSKWH, MSKLH, MSKQH */
/* Mask the HIGH end: clear in Ra the bytes that the field, shifted by
   the offset, would spill into the next quadword.  A write to R31 is
   discarded; R31 as source yields zero.  */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Constant offset: fold ((mask << <2:0>) >> 8) at translate
           time and clear those bytes with a single zapnot.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63. */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        /* Clear the masked bytes: Rc = Ra & ~mask.  */
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1342
1343 /* MSKBL, MSKWL, MSKLL, MSKQL */
1344 static void gen_msk_l(int ra, int rb, int rc, int islit,
1345 uint8_t lit, uint8_t byte_mask)
1346 {
1347 if (unlikely(rc == 31))
1348 return;
1349 else if (unlikely(ra == 31))
1350 tcg_gen_movi_i64(cpu_ir[rc], 0);
1351 else if (islit) {
1352 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1353 } else {
1354 TCGv shift = tcg_temp_new();
1355 TCGv mask = tcg_temp_new();
1356
1357 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1358 tcg_gen_shli_i64(shift, shift, 3);
1359 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1360 tcg_gen_shl_i64(mask, mask, shift);
1361
1362 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1363
1364 tcg_temp_free(mask);
1365 tcg_temp_free(shift);
1366 }
1367 }
1368
/* Code to call arith3 helpers */
/* Expand a translator stub for a three-operand integer instruction whose
   semantics live in helper_<name>.  The stub honors Alpha register
   conventions: a write to R31 is discarded, R31 as a source reads as
   zero, and an 8-bit literal may stand in for Rb.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
/* Byte-compare.  */
ARITH3(cmpbge)
/* Add/subtract/multiply with overflow trap (the /V forms).  */
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
/* MVI byte/word min/max operations.  */
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
/* MVI pixel error.  */
ARITH3(perr)
1412
/* Expand a translator stub for a two-operand MVI (multimedia) pack or
   unpack instruction implemented by helper_<name>.  A write to R31 is
   discarded; R31 as a source reads as zero.  */
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
1427
1428 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1429 int islit, uint8_t lit)
1430 {
1431 TCGv va, vb;
1432
1433 if (unlikely(rc == 31)) {
1434 return;
1435 }
1436
1437 if (ra == 31) {
1438 va = tcg_const_i64(0);
1439 } else {
1440 va = cpu_ir[ra];
1441 }
1442 if (islit) {
1443 vb = tcg_const_i64(lit);
1444 } else {
1445 vb = cpu_ir[rb];
1446 }
1447
1448 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1449
1450 if (ra == 31) {
1451 tcg_temp_free(va);
1452 }
1453 if (islit) {
1454 tcg_temp_free(vb);
1455 }
1456 }
1457
1458 static void gen_rx(int ra, int set)
1459 {
1460 TCGv_i32 tmp;
1461
1462 if (ra != 31) {
1463 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1464 }
1465
1466 tmp = tcg_const_i32(set);
1467 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1468 tcg_temp_free_i32(tmp);
1469 }
1470
1471 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1472 {
1473 uint32_t palcode;
1474 int32_t disp21, disp16, disp12;
1475 uint16_t fn11;
1476 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
1477 uint8_t lit;
1478 ExitStatus ret;
1479
1480 /* Decode all instruction fields */
1481 opc = insn >> 26;
1482 ra = (insn >> 21) & 0x1F;
1483 rb = (insn >> 16) & 0x1F;
1484 rc = insn & 0x1F;
1485 real_islit = islit = (insn >> 12) & 1;
1486 if (rb == 31 && !islit) {
1487 islit = 1;
1488 lit = 0;
1489 } else
1490 lit = (insn >> 13) & 0xFF;
1491 palcode = insn & 0x03FFFFFF;
1492 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1493 disp16 = (int16_t)(insn & 0x0000FFFF);
1494 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1495 fn11 = (insn >> 5) & 0x000007FF;
1496 fpfn = fn11 & 0x3F;
1497 fn7 = (insn >> 5) & 0x0000007F;
1498 fn2 = (insn >> 5) & 0x00000003;
1499 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1500 opc, ra, rb, rc, disp16);
1501
1502 ret = NO_EXIT;
1503 switch (opc) {
1504 case 0x00:
1505 /* CALL_PAL */
1506 #ifdef CONFIG_USER_ONLY
1507 if (palcode == 0x9E) {
1508 /* RDUNIQUE */
1509 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1510 break;
1511 } else if (palcode == 0x9F) {
1512 /* WRUNIQUE */
1513 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1514 break;
1515 }
1516 #endif
1517 if (palcode >= 0x80 && palcode < 0xC0) {
1518 /* Unprivileged PAL call */
1519 ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
1520 break;
1521 }
1522 #ifndef CONFIG_USER_ONLY
1523 if (palcode < 0x40) {
1524 /* Privileged PAL code */
1525 if (ctx->mem_idx & 1)
1526 goto invalid_opc;
1527 ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
1528 }
1529 #endif
1530 /* Invalid PAL call */
1531 goto invalid_opc;
1532 case 0x01:
1533 /* OPC01 */
1534 goto invalid_opc;
1535 case 0x02:
1536 /* OPC02 */
1537 goto invalid_opc;
1538 case 0x03:
1539 /* OPC03 */
1540 goto invalid_opc;
1541 case 0x04:
1542 /* OPC04 */
1543 goto invalid_opc;
1544 case 0x05:
1545 /* OPC05 */
1546 goto invalid_opc;
1547 case 0x06:
1548 /* OPC06 */
1549 goto invalid_opc;
1550 case 0x07:
1551 /* OPC07 */
1552 goto invalid_opc;
1553 case 0x08:
1554 /* LDA */
1555 if (likely(ra != 31)) {
1556 if (rb != 31)
1557 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1558 else
1559 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1560 }
1561 break;
1562 case 0x09:
1563 /* LDAH */
1564 if (likely(ra != 31)) {
1565 if (rb != 31)
1566 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1567 else
1568 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1569 }
1570 break;
1571 case 0x0A:
1572 /* LDBU */
1573 if (!(ctx->amask & AMASK_BWX))
1574 goto invalid_opc;
1575 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1576 break;
1577 case 0x0B:
1578 /* LDQ_U */
1579 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1580 break;
1581 case 0x0C:
1582 /* LDWU */
1583 if (!(ctx->amask & AMASK_BWX))
1584 goto invalid_opc;
1585 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1586 break;
1587 case 0x0D:
1588 /* STW */
1589 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1590 break;
1591 case 0x0E:
1592 /* STB */
1593 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1594 break;
1595 case 0x0F:
1596 /* STQ_U */
1597 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1598 break;
1599 case 0x10:
1600 switch (fn7) {
1601 case 0x00:
1602 /* ADDL */
1603 if (likely(rc != 31)) {
1604 if (ra != 31) {
1605 if (islit) {
1606 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1607 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1608 } else {
1609 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1610 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1611 }
1612 } else {
1613 if (islit)
1614 tcg_gen_movi_i64(cpu_ir[rc], lit);
1615 else
1616 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1617 }
1618 }
1619 break;
1620 case 0x02:
1621 /* S4ADDL */
1622 if (likely(rc != 31)) {
1623 if (ra != 31) {
1624 TCGv tmp = tcg_temp_new();
1625 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1626 if (islit)
1627 tcg_gen_addi_i64(tmp, tmp, lit);
1628 else
1629 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1630 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1631 tcg_temp_free(tmp);
1632 } else {
1633 if (islit)
1634 tcg_gen_movi_i64(cpu_ir[rc], lit);
1635 else
1636 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1637 }
1638 }
1639 break;
1640 case 0x09:
1641 /* SUBL */
1642 if (likely(rc != 31)) {
1643 if (ra != 31) {
1644 if (islit)
1645 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1646 else
1647 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1648 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1649 } else {
1650 if (islit)
1651 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1652 else {
1653 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1654 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1655 }
1656 }
1657 break;
1658 case 0x0B:
1659 /* S4SUBL */
1660 if (likely(rc != 31)) {
1661 if (ra != 31) {
1662 TCGv tmp = tcg_temp_new();
1663 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1664 if (islit)
1665 tcg_gen_subi_i64(tmp, tmp, lit);
1666 else
1667 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1668 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1669 tcg_temp_free(tmp);
1670 } else {
1671 if (islit)
1672 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1673 else {
1674 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1675 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1676 }
1677 }
1678 }
1679 break;
1680 case 0x0F:
1681 /* CMPBGE */
1682 gen_cmpbge(ra, rb, rc, islit, lit);
1683 break;
1684 case 0x12:
1685 /* S8ADDL */
1686 if (likely(rc != 31)) {
1687 if (ra != 31) {
1688 TCGv tmp = tcg_temp_new();
1689 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1690 if (islit)
1691 tcg_gen_addi_i64(tmp, tmp, lit);
1692 else
1693 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1694 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1695 tcg_temp_free(tmp);
1696 } else {
1697 if (islit)
1698 tcg_gen_movi_i64(cpu_ir[rc], lit);
1699 else
1700 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1701 }
1702 }
1703 break;
1704 case 0x1B:
1705 /* S8SUBL */
1706 if (likely(rc != 31)) {
1707 if (ra != 31) {
1708 TCGv tmp = tcg_temp_new();
1709 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1710 if (islit)
1711 tcg_gen_subi_i64(tmp, tmp, lit);
1712 else
1713 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1714 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1715 tcg_temp_free(tmp);
1716 } else {
1717 if (islit)
1718 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1719 else
1720 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1721 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1722 }
1723 }
1724 }
1725 break;
1726 case 0x1D:
1727 /* CMPULT */
1728 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1729 break;
1730 case 0x20:
1731 /* ADDQ */
1732 if (likely(rc != 31)) {
1733 if (ra != 31) {
1734 if (islit)
1735 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1736 else
1737 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1738 } else {
1739 if (islit)
1740 tcg_gen_movi_i64(cpu_ir[rc], lit);
1741 else
1742 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1743 }
1744 }
1745 break;
1746 case 0x22:
1747 /* S4ADDQ */
1748 if (likely(rc != 31)) {
1749 if (ra != 31) {
1750 TCGv tmp = tcg_temp_new();
1751 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1752 if (islit)
1753 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1754 else
1755 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1756 tcg_temp_free(tmp);
1757 } else {
1758 if (islit)
1759 tcg_gen_movi_i64(cpu_ir[rc], lit);
1760 else
1761 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1762 }
1763 }
1764 break;
1765 case 0x29:
1766 /* SUBQ */
1767 if (likely(rc != 31)) {
1768 if (ra != 31) {
1769 if (islit)
1770 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1771 else
1772 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1773 } else {
1774 if (islit)
1775 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1776 else
1777 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1778 }
1779 }
1780 break;
1781 case 0x2B:
1782 /* S4SUBQ */
1783 if (likely(rc != 31)) {
1784 if (ra != 31) {
1785 TCGv tmp = tcg_temp_new();
1786 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1787 if (islit)
1788 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1789 else
1790 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1791 tcg_temp_free(tmp);
1792 } else {
1793 if (islit)
1794 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1795 else
1796 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1797 }
1798 }
1799 break;
1800 case 0x2D:
1801 /* CMPEQ */
1802 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1803 break;
1804 case 0x32:
1805 /* S8ADDQ */
1806 if (likely(rc != 31)) {
1807 if (ra != 31) {
1808 TCGv tmp = tcg_temp_new();
1809 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1810 if (islit)
1811 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1812 else
1813 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1814 tcg_temp_free(tmp);
1815 } else {
1816 if (islit)
1817 tcg_gen_movi_i64(cpu_ir[rc], lit);
1818 else
1819 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1820 }
1821 }
1822 break;
1823 case 0x3B:
1824 /* S8SUBQ */
1825 if (likely(rc != 31)) {
1826 if (ra != 31) {
1827 TCGv tmp = tcg_temp_new();
1828 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1829 if (islit)
1830 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1831 else
1832 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1833 tcg_temp_free(tmp);
1834 } else {
1835 if (islit)
1836 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1837 else
1838 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1839 }
1840 }
1841 break;
1842 case 0x3D:
1843 /* CMPULE */
1844 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1845 break;
1846 case 0x40:
1847 /* ADDL/V */
1848 gen_addlv(ra, rb, rc, islit, lit);
1849 break;
1850 case 0x49:
1851 /* SUBL/V */
1852 gen_sublv(ra, rb, rc, islit, lit);
1853 break;
1854 case 0x4D:
1855 /* CMPLT */
1856 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1857 break;
1858 case 0x60:
1859 /* ADDQ/V */
1860 gen_addqv(ra, rb, rc, islit, lit);
1861 break;
1862 case 0x69:
1863 /* SUBQ/V */
1864 gen_subqv(ra, rb, rc, islit, lit);
1865 break;
1866 case 0x6D:
1867 /* CMPLE */
1868 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1869 break;
1870 default:
1871 goto invalid_opc;
1872 }
1873 break;
1874 case 0x11:
1875 switch (fn7) {
1876 case 0x00:
1877 /* AND */
1878 if (likely(rc != 31)) {
1879 if (ra == 31)
1880 tcg_gen_movi_i64(cpu_ir[rc], 0);
1881 else if (islit)
1882 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1883 else
1884 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1885 }
1886 break;
1887 case 0x08:
1888 /* BIC */
1889 if (likely(rc != 31)) {
1890 if (ra != 31) {
1891 if (islit)
1892 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1893 else
1894 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1895 } else
1896 tcg_gen_movi_i64(cpu_ir[rc], 0);
1897 }
1898 break;
1899 case 0x14:
1900 /* CMOVLBS */
1901 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1902 break;
1903 case 0x16:
1904 /* CMOVLBC */
1905 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1906 break;
1907 case 0x20:
1908 /* BIS */
1909 if (likely(rc != 31)) {
1910 if (ra != 31) {
1911 if (islit)
1912 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1913 else
1914 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1915 } else {
1916 if (islit)
1917 tcg_gen_movi_i64(cpu_ir[rc], lit);
1918 else
1919 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1920 }
1921 }
1922 break;
1923 case 0x24:
1924 /* CMOVEQ */
1925 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1926 break;
1927 case 0x26:
1928 /* CMOVNE */
1929 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1930 break;
1931 case 0x28:
1932 /* ORNOT */
1933 if (likely(rc != 31)) {
1934 if (ra != 31) {
1935 if (islit)
1936 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1937 else
1938 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1939 } else {
1940 if (islit)
1941 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1942 else
1943 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1944 }
1945 }
1946 break;
1947 case 0x40:
1948 /* XOR */
1949 if (likely(rc != 31)) {
1950 if (ra != 31) {
1951 if (islit)
1952 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1953 else
1954 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1955 } else {
1956 if (islit)
1957 tcg_gen_movi_i64(cpu_ir[rc], lit);
1958 else
1959 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1960 }
1961 }
1962 break;
1963 case 0x44:
1964 /* CMOVLT */
1965 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1966 break;
1967 case 0x46:
1968 /* CMOVGE */
1969 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1970 break;
1971 case 0x48:
1972 /* EQV */
1973 if (likely(rc != 31)) {
1974 if (ra != 31) {
1975 if (islit)
1976 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1977 else
1978 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1979 } else {
1980 if (islit)
1981 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1982 else
1983 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1984 }
1985 }
1986 break;
1987 case 0x61:
1988 /* AMASK */
1989 if (likely(rc != 31)) {
1990 if (islit)
1991 tcg_gen_movi_i64(cpu_ir[rc], lit);
1992 else
1993 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1994 switch (ctx->env->implver) {
1995 case IMPLVER_2106x:
1996 /* EV4, EV45, LCA, LCA45 & EV5 */
1997 break;
1998 case IMPLVER_21164:
1999 case IMPLVER_21264:
2000 case IMPLVER_21364:
2001 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
2002 ~(uint64_t)ctx->amask);
2003 break;
2004 }
2005 }
2006 break;
2007 case 0x64:
2008 /* CMOVLE */
2009 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2010 break;
2011 case 0x66:
2012 /* CMOVGT */
2013 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2014 break;
2015 case 0x6C:
2016 /* IMPLVER */
2017 if (rc != 31)
2018 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2019 break;
2020 default:
2021 goto invalid_opc;
2022 }
2023 break;
2024 case 0x12:
2025 switch (fn7) {
2026 case 0x02:
2027 /* MSKBL */
2028 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2029 break;
2030 case 0x06:
2031 /* EXTBL */
2032 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2033 break;
2034 case 0x0B:
2035 /* INSBL */
2036 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2037 break;
2038 case 0x12:
2039 /* MSKWL */
2040 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2041 break;
2042 case 0x16:
2043 /* EXTWL */
2044 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2045 break;
2046 case 0x1B:
2047 /* INSWL */
2048 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2049 break;
2050 case 0x22:
2051 /* MSKLL */
2052 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2053 break;
2054 case 0x26:
2055 /* EXTLL */
2056 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2057 break;
2058 case 0x2B:
2059 /* INSLL */
2060 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2061 break;
2062 case 0x30:
2063 /* ZAP */
2064 gen_zap(ra, rb, rc, islit, lit);
2065 break;
2066 case 0x31:
2067 /* ZAPNOT */
2068 gen_zapnot(ra, rb, rc, islit, lit);
2069 break;
2070 case 0x32:
2071 /* MSKQL */
2072 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2073 break;
2074 case 0x34:
2075 /* SRL */
2076 if (likely(rc != 31)) {
2077 if (ra != 31) {
2078 if (islit)
2079 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2080 else {
2081 TCGv shift = tcg_temp_new();
2082 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2083 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2084 tcg_temp_free(shift);
2085 }
2086 } else
2087 tcg_gen_movi_i64(cpu_ir[rc], 0);
2088 }
2089 break;
2090 case 0x36:
2091 /* EXTQL */
2092 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2093 break;
2094 case 0x39:
2095 /* SLL */
2096 if (likely(rc != 31)) {
2097 if (ra != 31) {
2098 if (islit)
2099 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2100 else {
2101 TCGv shift = tcg_temp_new();
2102 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2103 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2104 tcg_temp_free(shift);
2105 }
2106 } else
2107 tcg_gen_movi_i64(cpu_ir[rc], 0);
2108 }
2109 break;
2110 case 0x3B:
2111 /* INSQL */
2112 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2113 break;
2114 case 0x3C:
2115 /* SRA */
2116 if (likely(rc != 31)) {
2117 if (ra != 31) {
2118 if (islit)
2119 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2120 else {
2121 TCGv shift = tcg_temp_new();
2122 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2123 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2124 tcg_temp_free(shift);
2125 }
2126 } else
2127 tcg_gen_movi_i64(cpu_ir[rc], 0);
2128 }
2129 break;
2130 case 0x52:
2131 /* MSKWH */
2132 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2133 break;
2134 case 0x57:
2135 /* INSWH */
2136 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2137 break;
2138 case 0x5A:
2139 /* EXTWH */
2140 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2141 break;
2142 case 0x62:
2143 /* MSKLH */
2144 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2145 break;
2146 case 0x67:
2147 /* INSLH */
2148 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2149 break;
2150 case 0x6A:
2151 /* EXTLH */
2152 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2153 break;
2154 case 0x72:
2155 /* MSKQH */
2156 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2157 break;
2158 case 0x77:
2159 /* INSQH */
2160 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2161 break;
2162 case 0x7A:
2163 /* EXTQH */
2164 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2165 break;
2166 default:
2167 goto invalid_opc;
2168 }
2169 break;
2170 case 0x13:
2171 switch (fn7) {
2172 case 0x00:
2173 /* MULL */
2174 if (likely(rc != 31)) {
2175 if (ra == 31)
2176 tcg_gen_movi_i64(cpu_ir[rc], 0);
2177 else {
2178 if (islit)
2179 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2180 else
2181 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2182 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2183 }
2184 }
2185 break;
2186 case 0x20:
2187 /* MULQ */
2188 if (likely(rc != 31)) {
2189 if (ra == 31)
2190 tcg_gen_movi_i64(cpu_ir[rc], 0);
2191 else if (islit)
2192 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2193 else
2194 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2195 }
2196 break;
2197 case 0x30:
2198 /* UMULH */
2199 gen_umulh(ra, rb, rc, islit, lit);
2200 break;
2201 case 0x40:
2202 /* MULL/V */
2203 gen_mullv(ra, rb, rc, islit, lit);
2204 break;
2205 case 0x60:
2206 /* MULQ/V */
2207 gen_mulqv(ra, rb, rc, islit, lit);
2208 break;
2209 default:
2210 goto invalid_opc;
2211 }
2212 break;
2213 case 0x14:
2214 switch (fpfn) { /* fn11 & 0x3F */
2215 case 0x04:
2216 /* ITOFS */
2217 if (!(ctx->amask & AMASK_FIX))
2218 goto invalid_opc;
2219 if (likely(rc != 31)) {
2220 if (ra != 31) {
2221 TCGv_i32 tmp = tcg_temp_new_i32();
2222 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2223 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2224 tcg_temp_free_i32(tmp);
2225 } else
2226 tcg_gen_movi_i64(cpu_fir[rc], 0);
2227 }
2228 break;
2229 case 0x0A:
2230 /* SQRTF */
2231 if (!(ctx->amask & AMASK_FIX))
2232 goto invalid_opc;
2233 gen_fsqrtf(rb, rc);
2234 break;
2235 case 0x0B:
2236 /* SQRTS */
2237 if (!(ctx->amask & AMASK_FIX))
2238 goto invalid_opc;
2239 gen_fsqrts(ctx, rb, rc, fn11);
2240 break;
2241 case 0x14:
2242 /* ITOFF */
2243 if (!(ctx->amask & AMASK_FIX))
2244 goto invalid_opc;
2245 if (likely(rc != 31)) {
2246 if (ra != 31) {
2247 TCGv_i32 tmp = tcg_temp_new_i32();
2248 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2249 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2250 tcg_temp_free_i32(tmp);
2251 } else
2252 tcg_gen_movi_i64(cpu_fir[rc], 0);
2253 }
2254 break;
2255 case 0x24:
2256 /* ITOFT */
2257 if (!(ctx->amask & AMASK_FIX))
2258 goto invalid_opc;
2259 if (likely(rc != 31)) {
2260 if (ra != 31)
2261 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2262 else
2263 tcg_gen_movi_i64(cpu_fir[rc], 0);
2264 }
2265 break;
2266 case 0x2A:
2267 /* SQRTG */
2268 if (!(ctx->amask & AMASK_FIX))
2269 goto invalid_opc;
2270 gen_fsqrtg(rb, rc);
2271 break;
2272 case 0x02B:
2273 /* SQRTT */
2274 if (!(ctx->amask & AMASK_FIX))
2275 goto invalid_opc;
2276 gen_fsqrtt(ctx, rb, rc, fn11);
2277 break;
2278 default:
2279 goto invalid_opc;
2280 }
2281 break;
2282 case 0x15:
2283 /* VAX floating point */
2284 /* XXX: rounding mode and trap are ignored (!) */
2285 switch (fpfn) { /* fn11 & 0x3F */
2286 case 0x00:
2287 /* ADDF */
2288 gen_faddf(ra, rb, rc);
2289 break;
2290 case 0x01:
2291 /* SUBF */
2292 gen_fsubf(ra, rb, rc);
2293 break;
2294 case 0x02:
2295 /* MULF */
2296 gen_fmulf(ra, rb, rc);
2297 break;
2298 case 0x03:
2299 /* DIVF */
2300 gen_fdivf(ra, rb, rc);
2301 break;
2302 case 0x1E:
2303 /* CVTDG */
2304 #if 0 // TODO
2305 gen_fcvtdg(rb, rc);
2306 #else
2307 goto invalid_opc;
2308 #endif
2309 break;
2310 case 0x20:
2311 /* ADDG */
2312 gen_faddg(ra, rb, rc);
2313 break;
2314 case 0x21:
2315 /* SUBG */
2316 gen_fsubg(ra, rb, rc);
2317 break;
2318 case 0x22:
2319 /* MULG */
2320 gen_fmulg(ra, rb, rc);
2321 break;
2322 case 0x23:
2323 /* DIVG */
2324 gen_fdivg(ra, rb, rc);
2325 break;
2326 case 0x25:
2327 /* CMPGEQ */
2328 gen_fcmpgeq(ra, rb, rc);
2329 break;
2330 case 0x26:
2331 /* CMPGLT */
2332 gen_fcmpglt(ra, rb, rc);
2333 break;
2334 case 0x27:
2335 /* CMPGLE */
2336 gen_fcmpgle(ra, rb, rc);
2337 break;
2338 case 0x2C:
2339 /* CVTGF */
2340 gen_fcvtgf(rb, rc);
2341 break;
2342 case 0x2D:
2343 /* CVTGD */
2344 #if 0 // TODO
2345 gen_fcvtgd(rb, rc);
2346 #else
2347 goto invalid_opc;
2348 #endif
2349 break;
2350 case 0x2F:
2351 /* CVTGQ */
2352 gen_fcvtgq(rb, rc);
2353 break;
2354 case 0x3C:
2355 /* CVTQF */
2356 gen_fcvtqf(rb, rc);
2357 break;
2358 case 0x3E:
2359 /* CVTQG */
2360 gen_fcvtqg(rb, rc);
2361 break;
2362 default:
2363 goto invalid_opc;
2364 }
2365 break;
2366 case 0x16:
2367 /* IEEE floating-point */
2368 switch (fpfn) { /* fn11 & 0x3F */
2369 case 0x00:
2370 /* ADDS */
2371 gen_fadds(ctx, ra, rb, rc, fn11);
2372 break;
2373 case 0x01:
2374 /* SUBS */
2375 gen_fsubs(ctx, ra, rb, rc, fn11);
2376 break;
2377 case 0x02:
2378 /* MULS */
2379 gen_fmuls(ctx, ra, rb, rc, fn11);
2380 break;
2381 case 0x03:
2382 /* DIVS */
2383 gen_fdivs(ctx, ra, rb, rc, fn11);
2384 break;
2385 case 0x20:
2386 /* ADDT */
2387 gen_faddt(ctx, ra, rb, rc, fn11);
2388 break;
2389 case 0x21:
2390 /* SUBT */
2391 gen_fsubt(ctx, ra, rb, rc, fn11);
2392 break;
2393 case 0x22:
2394 /* MULT */
2395 gen_fmult(ctx, ra, rb, rc, fn11);
2396 break;
2397 case 0x23:
2398 /* DIVT */
2399 gen_fdivt(ctx, ra, rb, rc, fn11);
2400 break;
2401 case 0x24:
2402 /* CMPTUN */
2403 gen_fcmptun(ctx, ra, rb, rc, fn11);
2404 break;
2405 case 0x25:
2406 /* CMPTEQ */
2407 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2408 break;
2409 case 0x26:
2410 /* CMPTLT */
2411 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2412 break;
2413 case 0x27:
2414 /* CMPTLE */
2415 gen_fcmptle(ctx, ra, rb, rc, fn11);
2416 break;
2417 case 0x2C:
2418 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2419 /* CVTST */
2420 gen_fcvtst(ctx, rb, rc, fn11);
2421 } else {
2422 /* CVTTS */
2423 gen_fcvtts(ctx, rb, rc, fn11);
2424 }
2425 break;
2426 case 0x2F:
2427 /* CVTTQ */
2428 gen_fcvttq(ctx, rb, rc, fn11);
2429 break;
2430 case 0x3C:
2431 /* CVTQS */
2432 gen_fcvtqs(ctx, rb, rc, fn11);
2433 break;
2434 case 0x3E:
2435 /* CVTQT */
2436 gen_fcvtqt(ctx, rb, rc, fn11);
2437 break;
2438 default:
2439 goto invalid_opc;
2440 }
2441 break;
2442 case 0x17:
2443 switch (fn11) {
2444 case 0x010:
2445 /* CVTLQ */
2446 gen_fcvtlq(rb, rc);
2447 break;
2448 case 0x020:
2449 if (likely(rc != 31)) {
2450 if (ra == rb) {
2451 /* FMOV */
2452 if (ra == 31)
2453 tcg_gen_movi_i64(cpu_fir[rc], 0);
2454 else
2455 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2456 } else {
2457 /* CPYS */
2458 gen_fcpys(ra, rb, rc);
2459 }
2460 }
2461 break;
2462 case 0x021:
2463 /* CPYSN */
2464 gen_fcpysn(ra, rb, rc);
2465 break;
2466 case 0x022:
2467 /* CPYSE */
2468 gen_fcpyse(ra, rb, rc);
2469 break;
2470 case 0x024:
2471 /* MT_FPCR */
2472 if (likely(ra != 31))
2473 gen_helper_store_fpcr(cpu_fir[ra]);
2474 else {
2475 TCGv tmp = tcg_const_i64(0);
2476 gen_helper_store_fpcr(tmp);
2477 tcg_temp_free(tmp);
2478 }
2479 break;
2480 case 0x025:
2481 /* MF_FPCR */
2482 if (likely(ra != 31))
2483 gen_helper_load_fpcr(cpu_fir[ra]);
2484 break;
2485 case 0x02A:
2486 /* FCMOVEQ */
2487 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2488 break;
2489 case 0x02B:
2490 /* FCMOVNE */
2491 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2492 break;
2493 case 0x02C:
2494 /* FCMOVLT */
2495 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2496 break;
2497 case 0x02D:
2498 /* FCMOVGE */
2499 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2500 break;
2501 case 0x02E:
2502 /* FCMOVLE */
2503 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2504 break;
2505 case 0x02F:
2506 /* FCMOVGT */
2507 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2508 break;
2509 case 0x030:
2510 /* CVTQL */
2511 gen_fcvtql(rb, rc);
2512 break;
2513 case 0x130:
2514 /* CVTQL/V */
2515 case 0x530:
2516 /* CVTQL/SV */
2517 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2518 /v doesn't do. The only thing I can think is that /sv is a
2519 valid instruction merely for completeness in the ISA. */
2520 gen_fcvtql_v(ctx, rb, rc);
2521 break;
2522 default:
2523 goto invalid_opc;
2524 }
2525 break;
2526 case 0x18:
2527 switch ((uint16_t)disp16) {
2528 case 0x0000:
2529 /* TRAPB */
2530 /* No-op. */
2531 break;
2532 case 0x0400:
2533 /* EXCB */
2534 /* No-op. */
2535 break;
2536 case 0x4000:
2537 /* MB */
2538 /* No-op */
2539 break;
2540 case 0x4400:
2541 /* WMB */
2542 /* No-op */
2543 break;
2544 case 0x8000:
2545 /* FETCH */
2546 /* No-op */
2547 break;
2548 case 0xA000:
2549 /* FETCH_M */
2550 /* No-op */
2551 break;
2552 case 0xC000:
2553 /* RPCC */
2554 if (ra != 31)
2555 gen_helper_load_pcc(cpu_ir[ra]);
2556 break;
2557 case 0xE000:
2558 /* RC */
2559 gen_rx(ra, 0);
2560 break;
2561 case 0xE800:
2562 /* ECB */
2563 break;
2564 case 0xF000:
2565 /* RS */
2566 gen_rx(ra, 1);
2567 break;
2568 case 0xF800:
2569 /* WH64 */
2570 /* No-op */
2571 break;
2572 default:
2573 goto invalid_opc;
2574 }
2575 break;
2576 case 0x19:
2577 /* HW_MFPR (PALcode) */
2578 #if defined (CONFIG_USER_ONLY)
2579 goto invalid_opc;
2580 #else
2581 if (!ctx->pal_mode)
2582 goto invalid_opc;
2583 if (ra != 31) {
2584 TCGv tmp = tcg_const_i32(insn & 0xFF);
2585 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
2586 tcg_temp_free(tmp);
2587 }
2588 break;
2589 #endif
2590 case 0x1A:
2591 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2592 prediction stack action, which of course we don't implement. */
2593 if (rb != 31) {
2594 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2595 } else {
2596 tcg_gen_movi_i64(cpu_pc, 0);
2597 }
2598 if (ra != 31) {
2599 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2600 }
2601 ret = EXIT_PC_UPDATED;
2602 break;
2603 case 0x1B:
2604 /* HW_LD (PALcode) */
2605 #if defined (CONFIG_USER_ONLY)
2606 goto invalid_opc;
2607 #else
2608 if (!ctx->pal_mode)
2609 goto invalid_opc;
2610 if (ra != 31) {
2611 TCGv addr = tcg_temp_new();
2612 if (rb != 31)
2613 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2614 else
2615 tcg_gen_movi_i64(addr, disp12);
2616 switch ((insn >> 12) & 0xF) {
2617 case 0x0:
2618 /* Longword physical access (hw_ldl/p) */
2619 gen_helper_ldl_phys(cpu_ir[ra], addr);
2620 break;
2621 case 0x1:
2622 /* Quadword physical access (hw_ldq/p) */
2623 gen_helper_ldq_phys(cpu_ir[ra], addr);
2624 break;
2625 case 0x2:
2626 /* Longword physical access with lock (hw_ldl_l/p) */
2627 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
2628 break;
2629 case 0x3:
2630 /* Quadword physical access with lock (hw_ldq_l/p) */
2631 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
2632 break;
2633 case 0x4:
2634 /* Longword virtual PTE fetch (hw_ldl/v) */
2635 goto invalid_opc;
2636 case 0x5:
2637 /* Quadword virtual PTE fetch (hw_ldq/v) */
2638 goto invalid_opc;
2639 break;
2640 case 0x6:
2641 /* Incpu_ir[ra]id */
2642 goto invalid_opc;
2643 case 0x7:
2644 /* Incpu_ir[ra]id */
2645 goto invalid_opc;
2646 case 0x8:
2647 /* Longword virtual access (hw_ldl) */
2648 goto invalid_opc;
2649 case 0x9:
2650 /* Quadword virtual access (hw_ldq) */
2651 goto invalid_opc;
2652 case 0xA:
2653 /* Longword virtual access with protection check (hw_ldl/w) */
2654 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2655 break;
2656 case 0xB:
2657 /* Quadword virtual access with protection check (hw_ldq/w) */
2658 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2659 break;
2660 case 0xC:
2661 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2662 goto invalid_opc;
2663 case 0xD:
2664 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2665 goto invalid_opc;
2666 case 0xE:
2667 /* Longword virtual access with alternate access mode and
2668 protection checks (hw_ldl/wa) */
2669 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2670 break;
2671 case 0xF:
2672 /* Quadword virtual access with alternate access mode and
2673 protection checks (hw_ldq/wa) */
2674 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2675 break;
2676 }
2677 tcg_temp_free(addr);
2678 }
2679 break;
2680 #endif
2681 case 0x1C:
2682 switch (fn7) {
2683 case 0x00:
2684 /* SEXTB */
2685 if (!(ctx->amask & AMASK_BWX))
2686 goto invalid_opc;
2687 if (likely(rc != 31)) {
2688 if (islit)
2689 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2690 else
2691 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2692 }
2693 break;
2694 case 0x01:
2695 /* SEXTW */
2696 if (!(ctx->amask & AMASK_BWX))
2697 goto invalid_opc;
2698 if (likely(rc != 31)) {
2699 if (islit)
2700 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2701 else
2702 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2703 }
2704 break;
2705 case 0x30:
2706 /* CTPOP */
2707 if (!(ctx->amask & AMASK_CIX))
2708 goto invalid_opc;
2709 if (likely(rc != 31)) {
2710 if (islit)
2711 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2712 else
2713 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2714 }
2715 break;
2716 case 0x31:
2717 /* PERR */
2718 if (!(ctx->amask & AMASK_MVI))
2719 goto invalid_opc;
2720 gen_perr(ra, rb, rc, islit, lit);
2721 break;
2722 case 0x32:
2723 /* CTLZ */
2724 if (!(ctx->amask & AMASK_CIX))
2725 goto invalid_opc;
2726 if (likely(rc != 31)) {
2727 if (islit)
2728 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2729 else
2730 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2731 }
2732 break;
2733 case 0x33:
2734 /* CTTZ */
2735 if (!(ctx->amask & AMASK_CIX))
2736 goto invalid_opc;
2737 if (likely(rc != 31)) {
2738 if (islit)
2739 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2740 else
2741 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2742 }
2743 break;
2744 case 0x34:
2745 /* UNPKBW */
2746 if (!(ctx->amask & AMASK_MVI))
2747 goto invalid_opc;
2748 if (real_islit || ra != 31)
2749 goto invalid_opc;
2750 gen_unpkbw (rb, rc);
2751 break;
2752 case 0x35:
2753 /* UNPKBL */
2754 if (!(ctx->amask & AMASK_MVI))
2755 goto invalid_opc;
2756 if (real_islit || ra != 31)
2757 goto invalid_opc;
2758 gen_unpkbl (rb, rc);
2759 break;
2760 case 0x36:
2761 /* PKWB */
2762 if (!(ctx->amask & AMASK_MVI))
2763 goto invalid_opc;
2764 if (real_islit || ra != 31)
2765 goto invalid_opc;
2766 gen_pkwb (rb, rc);
2767 break;
2768 case 0x37:
2769 /* PKLB */
2770 if (!(ctx->amask & AMASK_MVI))
2771 goto invalid_opc;
2772 if (real_islit || ra != 31)
2773 goto invalid_opc;
2774 gen_pklb (rb, rc);
2775 break;
2776 case 0x38:
2777 /* MINSB8 */
2778 if (!(ctx->amask & AMASK_MVI))
2779 goto invalid_opc;
2780 gen_minsb8 (ra, rb, rc, islit, lit);
2781 break;
2782 case 0x39:
2783 /* MINSW4 */
2784 if (!(ctx->amask & AMASK_MVI))
2785 goto invalid_opc;
2786 gen_minsw4 (ra, rb, rc, islit, lit);
2787 break;
2788 case 0x3A:
2789 /* MINUB8 */
2790 if (!(ctx->amask & AMASK_MVI))
2791 goto invalid_opc;
2792 gen_minub8 (ra, rb, rc, islit, lit);
2793 break;
2794 case 0x3B:
2795 /* MINUW4 */
2796 if (!(ctx->amask & AMASK_MVI))
2797 goto invalid_opc;
2798 gen_minuw4 (ra, rb, rc, islit, lit);
2799 break;
2800 case 0x3C:
2801 /* MAXUB8 */
2802 if (!(ctx->amask & AMASK_MVI))
2803 goto invalid_opc;
2804 gen_maxub8 (ra, rb, rc, islit, lit);
2805 break;
2806 case 0x3D:
2807 /* MAXUW4 */
2808 if (!(ctx->amask & AMASK_MVI))
2809 goto invalid_opc;
2810 gen_maxuw4 (ra, rb, rc, islit, lit);
2811 break;
2812 case 0x3E:
2813 /* MAXSB8 */
2814 if (!(ctx->amask & AMASK_MVI))
2815 goto invalid_opc;
2816 gen_maxsb8 (ra, rb, rc, islit, lit);
2817 break;
2818 case 0x3F:
2819 /* MAXSW4 */
2820 if (!(ctx->amask & AMASK_MVI))
2821 goto invalid_opc;
2822 gen_maxsw4 (ra, rb, rc, islit, lit);
2823 break;
2824 case 0x70:
2825 /* FTOIT */
2826 if (!(ctx->amask & AMASK_FIX))
2827 goto invalid_opc;
2828 if (likely(rc != 31)) {
2829 if (ra != 31)
2830 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2831 else
2832 tcg_gen_movi_i64(cpu_ir[rc], 0);
2833 }
2834 break;
2835 case 0x78:
2836 /* FTOIS */
2837 if (!(ctx->amask & AMASK_FIX))
2838 goto invalid_opc;
2839 if (rc != 31) {
2840 TCGv_i32 tmp1 = tcg_temp_new_i32();
2841 if (ra != 31)
2842 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2843 else {
2844 TCGv tmp2 = tcg_const_i64(0);
2845 gen_helper_s_to_memory(tmp1, tmp2);
2846 tcg_temp_free(tmp2);
2847 }
2848 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2849 tcg_temp_free_i32(tmp1);
2850 }
2851 break;
2852 default:
2853 goto invalid_opc;
2854 }
2855 break;
2856 case 0x1D:
2857 /* HW_MTPR (PALcode) */
2858 #if defined (CONFIG_USER_ONLY)
2859 goto invalid_opc;
2860 #else
2861 if (!ctx->pal_mode)
2862 goto invalid_opc;
2863 else {
2864 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2865 if (ra != 31)
2866 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2867 else {
2868 TCGv tmp2 = tcg_const_i64(0);
2869 gen_helper_mtpr(tmp1, tmp2);
2870 tcg_temp_free(tmp2);
2871 }
2872 tcg_temp_free(tmp1);
2873 ret = EXIT_PC_STALE;
2874 }
2875 break;
2876 #endif
2877 case 0x1E:
2878 /* HW_REI (PALcode) */
2879 #if defined (CONFIG_USER_ONLY)
2880 goto invalid_opc;
2881 #else
2882 if (!ctx->pal_mode)
2883 goto invalid_opc;
2884 if (rb == 31) {
2885 /* "Old" alpha */
2886 gen_helper_hw_rei();
2887 } else {
2888 TCGv tmp;
2889
2890 if (ra != 31) {
2891 tmp = tcg_temp_new();
2892 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2893 } else
2894 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2895 gen_helper_hw_ret(tmp);
2896 tcg_temp_free(tmp);
2897 }
2898 ret = EXIT_PC_UPDATED;
2899 break;
2900 #endif
2901 case 0x1F:
2902 /* HW_ST (PALcode) */
2903 #if defined (CONFIG_USER_ONLY)
2904 goto invalid_opc;
2905 #else
2906 if (!ctx->pal_mode)
2907 goto invalid_opc;
2908 else {
2909 TCGv addr, val;
2910 addr = tcg_temp_new();
2911 if (rb != 31)
2912 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2913 else
2914 tcg_gen_movi_i64(addr, disp12);
2915 if (ra != 31)
2916 val = cpu_ir[ra];
2917 else {
2918 val = tcg_temp_new();
2919 tcg_gen_movi_i64(val, 0);
2920 }
2921 switch ((insn >> 12) & 0xF) {
2922 case 0x0:
2923 /* Longword physical access */
2924 gen_helper_stl_phys(addr, val);
2925 break;
2926 case 0x1:
2927 /* Quadword physical access */
2928 gen_helper_stq_phys(addr, val);
2929 break;
2930 case 0x2:
2931 /* Longword physical access with lock */
2932 gen_helper_stl_c_phys(val, addr, val);
2933 break;
2934 case 0x3:
2935 /* Quadword physical access with lock */
2936 gen_helper_stq_c_phys(val, addr, val);
2937 break;
2938 case 0x4:
2939 /* Longword virtual access */
2940 goto invalid_opc;
2941 case 0x5:
2942 /* Quadword virtual access */
2943 goto invalid_opc;
2944 case 0x6:
2945 /* Invalid */
2946 goto invalid_opc;
2947 case 0x7:
2948 /* Invalid */
2949 goto invalid_opc;
2950 case 0x8:
2951 /* Invalid */
2952 goto invalid_opc;
2953 case 0x9:
2954 /* Invalid */
2955 goto invalid_opc;
2956 case 0xA:
2957 /* Invalid */
2958 goto invalid_opc;
2959 case 0xB:
2960 /* Invalid */
2961 goto invalid_opc;
2962 case 0xC:
2963 /* Longword virtual access with alternate access mode */
2964 goto invalid_opc;
2965 case 0xD:
2966 /* Quadword virtual access with alternate access mode */
2967 goto invalid_opc;
2968 case 0xE:
2969 /* Invalid */
2970 goto invalid_opc;
2971 case 0xF:
2972 /* Invalid */
2973 goto invalid_opc;
2974 }
2975 if (ra == 31)
2976 tcg_temp_free(val);
2977 tcg_temp_free(addr);
2978 }
2979 break;
2980 #endif
2981 case 0x20:
2982 /* LDF */
2983 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2984 break;
2985 case 0x21:
2986 /* LDG */
2987 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2988 break;
2989 case 0x22:
2990 /* LDS */
2991 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2992 break;
2993 case 0x23:
2994 /* LDT */
2995 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2996 break;
2997 case 0x24:
2998 /* STF */
2999 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3000 break;
3001 case 0x25:
3002 /* STG */
3003 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3004 break;
3005 case 0x26:
3006 /* STS */
3007 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3008 break;
3009 case 0x27:
3010 /* STT */
3011 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3012 break;
3013 case 0x28:
3014 /* LDL */
3015 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3016 break;
3017 case 0x29:
3018 /* LDQ */
3019 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3020 break;
3021 case 0x2A:
3022 /* LDL_L */
3023 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3024 break;
3025 case 0x2B:
3026 /* LDQ_L */
3027 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3028 break;
3029 case 0x2C:
3030 /* STL */
3031 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3032 break;
3033 case 0x2D:
3034 /* STQ */
3035 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3036 break;
3037 case 0x2E:
3038 /* STL_C */
3039 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3040 break;
3041 case 0x2F:
3042 /* STQ_C */
3043 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3044 break;
3045 case 0x30:
3046 /* BR */
3047 ret = gen_bdirect(ctx, ra, disp21);
3048 break;
3049 case 0x31: /* FBEQ */
3050 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3051 break;
3052 case 0x32: /* FBLT */
3053 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3054 break;
3055 case 0x33: /* FBLE */
3056 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3057 break;
3058 case 0x34:
3059 /* BSR */
3060 ret = gen_bdirect(ctx, ra, disp21);
3061 break;
3062 case 0x35: /* FBNE */
3063 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3064 break;
3065 case 0x36: /* FBGE */
3066 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3067 break;
3068 case 0x37: /* FBGT */
3069 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3070 break;
3071 case 0x38:
3072 /* BLBC */
3073 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3074 break;
3075 case 0x39:
3076 /* BEQ */
3077 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3078 break;
3079 case 0x3A:
3080 /* BLT */
3081 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3082 break;
3083 case 0x3B:
3084 /* BLE */
3085 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3086 break;
3087 case 0x3C:
3088 /* BLBS */
3089 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3090 break;
3091 case 0x3D:
3092 /* BNE */
3093 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3094 break;
3095 case 0x3E:
3096 /* BGE */
3097 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3098 break;
3099 case 0x3F:
3100 /* BGT */
3101 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3102 break;
3103 invalid_opc:
3104 ret = gen_invalid(ctx);
3105 break;
3106 }
3107
3108 return ret;
3109 }
3110
/* Translate a block of guest code starting at tb->pc into TCG ops.
   If search_pc is nonzero, additionally record the guest PC and the
   instruction count for every generated op slot, so that a host PC
   can later be mapped back to a guest PC (see restore_state_to_opc).  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    /* MMU index is taken from PS bits 3-4; PALmode is flagged in
       bit 0 of the EXC_ADDR internal processor register.  */
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamiclly figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        /* Emit a debug exception if a breakpoint is set on this PC.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record guest pc / icount for this op index, zero-filling
               any op slots skipped since the previous instruction.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        /* Advance past the insn before translating it, so ctx.pc is
           the return address for jump-type instructions.  */
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        /* The TB ended without writing cpu_pc; store the next PC.  */
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the instr_start table.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
3241
/* Public entry point: translate one TB, without the per-op PC
   search bookkeeping (search_pc == 0).  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
3246
/* Public entry point: re-translate a TB with per-op PC recording
   enabled (search_pc == 1), used when restoring guest state after
   a fault inside generated code.  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
3251
/* Description of one supported Alpha CPU model: its IMPLVER
   (implementation version) and AMASK (architecture extension mask)
   values, keyed by the -cpu model name.  */
struct cpu_def_t {
    const char *name;
    int implver, amask;
};
3256
/* Table of recognized CPU model names.  Both the EV-style aliases and
   the 21x64 part numbers are listed; entries map each model to its
   IMPLVER and the set of AMASK extensions it implements.  */
static const struct cpu_def_t cpu_defs[] = {
    { "ev4", IMPLVER_2106x, 0 },
    { "ev5", IMPLVER_21164, 0 },
    { "ev56", IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
3275
3276 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3277 {
3278 CPUAlphaState *env;
3279 int implver, amask, i, max;
3280
3281 env = qemu_mallocz(sizeof(CPUAlphaState));
3282 cpu_exec_init(env);
3283 alpha_translate_init();
3284 tlb_flush(env, 1);
3285
3286 /* Default to ev67; no reason not to emulate insns by default. */
3287 implver = IMPLVER_21264;
3288 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3289 | AMASK_TRAP | AMASK_PREFETCH);
3290
3291 max = ARRAY_SIZE(cpu_defs);
3292 for (i = 0; i < max; i++) {
3293 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3294 implver = cpu_defs[i].implver;
3295 amask = cpu_defs[i].amask;
3296 break;
3297 }
3298 }
3299 env->implver = implver;
3300 env->amask = amask;
3301
3302 env->ps = 0x1F00;
3303 #if defined (CONFIG_USER_ONLY)
3304 env->ps |= 1 << 3;
3305 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3306 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3307 #endif
3308 env->lock_addr = -1;
3309
3310 /* Initialize IPR */
3311 #if defined (CONFIG_USER_ONLY)
3312 env->ipr[IPR_EXC_ADDR] = 0;
3313 env->ipr[IPR_EXC_SUM] = 0;
3314 env->ipr[IPR_EXC_MASK] = 0;
3315 #else
3316 {
3317 // uint64_t hwpcb;
3318 // hwpcb = env->ipr[IPR_PCBB];
3319 env->ipr[IPR_ASN] = 0;
3320 env->ipr[IPR_ASTEN] = 0;
3321 env->ipr[IPR_ASTSR] = 0;
3322 env->ipr[IPR_DATFX] = 0;
3323 /* XXX: fix this */
3324 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3325 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3326 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3327 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3328 env->ipr[IPR_FEN] = 0;
3329 env->ipr[IPR_IPL] = 31;
3330 env->ipr[IPR_MCES] = 0;
3331 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3332 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3333 env->ipr[IPR_SISR] = 0;
3334 env->ipr[IPR_VIRBND] = -1ULL;
3335 }
3336 #endif
3337
3338 qemu_init_vcpu(env);
3339 return env;
3340 }
3341
/* Restore env->pc from the per-op PC table recorded during a
   search_pc translation pass; pc_pos indexes gen_opc_pc[], which was
   filled in by gen_intermediate_code_internal.  */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}