1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
37
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
43
44 typedef struct DisasContext DisasContext;
45 struct DisasContext {
46 struct TranslationBlock *tb;
47 CPUAlphaState *env;
48 uint64_t pc;
49 int mem_idx;
50 #if !defined (CONFIG_USER_ONLY)
51 int pal_mode;
52 #endif
53 uint32_t amask;
54
55 /* Current rounding mode for this TB. */
56 int tb_rm;
57 /* Current flush-to-zero setting for this TB. */
58 int tb_ftz;
59 };
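/* The tb_rm and tb_ftz fields cache the rounding mode and flush-to-zero
   setting most recently programmed into fp_status within this TB;
   gen_qual_roundmode and gen_qual_flushzero below compare against them
   and skip the redundant store when consecutive FP instructions use the
   same qualifiers.  They are presumably reset to an out-of-band value
   when translation of a TB begins, outside this excerpt.  */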
60
61 /* Return values from translate_one, indicating the state of the TB.
62 Note that zero indicates that we are not exiting the TB. */
63
64 typedef enum {
65 NO_EXIT,
66
67 /* We have emitted one or more goto_tb. No fixup required. */
68 EXIT_GOTO_TB,
69
    /* We are not using a goto_tb (for whatever reason), but have
       updated the PC, so there's no need to do it again on exiting
       the TB.  */
73 EXIT_PC_UPDATED,
74
75 /* We are exiting the TB, but have neither emitted a goto_tb, nor
76 updated the PC for the next instruction to be executed. */
77 EXIT_PC_STALE
78 } ExitStatus;
79
80 /* global register indexes */
81 static TCGv_ptr cpu_env;
82 static TCGv cpu_ir[31];
83 static TCGv cpu_fir[31];
84 static TCGv cpu_pc;
85 static TCGv cpu_lock;
86 #ifdef CONFIG_USER_ONLY
87 static TCGv cpu_uniq;
88 #endif
89
90 /* register names */
91 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
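/* Sizing: "ir0".."ir9" need 4 bytes each including the NUL (10*4) and
   "ir10".."ir30" need 5 (21*5); likewise "fir0".."fir9" need 5 (10*5)
   and "fir10".."fir30" need 6 (21*6).  */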
92
93 #include "gen-icount.h"
94
95 static void alpha_translate_init(void)
96 {
97 int i;
98 char *p;
99 static int done_init = 0;
100
101 if (done_init)
102 return;
103
104 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
105
106 p = cpu_reg_names;
107 for (i = 0; i < 31; i++) {
108 sprintf(p, "ir%d", i);
109 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
110 offsetof(CPUState, ir[i]), p);
111 p += (i < 10) ? 4 : 5;
112
113 sprintf(p, "fir%d", i);
114 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
115 offsetof(CPUState, fir[i]), p);
116 p += (i < 10) ? 5 : 6;
117 }
118
119 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
120 offsetof(CPUState, pc), "pc");
121
122 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
123 offsetof(CPUState, lock), "lock");
124
125 #ifdef CONFIG_USER_ONLY
126 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
127 offsetof(CPUState, unique), "uniq");
128 #endif
129
130 /* register helpers */
131 #define GEN_HELPER 2
132 #include "helper.h"
133
134 done_init = 1;
135 }
136
137 static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
138 {
139 TCGv_i32 tmp1, tmp2;
140
141 tcg_gen_movi_i64(cpu_pc, ctx->pc);
142 tmp1 = tcg_const_i32(exception);
143 tmp2 = tcg_const_i32(error_code);
144 gen_helper_excp(tmp1, tmp2);
145 tcg_temp_free_i32(tmp2);
146 tcg_temp_free_i32(tmp1);
147 }
148
149 static inline void gen_invalid(DisasContext *ctx)
150 {
151 gen_excp(ctx, EXCP_OPCDEC, 0);
152 }
153
154 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
155 {
156 TCGv tmp = tcg_temp_new();
157 TCGv_i32 tmp32 = tcg_temp_new_i32();
158 tcg_gen_qemu_ld32u(tmp, t1, flags);
159 tcg_gen_trunc_i64_i32(tmp32, tmp);
160 gen_helper_memory_to_f(t0, tmp32);
161 tcg_temp_free_i32(tmp32);
162 tcg_temp_free(tmp);
163 }
164
165 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
166 {
167 TCGv tmp = tcg_temp_new();
168 tcg_gen_qemu_ld64(tmp, t1, flags);
169 gen_helper_memory_to_g(t0, tmp);
170 tcg_temp_free(tmp);
171 }
172
173 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
174 {
175 TCGv tmp = tcg_temp_new();
176 TCGv_i32 tmp32 = tcg_temp_new_i32();
177 tcg_gen_qemu_ld32u(tmp, t1, flags);
178 tcg_gen_trunc_i64_i32(tmp32, tmp);
179 gen_helper_memory_to_s(t0, tmp32);
180 tcg_temp_free_i32(tmp32);
181 tcg_temp_free(tmp);
182 }
183
184 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
185 {
186 tcg_gen_mov_i64(cpu_lock, t1);
187 tcg_gen_qemu_ld32s(t0, t1, flags);
188 }
189
190 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
191 {
192 tcg_gen_mov_i64(cpu_lock, t1);
193 tcg_gen_qemu_ld64(t0, t1, flags);
194 }
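/* The load-locked instructions remember the locked address in cpu_lock;
   the matching store-conditional implementations below succeed only if
   they are presented with that same address.  */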
195
196 static inline void gen_load_mem(DisasContext *ctx,
197 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
198 int flags),
199 int ra, int rb, int32_t disp16, int fp,
200 int clear)
201 {
202 TCGv addr;
203
204 if (unlikely(ra == 31))
205 return;
206
207 addr = tcg_temp_new();
208 if (rb != 31) {
209 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
210 if (clear)
211 tcg_gen_andi_i64(addr, addr, ~0x7);
212 } else {
213 if (clear)
214 disp16 &= ~0x7;
215 tcg_gen_movi_i64(addr, disp16);
216 }
217 if (fp)
218 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
219 else
220 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
221 tcg_temp_free(addr);
222 }
223
224 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
225 {
226 TCGv_i32 tmp32 = tcg_temp_new_i32();
227 TCGv tmp = tcg_temp_new();
228 gen_helper_f_to_memory(tmp32, t0);
229 tcg_gen_extu_i32_i64(tmp, tmp32);
230 tcg_gen_qemu_st32(tmp, t1, flags);
231 tcg_temp_free(tmp);
232 tcg_temp_free_i32(tmp32);
233 }
234
235 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
236 {
237 TCGv tmp = tcg_temp_new();
238 gen_helper_g_to_memory(tmp, t0);
239 tcg_gen_qemu_st64(tmp, t1, flags);
240 tcg_temp_free(tmp);
241 }
242
243 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
244 {
245 TCGv_i32 tmp32 = tcg_temp_new_i32();
246 TCGv tmp = tcg_temp_new();
247 gen_helper_s_to_memory(tmp32, t0);
248 tcg_gen_extu_i32_i64(tmp, tmp32);
249 tcg_gen_qemu_st32(tmp, t1, flags);
250 tcg_temp_free(tmp);
251 tcg_temp_free_i32(tmp32);
252 }
253
254 static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
255 {
256 int l1, l2;
257
258 l1 = gen_new_label();
259 l2 = gen_new_label();
260 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
261 tcg_gen_qemu_st32(t0, t1, flags);
262 tcg_gen_movi_i64(t0, 1);
263 tcg_gen_br(l2);
264 gen_set_label(l1);
265 tcg_gen_movi_i64(t0, 0);
266 gen_set_label(l2);
267 tcg_gen_movi_i64(cpu_lock, -1);
268 }
269
270 static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
271 {
272 int l1, l2;
273
274 l1 = gen_new_label();
275 l2 = gen_new_label();
276 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
277 tcg_gen_qemu_st64(t0, t1, flags);
278 tcg_gen_movi_i64(t0, 1);
279 tcg_gen_br(l2);
280 gen_set_label(l1);
281 tcg_gen_movi_i64(t0, 0);
282 gen_set_label(l2);
283 tcg_gen_movi_i64(cpu_lock, -1);
284 }
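/* Note that comparing only the address is a simplification of the
   architected load-locked/store-conditional semantics: intervening
   stores to the locked location go undetected.  That is tolerable under
   TCG's serialized execution, but weaker than real hardware.  */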
285
286 static inline void gen_store_mem(DisasContext *ctx,
287 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
288 int flags),
289 int ra, int rb, int32_t disp16, int fp,
290 int clear, int local)
291 {
292 TCGv addr;
293 if (local)
294 addr = tcg_temp_local_new();
295 else
296 addr = tcg_temp_new();
297 if (rb != 31) {
298 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
299 if (clear)
300 tcg_gen_andi_i64(addr, addr, ~0x7);
301 } else {
302 if (clear)
303 disp16 &= ~0x7;
304 tcg_gen_movi_i64(addr, disp16);
305 }
306 if (ra != 31) {
307 if (fp)
308 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
309 else
310 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
311 } else {
312 TCGv zero;
313 if (local)
314 zero = tcg_const_local_i64(0);
315 else
316 zero = tcg_const_i64(0);
317 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
318 tcg_temp_free(zero);
319 }
320 tcg_temp_free(addr);
321 }
322
323 static int use_goto_tb(DisasContext *ctx, uint64_t dest)
324 {
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and
       IO.  */
327 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
328 && !ctx->env->singlestep_enabled
329 && !(ctx->tb->cflags & CF_LAST_IO));
330 }
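/* Chained TBs are entered without revisiting the guest page tables, so
   direct jumps must stay on the same guest page as the TB itself;
   single-stepping and icount/IO execution likewise need to observe
   every TB exit, hence the extra conditions above.  */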
331
332 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
333 {
334 uint64_t dest = ctx->pc + (disp << 2);
335
336 if (ra != 31) {
337 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
338 }
339
340 /* Notice branch-to-next; used to initialize RA with the PC. */
341 if (disp == 0) {
        return NO_EXIT;
343 } else if (use_goto_tb(ctx, dest)) {
344 tcg_gen_goto_tb(0);
345 tcg_gen_movi_i64(cpu_pc, dest);
346 tcg_gen_exit_tb((long)ctx->tb);
347 return EXIT_GOTO_TB;
348 } else {
349 tcg_gen_movi_i64(cpu_pc, dest);
350 return EXIT_PC_UPDATED;
351 }
352 }
353
354 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
355 TCGv cmp, int32_t disp)
356 {
357 uint64_t dest = ctx->pc + (disp << 2);
358 int lab_true = gen_new_label();
359
360 if (use_goto_tb(ctx, dest)) {
361 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
362
363 tcg_gen_goto_tb(0);
364 tcg_gen_movi_i64(cpu_pc, ctx->pc);
365 tcg_gen_exit_tb((long)ctx->tb);
366
367 gen_set_label(lab_true);
368 tcg_gen_goto_tb(1);
369 tcg_gen_movi_i64(cpu_pc, dest);
370 tcg_gen_exit_tb((long)ctx->tb + 1);
371
372 return EXIT_GOTO_TB;
373 } else {
374 int lab_over = gen_new_label();
375
376 /* ??? Consider using either
377 movi pc, next
378 addi tmp, pc, disp
379 movcond pc, cond, 0, tmp, pc
380 or
381 setcond tmp, cond, 0
382 movi pc, next
383 neg tmp, tmp
384 andi tmp, tmp, disp
385 add pc, pc, tmp
386 The current diamond subgraph surely isn't efficient. */
387
388 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
389 tcg_gen_movi_i64(cpu_pc, ctx->pc);
390 tcg_gen_br(lab_over);
391 gen_set_label(lab_true);
392 tcg_gen_movi_i64(cpu_pc, dest);
393 gen_set_label(lab_over);
394
395 return EXIT_PC_UPDATED;
396 }
397 }
398
399 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
400 int32_t disp, int mask)
401 {
402 TCGv cmp_tmp;
403
404 if (unlikely(ra == 31)) {
405 cmp_tmp = tcg_const_i64(0);
406 } else {
407 cmp_tmp = tcg_temp_new();
408 if (mask) {
409 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
410 } else {
411 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
412 }
413 }
414
415 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
416 }
417
418 /* Fold -0.0 for comparison with COND. */
419
420 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
421 {
422 uint64_t mzero = 1ull << 63;
423
424 switch (cond) {
425 case TCG_COND_LE:
426 case TCG_COND_GT:
427 /* For <= or >, the -0.0 value directly compares the way we want. */
428 tcg_gen_mov_i64(dest, src);
429 break;
430
431 case TCG_COND_EQ:
432 case TCG_COND_NE:
433 /* For == or !=, we can simply mask off the sign bit and compare. */
434 tcg_gen_andi_i64(dest, src, mzero - 1);
435 break;
436
437 case TCG_COND_GE:
438 case TCG_COND_LT:
439 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
440 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
441 tcg_gen_neg_i64(dest, dest);
442 tcg_gen_and_i64(dest, dest, src);
443 break;
444
445 default:
446 abort();
447 }
448 }
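/* Worked example for the >= / < case: with src == 0x8000000000000000
   (-0.0), the setcond yields 0, negation leaves 0, and the AND produces
   +0.0; for any other src the setcond yields 1, negation turns that
   into an all-ones mask, and the AND passes src through unchanged.  */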
449
450 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
451 int32_t disp)
452 {
453 TCGv cmp_tmp;
454
455 if (unlikely(ra == 31)) {
456 /* Very uncommon case, but easier to optimize it to an integer
457 comparison than continuing with the floating point comparison. */
458 return gen_bcond(ctx, cond, ra, disp, 0);
459 }
460
461 cmp_tmp = tcg_temp_new();
462 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
463 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
464 }
465
466 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
467 int islit, uint8_t lit, int mask)
468 {
469 TCGCond inv_cond = tcg_invert_cond(cond);
470 int l1;
471
472 if (unlikely(rc == 31))
473 return;
474
475 l1 = gen_new_label();
476
477 if (ra != 31) {
478 if (mask) {
479 TCGv tmp = tcg_temp_new();
480 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
481 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
482 tcg_temp_free(tmp);
483 } else
484 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
485 } else {
486 /* Very uncommon case - Do not bother to optimize. */
487 TCGv tmp = tcg_const_i64(0);
488 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
489 tcg_temp_free(tmp);
490 }
491
492 if (islit)
493 tcg_gen_movi_i64(cpu_ir[rc], lit);
494 else
495 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
496 gen_set_label(l1);
497 }
498
499 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
500 {
501 TCGv cmp_tmp;
502 int l1;
503
504 if (unlikely(rc == 31)) {
505 return;
506 }
507
508 cmp_tmp = tcg_temp_new();
509 if (unlikely(ra == 31)) {
510 tcg_gen_movi_i64(cmp_tmp, 0);
511 } else {
512 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
513 }
514
515 l1 = gen_new_label();
516 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
517 tcg_temp_free(cmp_tmp);
518
519 if (rb != 31)
520 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
521 else
522 tcg_gen_movi_i64(cpu_fir[rc], 0);
523 gen_set_label(l1);
524 }
525
526 #define QUAL_RM_N 0x080 /* Round mode nearest even */
527 #define QUAL_RM_C 0x000 /* Round mode chopped */
528 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
529 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
530 #define QUAL_RM_MASK 0x0c0
531
532 #define QUAL_U 0x100 /* Underflow enable (fp output) */
533 #define QUAL_V 0x100 /* Overflow enable (int output) */
534 #define QUAL_S 0x400 /* Software completion enable */
535 #define QUAL_I 0x200 /* Inexact detection enable */
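/* These bits decompose fn11 as: fn11<10> = /S, fn11<9> = /I,
   fn11<8> = /U (or /V), fn11<7:6> = rounding mode, fn11<5:0> = the
   function proper (fpfn below).  E.g. ADDS/SUID -- dynamic rounding
   with all of S, U and I -- encodes as fn11 = 0x400 | 0x200 | 0x100
   | 0x0c0 | 0x00 = 0x7c0.  */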
536
537 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
538 {
539 TCGv_i32 tmp;
540
541 fn11 &= QUAL_RM_MASK;
542 if (fn11 == ctx->tb_rm) {
543 return;
544 }
545 ctx->tb_rm = fn11;
546
547 tmp = tcg_temp_new_i32();
548 switch (fn11) {
549 case QUAL_RM_N:
550 tcg_gen_movi_i32(tmp, float_round_nearest_even);
551 break;
552 case QUAL_RM_C:
553 tcg_gen_movi_i32(tmp, float_round_to_zero);
554 break;
555 case QUAL_RM_M:
556 tcg_gen_movi_i32(tmp, float_round_down);
557 break;
558 case QUAL_RM_D:
559 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
560 break;
561 }
562
563 #if defined(CONFIG_SOFTFLOAT_INLINE)
564 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
565 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
566 sets the one field. */
567 tcg_gen_st8_i32(tmp, cpu_env,
568 offsetof(CPUState, fp_status.float_rounding_mode));
569 #else
570 gen_helper_setroundmode(tmp);
571 #endif
572
573 tcg_temp_free_i32(tmp);
574 }
575
576 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
577 {
578 TCGv_i32 tmp;
579
580 fn11 &= QUAL_U;
581 if (fn11 == ctx->tb_ftz) {
582 return;
583 }
584 ctx->tb_ftz = fn11;
585
586 tmp = tcg_temp_new_i32();
587 if (fn11) {
588 /* Underflow is enabled, use the FPCR setting. */
589 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
590 } else {
591 /* Underflow is disabled, force flush-to-zero. */
592 tcg_gen_movi_i32(tmp, 1);
593 }
594
595 #if defined(CONFIG_SOFTFLOAT_INLINE)
596 tcg_gen_st8_i32(tmp, cpu_env,
597 offsetof(CPUState, fp_status.flush_to_zero));
598 #else
599 gen_helper_setflushzero(tmp);
600 #endif
601
602 tcg_temp_free_i32(tmp);
603 }
604
605 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
606 {
607 TCGv val = tcg_temp_new();
608 if (reg == 31) {
609 tcg_gen_movi_i64(val, 0);
610 } else if (fn11 & QUAL_S) {
611 gen_helper_ieee_input_s(val, cpu_fir[reg]);
612 } else if (is_cmp) {
613 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
614 } else {
615 gen_helper_ieee_input(val, cpu_fir[reg]);
616 }
617 return val;
618 }
619
620 static void gen_fp_exc_clear(void)
621 {
622 #if defined(CONFIG_SOFTFLOAT_INLINE)
623 TCGv_i32 zero = tcg_const_i32(0);
624 tcg_gen_st8_i32(zero, cpu_env,
625 offsetof(CPUState, fp_status.float_exception_flags));
626 tcg_temp_free_i32(zero);
627 #else
628 gen_helper_fp_exc_clear();
629 #endif
630 }
631
632 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
633 {
634 /* ??? We ought to be able to do something with imprecise exceptions.
635 E.g. notice we're still in the trap shadow of something within the
636 TB and do not generate the code to signal the exception; end the TB
637 when an exception is forced to arrive, either by consumption of a
638 register value or TRAPB or EXCB. */
639 TCGv_i32 exc = tcg_temp_new_i32();
640 TCGv_i32 reg;
641
642 #if defined(CONFIG_SOFTFLOAT_INLINE)
643 tcg_gen_ld8u_i32(exc, cpu_env,
644 offsetof(CPUState, fp_status.float_exception_flags));
645 #else
646 gen_helper_fp_exc_get(exc);
647 #endif
648
649 if (ignore) {
650 tcg_gen_andi_i32(exc, exc, ~ignore);
651 }
652
653 /* ??? Pass in the regno of the destination so that the helper can
654 set EXC_MASK, which contains a bitmask of destination registers
655 that have caused arithmetic traps. A simple userspace emulation
656 does not require this. We do need it for a guest kernel's entArith,
657 or if we were to do something clever with imprecise exceptions. */
658 reg = tcg_const_i32(rc + 32);
659
660 if (fn11 & QUAL_S) {
661 gen_helper_fp_exc_raise_s(exc, reg);
662 } else {
663 gen_helper_fp_exc_raise(exc, reg);
664 }
665
666 tcg_temp_free_i32(reg);
667 tcg_temp_free_i32(exc);
668 }
669
670 static inline void gen_fp_exc_raise(int rc, int fn11)
671 {
672 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
673 }
674
675 static void gen_fcvtlq(int rb, int rc)
676 {
677 if (unlikely(rc == 31)) {
678 return;
679 }
680 if (unlikely(rb == 31)) {
681 tcg_gen_movi_i64(cpu_fir[rc], 0);
682 } else {
683 TCGv tmp = tcg_temp_new();
684
685 /* The arithmetic right shift here, plus the sign-extended mask below
686 yields a sign-extended result without an explicit ext32s_i64. */
687 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
688 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
689 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
690 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
691 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
692
693 tcg_temp_free(tmp);
694 }
695 }
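/* Both conversions manipulate the in-register format of a longword held
   in an FP register: bits <31:30> of the 32-bit value live at <63:62>
   and bits <29:0> live at <58:29>.  CVTLQ above gathers those fields
   back into a canonical quadword; CVTQL below scatters them out
   again.  */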
696
697 static void gen_fcvtql(int rb, int rc)
698 {
699 if (unlikely(rc == 31)) {
700 return;
701 }
702 if (unlikely(rb == 31)) {
703 tcg_gen_movi_i64(cpu_fir[rc], 0);
704 } else {
705 TCGv tmp = tcg_temp_new();
706
707 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
708 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
709 tcg_gen_shli_i64(tmp, tmp, 32);
710 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
711 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
712
713 tcg_temp_free(tmp);
714 }
715 }
716
717 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
718 {
719 if (rb != 31) {
720 int lab = gen_new_label();
721 TCGv tmp = tcg_temp_new();
722
723 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
724 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
725 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
726
727 gen_set_label(lab);
728 }
729 gen_fcvtql(rb, rc);
730 }
731
732 #define FARITH2(name) \
733 static inline void glue(gen_f, name)(int rb, int rc) \
734 { \
735 if (unlikely(rc == 31)) { \
736 return; \
737 } \
738 if (rb != 31) { \
739 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
740 } else { \
741 TCGv tmp = tcg_const_i64(0); \
742 gen_helper_ ## name (cpu_fir[rc], tmp); \
743 tcg_temp_free(tmp); \
744 } \
745 }
746
747 /* ??? VAX instruction qualifiers ignored. */
748 FARITH2(sqrtf)
749 FARITH2(sqrtg)
750 FARITH2(cvtgf)
751 FARITH2(cvtgq)
752 FARITH2(cvtqf)
753 FARITH2(cvtqg)
754
755 static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
756 int rb, int rc, int fn11)
757 {
758 TCGv vb;
759
760 /* ??? This is wrong: the instruction is not a nop, it still may
761 raise exceptions. */
762 if (unlikely(rc == 31)) {
763 return;
764 }
765
766 gen_qual_roundmode(ctx, fn11);
767 gen_qual_flushzero(ctx, fn11);
768 gen_fp_exc_clear();
769
770 vb = gen_ieee_input(rb, fn11, 0);
771 helper(cpu_fir[rc], vb);
772 tcg_temp_free(vb);
773
774 gen_fp_exc_raise(rc, fn11);
775 }
776
777 #define IEEE_ARITH2(name) \
778 static inline void glue(gen_f, name)(DisasContext *ctx, \
779 int rb, int rc, int fn11) \
780 { \
781 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
782 }
783 IEEE_ARITH2(sqrts)
784 IEEE_ARITH2(sqrtt)
785 IEEE_ARITH2(cvtst)
786 IEEE_ARITH2(cvtts)
787
788 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
789 {
790 TCGv vb;
791 int ignore = 0;
792
793 /* ??? This is wrong: the instruction is not a nop, it still may
794 raise exceptions. */
795 if (unlikely(rc == 31)) {
796 return;
797 }
798
799 /* No need to set flushzero, since we have an integer output. */
800 gen_fp_exc_clear();
801 vb = gen_ieee_input(rb, fn11, 0);
802
803 /* Almost all integer conversions use cropped rounding, and most
804 also do not have integer overflow enabled. Special case that. */
805 switch (fn11) {
806 case QUAL_RM_C:
807 gen_helper_cvttq_c(cpu_fir[rc], vb);
808 break;
809 case QUAL_V | QUAL_RM_C:
810 case QUAL_S | QUAL_V | QUAL_RM_C:
811 ignore = float_flag_inexact;
812 /* FALLTHRU */
813 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
814 gen_helper_cvttq_svic(cpu_fir[rc], vb);
815 break;
816 default:
817 gen_qual_roundmode(ctx, fn11);
818 gen_helper_cvttq(cpu_fir[rc], vb);
819 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
820 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
821 break;
822 }
823 tcg_temp_free(vb);
824
825 gen_fp_exc_raise_ignore(rc, fn11, ignore);
826 }
827
828 static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
829 int rb, int rc, int fn11)
830 {
831 TCGv vb;
832
833 /* ??? This is wrong: the instruction is not a nop, it still may
834 raise exceptions. */
835 if (unlikely(rc == 31)) {
836 return;
837 }
838
839 gen_qual_roundmode(ctx, fn11);
840
841 if (rb == 31) {
842 vb = tcg_const_i64(0);
843 } else {
844 vb = cpu_fir[rb];
845 }
846
847 /* The only exception that can be raised by integer conversion
848 is inexact. Thus we only need to worry about exceptions when
849 inexact handling is requested. */
850 if (fn11 & QUAL_I) {
851 gen_fp_exc_clear();
852 helper(cpu_fir[rc], vb);
853 gen_fp_exc_raise(rc, fn11);
854 } else {
855 helper(cpu_fir[rc], vb);
856 }
857
858 if (rb == 31) {
859 tcg_temp_free(vb);
860 }
861 }
862
863 #define IEEE_INTCVT(name) \
864 static inline void glue(gen_f, name)(DisasContext *ctx, \
865 int rb, int rc, int fn11) \
866 { \
867 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
868 }
869 IEEE_INTCVT(cvtqs)
870 IEEE_INTCVT(cvtqt)
871
872 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
873 {
874 TCGv va, vb, vmask;
875 int za = 0, zb = 0;
876
877 if (unlikely(rc == 31)) {
878 return;
879 }
880
881 vmask = tcg_const_i64(mask);
882
883 TCGV_UNUSED_I64(va);
884 if (ra == 31) {
885 if (inv_a) {
886 va = vmask;
887 } else {
888 za = 1;
889 }
890 } else {
891 va = tcg_temp_new_i64();
892 tcg_gen_mov_i64(va, cpu_fir[ra]);
893 if (inv_a) {
894 tcg_gen_andc_i64(va, vmask, va);
895 } else {
896 tcg_gen_and_i64(va, va, vmask);
897 }
898 }
899
900 TCGV_UNUSED_I64(vb);
901 if (rb == 31) {
902 zb = 1;
903 } else {
904 vb = tcg_temp_new_i64();
905 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
906 }
907
908 switch (za << 1 | zb) {
909 case 0 | 0:
910 tcg_gen_or_i64(cpu_fir[rc], va, vb);
911 break;
912 case 0 | 1:
913 tcg_gen_mov_i64(cpu_fir[rc], va);
914 break;
915 case 2 | 0:
916 tcg_gen_mov_i64(cpu_fir[rc], vb);
917 break;
918 case 2 | 1:
919 tcg_gen_movi_i64(cpu_fir[rc], 0);
920 break;
921 }
922
923 tcg_temp_free(vmask);
924 if (ra != 31) {
925 tcg_temp_free(va);
926 }
927 if (rb != 31) {
928 tcg_temp_free(vb);
929 }
930 }
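/* The za/zb flags track operands known to be zero (the F31 register, or
   a fully masked field) so that the cheapest TCG operation can be
   selected for the final combine.  The classic idioms fall out of this:
   CPYS F31,Fb,Fc clears the sign bit (fabs), while CPYSN Fa,Fa,Fc flips
   it (fneg).  */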
931
932 static inline void gen_fcpys(int ra, int rb, int rc)
933 {
934 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
935 }
936
937 static inline void gen_fcpysn(int ra, int rb, int rc)
938 {
939 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
940 }
941
942 static inline void gen_fcpyse(int ra, int rb, int rc)
943 {
944 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
945 }
946
947 #define FARITH3(name) \
948 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
949 { \
950 TCGv va, vb; \
951 \
952 if (unlikely(rc == 31)) { \
953 return; \
954 } \
955 if (ra == 31) { \
956 va = tcg_const_i64(0); \
957 } else { \
958 va = cpu_fir[ra]; \
959 } \
960 if (rb == 31) { \
961 vb = tcg_const_i64(0); \
962 } else { \
963 vb = cpu_fir[rb]; \
964 } \
965 \
966 gen_helper_ ## name (cpu_fir[rc], va, vb); \
967 \
968 if (ra == 31) { \
969 tcg_temp_free(va); \
970 } \
971 if (rb == 31) { \
972 tcg_temp_free(vb); \
973 } \
974 }
975
976 /* ??? VAX instruction qualifiers ignored. */
977 FARITH3(addf)
978 FARITH3(subf)
979 FARITH3(mulf)
980 FARITH3(divf)
981 FARITH3(addg)
982 FARITH3(subg)
983 FARITH3(mulg)
984 FARITH3(divg)
985 FARITH3(cmpgeq)
986 FARITH3(cmpglt)
987 FARITH3(cmpgle)
988
989 static void gen_ieee_arith3(DisasContext *ctx,
990 void (*helper)(TCGv, TCGv, TCGv),
991 int ra, int rb, int rc, int fn11)
992 {
993 TCGv va, vb;
994
995 /* ??? This is wrong: the instruction is not a nop, it still may
996 raise exceptions. */
997 if (unlikely(rc == 31)) {
998 return;
999 }
1000
1001 gen_qual_roundmode(ctx, fn11);
1002 gen_qual_flushzero(ctx, fn11);
1003 gen_fp_exc_clear();
1004
1005 va = gen_ieee_input(ra, fn11, 0);
1006 vb = gen_ieee_input(rb, fn11, 0);
1007 helper(cpu_fir[rc], va, vb);
1008 tcg_temp_free(va);
1009 tcg_temp_free(vb);
1010
1011 gen_fp_exc_raise(rc, fn11);
1012 }
1013
1014 #define IEEE_ARITH3(name) \
1015 static inline void glue(gen_f, name)(DisasContext *ctx, \
1016 int ra, int rb, int rc, int fn11) \
1017 { \
1018 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1019 }
1020 IEEE_ARITH3(adds)
1021 IEEE_ARITH3(subs)
1022 IEEE_ARITH3(muls)
1023 IEEE_ARITH3(divs)
1024 IEEE_ARITH3(addt)
1025 IEEE_ARITH3(subt)
1026 IEEE_ARITH3(mult)
1027 IEEE_ARITH3(divt)
1028
1029 static void gen_ieee_compare(DisasContext *ctx,
1030 void (*helper)(TCGv, TCGv, TCGv),
1031 int ra, int rb, int rc, int fn11)
1032 {
1033 TCGv va, vb;
1034
1035 /* ??? This is wrong: the instruction is not a nop, it still may
1036 raise exceptions. */
1037 if (unlikely(rc == 31)) {
1038 return;
1039 }
1040
1041 gen_fp_exc_clear();
1042
1043 va = gen_ieee_input(ra, fn11, 1);
1044 vb = gen_ieee_input(rb, fn11, 1);
1045 helper(cpu_fir[rc], va, vb);
1046 tcg_temp_free(va);
1047 tcg_temp_free(vb);
1048
1049 gen_fp_exc_raise(rc, fn11);
1050 }
1051
1052 #define IEEE_CMP3(name) \
1053 static inline void glue(gen_f, name)(DisasContext *ctx, \
1054 int ra, int rb, int rc, int fn11) \
1055 { \
1056 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1057 }
1058 IEEE_CMP3(cmptun)
1059 IEEE_CMP3(cmpteq)
1060 IEEE_CMP3(cmptlt)
1061 IEEE_CMP3(cmptle)
1062
1063 static inline uint64_t zapnot_mask(uint8_t lit)
1064 {
1065 uint64_t mask = 0;
1066 int i;
1067
1068 for (i = 0; i < 8; ++i) {
1069 if ((lit >> i) & 1)
1070 mask |= 0xffull << (i * 8);
1071 }
1072 return mask;
1073 }
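/* Examples: lit = 0x01 gives 0x00000000000000ff, lit = 0x0f gives
   0x00000000ffffffff, and lit = 0xff gives all ones -- each set bit in
   lit preserves the corresponding byte.  */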
1074
1075 /* Implement zapnot with an immediate operand, which expands to some
1076 form of immediate AND. This is a basic building block in the
1077 definition of many of the other byte manipulation instructions. */
1078 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1079 {
1080 switch (lit) {
1081 case 0x00:
1082 tcg_gen_movi_i64(dest, 0);
1083 break;
1084 case 0x01:
1085 tcg_gen_ext8u_i64(dest, src);
1086 break;
1087 case 0x03:
1088 tcg_gen_ext16u_i64(dest, src);
1089 break;
1090 case 0x0f:
1091 tcg_gen_ext32u_i64(dest, src);
1092 break;
1093 case 0xff:
1094 tcg_gen_mov_i64(dest, src);
1095 break;
1096 default:
1097 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1098 break;
1099 }
1100 }
1101
1102 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1103 {
1104 if (unlikely(rc == 31))
1105 return;
1106 else if (unlikely(ra == 31))
1107 tcg_gen_movi_i64(cpu_ir[rc], 0);
1108 else if (islit)
1109 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1110 else
1111 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1112 }
1113
1114 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1115 {
1116 if (unlikely(rc == 31))
1117 return;
1118 else if (unlikely(ra == 31))
1119 tcg_gen_movi_i64(cpu_ir[rc], 0);
1120 else if (islit)
1121 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1122 else
1123 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1124 }
1125
1126
1127 /* EXTWH, EXTLH, EXTQH */
1128 static void gen_ext_h(int ra, int rb, int rc, int islit,
1129 uint8_t lit, uint8_t byte_mask)
1130 {
1131 if (unlikely(rc == 31))
1132 return;
1133 else if (unlikely(ra == 31))
1134 tcg_gen_movi_i64(cpu_ir[rc], 0);
1135 else {
1136 if (islit) {
1137 lit = (64 - (lit & 7) * 8) & 0x3f;
1138 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1139 } else {
1140 TCGv tmp1 = tcg_temp_new();
1141 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1142 tcg_gen_shli_i64(tmp1, tmp1, 3);
1143 tcg_gen_neg_i64(tmp1, tmp1);
1144 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1145 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1146 tcg_temp_free(tmp1);
1147 }
1148 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1149 }
1150 }
1151
1152 /* EXTBL, EXTWL, EXTLL, EXTQL */
1153 static void gen_ext_l(int ra, int rb, int rc, int islit,
1154 uint8_t lit, uint8_t byte_mask)
1155 {
1156 if (unlikely(rc == 31))
1157 return;
1158 else if (unlikely(ra == 31))
1159 tcg_gen_movi_i64(cpu_ir[rc], 0);
1160 else {
1161 if (islit) {
1162 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1163 } else {
1164 TCGv tmp = tcg_temp_new();
1165 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1166 tcg_gen_shli_i64(tmp, tmp, 3);
1167 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1168 tcg_temp_free(tmp);
1169 }
1170 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1171 }
1172 }
1173
1174 /* INSWH, INSLH, INSQH */
1175 static void gen_ins_h(int ra, int rb, int rc, int islit,
1176 uint8_t lit, uint8_t byte_mask)
1177 {
1178 if (unlikely(rc == 31))
1179 return;
1180 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1181 tcg_gen_movi_i64(cpu_ir[rc], 0);
1182 else {
1183 TCGv tmp = tcg_temp_new();
1184
1185 /* The instruction description has us left-shift the byte mask
1186 and extract bits <15:8> and apply that zap at the end. This
1187 is equivalent to simply performing the zap first and shifting
1188 afterward. */
1189 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1190
1191 if (islit) {
            /* Note that we have handled the (lit & 7) == 0 case above.  */
1193 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1194 } else {
1195 TCGv shift = tcg_temp_new();
1196
1197 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1198 Do this portably by splitting the shift into two parts:
1199 shift_count-1 and 1. Arrange for the -1 by using
1200 ones-complement instead of twos-complement in the negation:
1201 ~((B & 7) * 8) & 63. */
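            /* Worked example: for (B & 7) == 5, the computed shift is
               ~40 & 63 = 23, and 23 + 1 = 24 = 64 - 40 as required; for
               (B & 7) == 0 it is 63 + 1 = 64, correctly leaving zero.  */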
1202
1203 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1204 tcg_gen_shli_i64(shift, shift, 3);
1205 tcg_gen_not_i64(shift, shift);
1206 tcg_gen_andi_i64(shift, shift, 0x3f);
1207
1208 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1209 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1210 tcg_temp_free(shift);
1211 }
1212 tcg_temp_free(tmp);
1213 }
1214 }
1215
1216 /* INSBL, INSWL, INSLL, INSQL */
1217 static void gen_ins_l(int ra, int rb, int rc, int islit,
1218 uint8_t lit, uint8_t byte_mask)
1219 {
1220 if (unlikely(rc == 31))
1221 return;
1222 else if (unlikely(ra == 31))
1223 tcg_gen_movi_i64(cpu_ir[rc], 0);
1224 else {
1225 TCGv tmp = tcg_temp_new();
1226
1227 /* The instruction description has us left-shift the byte mask
1228 the same number of byte slots as the data and apply the zap
1229 at the end. This is equivalent to simply performing the zap
1230 first and shifting afterward. */
1231 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1232
1233 if (islit) {
1234 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1235 } else {
1236 TCGv shift = tcg_temp_new();
1237 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1238 tcg_gen_shli_i64(shift, shift, 3);
1239 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1240 tcg_temp_free(shift);
1241 }
1242 tcg_temp_free(tmp);
1243 }
1244 }
1245
1246 /* MSKWH, MSKLH, MSKQH */
1247 static void gen_msk_h(int ra, int rb, int rc, int islit,
1248 uint8_t lit, uint8_t byte_mask)
1249 {
1250 if (unlikely(rc == 31))
1251 return;
1252 else if (unlikely(ra == 31))
1253 tcg_gen_movi_i64(cpu_ir[rc], 0);
1254 else if (islit) {
1255 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1256 } else {
1257 TCGv shift = tcg_temp_new();
1258 TCGv mask = tcg_temp_new();
1259
1260 /* The instruction description is as above, where the byte_mask
1261 is shifted left, and then we extract bits <15:8>. This can be
1262 emulated with a right-shift on the expanded byte mask. This
1263 requires extra care because for an input <2:0> == 0 we need a
1264 shift of 64 bits in order to generate a zero. This is done by
1265 splitting the shift into two parts, the variable shift - 1
1266 followed by a constant 1 shift. The code we expand below is
1267 equivalent to ~((B & 7) * 8) & 63. */
1268
1269 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1270 tcg_gen_shli_i64(shift, shift, 3);
1271 tcg_gen_not_i64(shift, shift);
1272 tcg_gen_andi_i64(shift, shift, 0x3f);
1273 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1274 tcg_gen_shr_i64(mask, mask, shift);
1275 tcg_gen_shri_i64(mask, mask, 1);
1276
1277 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1278
1279 tcg_temp_free(mask);
1280 tcg_temp_free(shift);
1281 }
1282 }
1283
1284 /* MSKBL, MSKWL, MSKLL, MSKQL */
1285 static void gen_msk_l(int ra, int rb, int rc, int islit,
1286 uint8_t lit, uint8_t byte_mask)
1287 {
1288 if (unlikely(rc == 31))
1289 return;
1290 else if (unlikely(ra == 31))
1291 tcg_gen_movi_i64(cpu_ir[rc], 0);
1292 else if (islit) {
1293 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1294 } else {
1295 TCGv shift = tcg_temp_new();
1296 TCGv mask = tcg_temp_new();
1297
1298 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1299 tcg_gen_shli_i64(shift, shift, 3);
1300 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1301 tcg_gen_shl_i64(mask, mask, shift);
1302
1303 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1304
1305 tcg_temp_free(mask);
1306 tcg_temp_free(shift);
1307 }
1308 }
1309
1310 /* Code to call arith3 helpers */
1311 #define ARITH3(name) \
1312 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1313 uint8_t lit) \
1314 { \
1315 if (unlikely(rc == 31)) \
1316 return; \
1317 \
1318 if (ra != 31) { \
1319 if (islit) { \
1320 TCGv tmp = tcg_const_i64(lit); \
1321 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1322 tcg_temp_free(tmp); \
1323 } else \
1324 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1325 } else { \
1326 TCGv tmp1 = tcg_const_i64(0); \
1327 if (islit) { \
1328 TCGv tmp2 = tcg_const_i64(lit); \
1329 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1330 tcg_temp_free(tmp2); \
1331 } else \
1332 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1333 tcg_temp_free(tmp1); \
1334 } \
1335 }
1336 ARITH3(cmpbge)
1337 ARITH3(addlv)
1338 ARITH3(sublv)
1339 ARITH3(addqv)
1340 ARITH3(subqv)
1341 ARITH3(umulh)
1342 ARITH3(mullv)
1343 ARITH3(mulqv)
1344 ARITH3(minub8)
1345 ARITH3(minsb8)
1346 ARITH3(minuw4)
1347 ARITH3(minsw4)
1348 ARITH3(maxub8)
1349 ARITH3(maxsb8)
1350 ARITH3(maxuw4)
1351 ARITH3(maxsw4)
1352 ARITH3(perr)
1353
1354 #define MVIOP2(name) \
1355 static inline void glue(gen_, name)(int rb, int rc) \
1356 { \
1357 if (unlikely(rc == 31)) \
1358 return; \
1359 if (unlikely(rb == 31)) \
1360 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1361 else \
1362 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1363 }
1364 MVIOP2(pklb)
1365 MVIOP2(pkwb)
1366 MVIOP2(unpkbl)
1367 MVIOP2(unpkbw)
1368
1369 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1370 int islit, uint8_t lit)
1371 {
1372 TCGv va, vb;
1373
1374 if (unlikely(rc == 31)) {
1375 return;
1376 }
1377
1378 if (ra == 31) {
1379 va = tcg_const_i64(0);
1380 } else {
1381 va = cpu_ir[ra];
1382 }
1383 if (islit) {
1384 vb = tcg_const_i64(lit);
1385 } else {
1386 vb = cpu_ir[rb];
1387 }
1388
1389 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1390
1391 if (ra == 31) {
1392 tcg_temp_free(va);
1393 }
1394 if (islit) {
1395 tcg_temp_free(vb);
1396 }
1397 }
1398
1399 static void gen_rx(int ra, int set)
1400 {
1401 TCGv_i32 tmp;
1402
1403 if (ra != 31) {
1404 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1405 }
1406
1407 tmp = tcg_const_i32(set);
1408 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1409 tcg_temp_free_i32(tmp);
1410 }
1411
1412 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1413 {
1414 uint32_t palcode;
1415 int32_t disp21, disp16, disp12;
1416 uint16_t fn11;
1417 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
1418 uint8_t lit;
1419 ExitStatus ret;
1420
1421 /* Decode all instruction fields */
1422 opc = insn >> 26;
1423 ra = (insn >> 21) & 0x1F;
1424 rb = (insn >> 16) & 0x1F;
1425 rc = insn & 0x1F;
1426 real_islit = islit = (insn >> 12) & 1;
1427 if (rb == 31 && !islit) {
1428 islit = 1;
1429 lit = 0;
1430 } else
1431 lit = (insn >> 13) & 0xFF;
1432 palcode = insn & 0x03FFFFFF;
1433 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1434 disp16 = (int16_t)(insn & 0x0000FFFF);
1435 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1436 fn11 = (insn >> 5) & 0x000007FF;
1437 fpfn = fn11 & 0x3F;
1438 fn7 = (insn >> 5) & 0x0000007F;
1439 fn2 = (insn >> 5) & 0x00000003;
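    /* All Alpha instructions are 32 bits, major opcode in <31:26>.
       Memory format adds ra<25:21>, rb<20:16>, disp16<15:0>; branch
       format has disp21<20:0>; integer operates put a 7-bit function
       code in <11:5> with the literal flag at bit 12 and lit<20:13>;
       FP operates carry an 11-bit function in <15:5>.  The disp21 and
       disp12 extractions above sign-extend by shifting the field to the
       top of a 32-bit word and arithmetic-shifting back down.  */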
1440 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1441 opc, ra, rb, rc, disp16);
1442
1443 ret = NO_EXIT;
1444 switch (opc) {
1445 case 0x00:
1446 /* CALL_PAL */
1447 #ifdef CONFIG_USER_ONLY
1448 if (palcode == 0x9E) {
1449 /* RDUNIQUE */
1450 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1451 break;
1452 } else if (palcode == 0x9F) {
1453 /* WRUNIQUE */
1454 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1455 break;
1456 }
1457 #endif
1458 if (palcode >= 0x80 && palcode < 0xC0) {
1459 /* Unprivileged PAL call */
1460 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
1461 /* PC updated by gen_excp. */
1462 ret = EXIT_PC_UPDATED;
1463 break;
1464 }
1465 #ifndef CONFIG_USER_ONLY
1466 if (palcode < 0x40) {
1467 /* Privileged PAL code */
1468 if (ctx->mem_idx & 1)
1469 goto invalid_opc;
1470 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
1471 }
1472 #endif
1473 /* Invalid PAL call */
1474 goto invalid_opc;
1475 case 0x01:
1476 /* OPC01 */
1477 goto invalid_opc;
1478 case 0x02:
1479 /* OPC02 */
1480 goto invalid_opc;
1481 case 0x03:
1482 /* OPC03 */
1483 goto invalid_opc;
1484 case 0x04:
1485 /* OPC04 */
1486 goto invalid_opc;
1487 case 0x05:
1488 /* OPC05 */
1489 goto invalid_opc;
1490 case 0x06:
1491 /* OPC06 */
1492 goto invalid_opc;
1493 case 0x07:
1494 /* OPC07 */
1495 goto invalid_opc;
1496 case 0x08:
1497 /* LDA */
1498 if (likely(ra != 31)) {
1499 if (rb != 31)
1500 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1501 else
1502 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1503 }
1504 break;
1505 case 0x09:
1506 /* LDAH */
1507 if (likely(ra != 31)) {
1508 if (rb != 31)
1509 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1510 else
1511 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1512 }
1513 break;
1514 case 0x0A:
1515 /* LDBU */
1516 if (!(ctx->amask & AMASK_BWX))
1517 goto invalid_opc;
1518 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1519 break;
1520 case 0x0B:
1521 /* LDQ_U */
1522 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1523 break;
1524 case 0x0C:
1525 /* LDWU */
1526 if (!(ctx->amask & AMASK_BWX))
1527 goto invalid_opc;
1528 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1529 break;
1530 case 0x0D:
1531 /* STW */
1532 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
1533 break;
1534 case 0x0E:
1535 /* STB */
1536 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
1537 break;
1538 case 0x0F:
1539 /* STQ_U */
1540 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
1541 break;
1542 case 0x10:
1543 switch (fn7) {
1544 case 0x00:
1545 /* ADDL */
1546 if (likely(rc != 31)) {
1547 if (ra != 31) {
1548 if (islit) {
1549 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1550 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1551 } else {
1552 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1553 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1554 }
1555 } else {
1556 if (islit)
1557 tcg_gen_movi_i64(cpu_ir[rc], lit);
1558 else
1559 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1560 }
1561 }
1562 break;
1563 case 0x02:
1564 /* S4ADDL */
1565 if (likely(rc != 31)) {
1566 if (ra != 31) {
1567 TCGv tmp = tcg_temp_new();
1568 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1569 if (islit)
1570 tcg_gen_addi_i64(tmp, tmp, lit);
1571 else
1572 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1573 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1574 tcg_temp_free(tmp);
1575 } else {
1576 if (islit)
1577 tcg_gen_movi_i64(cpu_ir[rc], lit);
1578 else
1579 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1580 }
1581 }
1582 break;
1583 case 0x09:
1584 /* SUBL */
1585 if (likely(rc != 31)) {
1586 if (ra != 31) {
1587 if (islit)
1588 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1589 else
1590 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1591 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1592 } else {
1593 if (islit)
1594 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1595 else {
1596 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1597 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
1601 case 0x0B:
1602 /* S4SUBL */
1603 if (likely(rc != 31)) {
1604 if (ra != 31) {
1605 TCGv tmp = tcg_temp_new();
1606 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1607 if (islit)
1608 tcg_gen_subi_i64(tmp, tmp, lit);
1609 else
1610 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1611 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1612 tcg_temp_free(tmp);
1613 } else {
1614 if (islit)
1615 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1616 else {
1617 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1618 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1619 }
1620 }
1621 }
1622 break;
1623 case 0x0F:
1624 /* CMPBGE */
1625 gen_cmpbge(ra, rb, rc, islit, lit);
1626 break;
1627 case 0x12:
1628 /* S8ADDL */
1629 if (likely(rc != 31)) {
1630 if (ra != 31) {
1631 TCGv tmp = tcg_temp_new();
1632 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1633 if (islit)
1634 tcg_gen_addi_i64(tmp, tmp, lit);
1635 else
1636 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1637 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1638 tcg_temp_free(tmp);
1639 } else {
1640 if (islit)
1641 tcg_gen_movi_i64(cpu_ir[rc], lit);
1642 else
1643 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1644 }
1645 }
1646 break;
1647 case 0x1B:
1648 /* S8SUBL */
1649 if (likely(rc != 31)) {
1650 if (ra != 31) {
1651 TCGv tmp = tcg_temp_new();
1652 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1653 if (islit)
1654 tcg_gen_subi_i64(tmp, tmp, lit);
1655 else
1656 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1657 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1658 tcg_temp_free(tmp);
1659 } else {
1660 if (islit)
1661 tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
1668 break;
1669 case 0x1D:
1670 /* CMPULT */
1671 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1672 break;
1673 case 0x20:
1674 /* ADDQ */
1675 if (likely(rc != 31)) {
1676 if (ra != 31) {
1677 if (islit)
1678 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1679 else
1680 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1681 } else {
1682 if (islit)
1683 tcg_gen_movi_i64(cpu_ir[rc], lit);
1684 else
1685 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1686 }
1687 }
1688 break;
1689 case 0x22:
1690 /* S4ADDQ */
1691 if (likely(rc != 31)) {
1692 if (ra != 31) {
1693 TCGv tmp = tcg_temp_new();
1694 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1695 if (islit)
1696 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1697 else
1698 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1699 tcg_temp_free(tmp);
1700 } else {
1701 if (islit)
1702 tcg_gen_movi_i64(cpu_ir[rc], lit);
1703 else
1704 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1705 }
1706 }
1707 break;
1708 case 0x29:
1709 /* SUBQ */
1710 if (likely(rc != 31)) {
1711 if (ra != 31) {
1712 if (islit)
1713 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1714 else
1715 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1716 } else {
1717 if (islit)
1718 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1719 else
1720 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1721 }
1722 }
1723 break;
1724 case 0x2B:
1725 /* S4SUBQ */
1726 if (likely(rc != 31)) {
1727 if (ra != 31) {
1728 TCGv tmp = tcg_temp_new();
1729 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1730 if (islit)
1731 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1732 else
1733 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1734 tcg_temp_free(tmp);
1735 } else {
1736 if (islit)
1737 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1738 else
1739 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1740 }
1741 }
1742 break;
1743 case 0x2D:
1744 /* CMPEQ */
1745 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1746 break;
1747 case 0x32:
1748 /* S8ADDQ */
1749 if (likely(rc != 31)) {
1750 if (ra != 31) {
1751 TCGv tmp = tcg_temp_new();
1752 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1753 if (islit)
1754 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1755 else
1756 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1757 tcg_temp_free(tmp);
1758 } else {
1759 if (islit)
1760 tcg_gen_movi_i64(cpu_ir[rc], lit);
1761 else
1762 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1763 }
1764 }
1765 break;
1766 case 0x3B:
1767 /* S8SUBQ */
1768 if (likely(rc != 31)) {
1769 if (ra != 31) {
1770 TCGv tmp = tcg_temp_new();
1771 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1772 if (islit)
1773 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1774 else
1775 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1776 tcg_temp_free(tmp);
1777 } else {
1778 if (islit)
1779 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1780 else
1781 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1782 }
1783 }
1784 break;
1785 case 0x3D:
1786 /* CMPULE */
1787 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1788 break;
1789 case 0x40:
1790 /* ADDL/V */
1791 gen_addlv(ra, rb, rc, islit, lit);
1792 break;
1793 case 0x49:
1794 /* SUBL/V */
1795 gen_sublv(ra, rb, rc, islit, lit);
1796 break;
1797 case 0x4D:
1798 /* CMPLT */
1799 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1800 break;
1801 case 0x60:
1802 /* ADDQ/V */
1803 gen_addqv(ra, rb, rc, islit, lit);
1804 break;
1805 case 0x69:
1806 /* SUBQ/V */
1807 gen_subqv(ra, rb, rc, islit, lit);
1808 break;
1809 case 0x6D:
1810 /* CMPLE */
1811 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1812 break;
1813 default:
1814 goto invalid_opc;
1815 }
1816 break;
1817 case 0x11:
1818 switch (fn7) {
1819 case 0x00:
1820 /* AND */
1821 if (likely(rc != 31)) {
1822 if (ra == 31)
1823 tcg_gen_movi_i64(cpu_ir[rc], 0);
1824 else if (islit)
1825 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1826 else
1827 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1828 }
1829 break;
1830 case 0x08:
1831 /* BIC */
1832 if (likely(rc != 31)) {
1833 if (ra != 31) {
1834 if (islit)
1835 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1836 else
1837 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1838 } else
1839 tcg_gen_movi_i64(cpu_ir[rc], 0);
1840 }
1841 break;
1842 case 0x14:
1843 /* CMOVLBS */
1844 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1845 break;
1846 case 0x16:
1847 /* CMOVLBC */
1848 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1849 break;
1850 case 0x20:
1851 /* BIS */
1852 if (likely(rc != 31)) {
1853 if (ra != 31) {
1854 if (islit)
1855 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1856 else
1857 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1858 } else {
1859 if (islit)
1860 tcg_gen_movi_i64(cpu_ir[rc], lit);
1861 else
1862 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1863 }
1864 }
1865 break;
1866 case 0x24:
1867 /* CMOVEQ */
1868 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1869 break;
1870 case 0x26:
1871 /* CMOVNE */
1872 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1873 break;
1874 case 0x28:
1875 /* ORNOT */
1876 if (likely(rc != 31)) {
1877 if (ra != 31) {
1878 if (islit)
1879 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1880 else
1881 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1882 } else {
1883 if (islit)
1884 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1885 else
1886 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1887 }
1888 }
1889 break;
1890 case 0x40:
1891 /* XOR */
1892 if (likely(rc != 31)) {
1893 if (ra != 31) {
1894 if (islit)
1895 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1896 else
1897 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1898 } else {
1899 if (islit)
1900 tcg_gen_movi_i64(cpu_ir[rc], lit);
1901 else
1902 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1903 }
1904 }
1905 break;
1906 case 0x44:
1907 /* CMOVLT */
1908 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1909 break;
1910 case 0x46:
1911 /* CMOVGE */
1912 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1913 break;
1914 case 0x48:
1915 /* EQV */
1916 if (likely(rc != 31)) {
1917 if (ra != 31) {
1918 if (islit)
1919 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1920 else
1921 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1922 } else {
1923 if (islit)
1924 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1925 else
1926 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1927 }
1928 }
1929 break;
1930 case 0x61:
1931 /* AMASK */
1932 if (likely(rc != 31)) {
1933 if (islit)
1934 tcg_gen_movi_i64(cpu_ir[rc], lit);
1935 else
1936 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1937 switch (ctx->env->implver) {
1938 case IMPLVER_2106x:
1939 /* EV4, EV45, LCA, LCA45 & EV5 */
1940 break;
1941 case IMPLVER_21164:
1942 case IMPLVER_21264:
1943 case IMPLVER_21364:
1944 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1945 ~(uint64_t)ctx->amask);
1946 break;
1947 }
1948 }
1949 break;
1950 case 0x64:
1951 /* CMOVLE */
1952 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
1953 break;
1954 case 0x66:
1955 /* CMOVGT */
1956 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
1957 break;
1958 case 0x6C:
1959 /* IMPLVER */
1960 if (rc != 31)
1961 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
1962 break;
1963 default:
1964 goto invalid_opc;
1965 }
1966 break;
1967 case 0x12:
1968 switch (fn7) {
1969 case 0x02:
1970 /* MSKBL */
1971 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
1972 break;
1973 case 0x06:
1974 /* EXTBL */
1975 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
1976 break;
1977 case 0x0B:
1978 /* INSBL */
1979 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
1980 break;
1981 case 0x12:
1982 /* MSKWL */
1983 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
1984 break;
1985 case 0x16:
1986 /* EXTWL */
1987 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
1988 break;
1989 case 0x1B:
1990 /* INSWL */
1991 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
1992 break;
1993 case 0x22:
1994 /* MSKLL */
1995 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
1996 break;
1997 case 0x26:
1998 /* EXTLL */
1999 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2000 break;
2001 case 0x2B:
2002 /* INSLL */
2003 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2004 break;
2005 case 0x30:
2006 /* ZAP */
2007 gen_zap(ra, rb, rc, islit, lit);
2008 break;
2009 case 0x31:
2010 /* ZAPNOT */
2011 gen_zapnot(ra, rb, rc, islit, lit);
2012 break;
2013 case 0x32:
2014 /* MSKQL */
2015 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2016 break;
2017 case 0x34:
2018 /* SRL */
2019 if (likely(rc != 31)) {
2020 if (ra != 31) {
2021 if (islit)
2022 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2023 else {
2024 TCGv shift = tcg_temp_new();
2025 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2026 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2027 tcg_temp_free(shift);
2028 }
2029 } else
2030 tcg_gen_movi_i64(cpu_ir[rc], 0);
2031 }
2032 break;
2033 case 0x36:
2034 /* EXTQL */
2035 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2036 break;
2037 case 0x39:
2038 /* SLL */
2039 if (likely(rc != 31)) {
2040 if (ra != 31) {
2041 if (islit)
2042 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2043 else {
2044 TCGv shift = tcg_temp_new();
2045 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2046 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2047 tcg_temp_free(shift);
2048 }
2049 } else
2050 tcg_gen_movi_i64(cpu_ir[rc], 0);
2051 }
2052 break;
2053 case 0x3B:
2054 /* INSQL */
2055 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2056 break;
2057 case 0x3C:
2058 /* SRA */
2059 if (likely(rc != 31)) {
2060 if (ra != 31) {
2061 if (islit)
2062 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2063 else {
2064 TCGv shift = tcg_temp_new();
2065 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2066 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2067 tcg_temp_free(shift);
2068 }
2069 } else
2070 tcg_gen_movi_i64(cpu_ir[rc], 0);
2071 }
2072 break;
2073 case 0x52:
2074 /* MSKWH */
2075 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2076 break;
2077 case 0x57:
2078 /* INSWH */
2079 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2080 break;
2081 case 0x5A:
2082 /* EXTWH */
2083 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2084 break;
2085 case 0x62:
2086 /* MSKLH */
2087 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2088 break;
2089 case 0x67:
2090 /* INSLH */
2091 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2092 break;
2093 case 0x6A:
2094 /* EXTLH */
2095 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2096 break;
2097 case 0x72:
2098 /* MSKQH */
2099 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2100 break;
2101 case 0x77:
2102 /* INSQH */
2103 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2104 break;
2105 case 0x7A:
2106 /* EXTQH */
2107 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2108 break;
2109 default:
2110 goto invalid_opc;
2111 }
2112 break;
2113 case 0x13:
2114 switch (fn7) {
2115 case 0x00:
2116 /* MULL */
2117 if (likely(rc != 31)) {
2118 if (ra == 31)
2119 tcg_gen_movi_i64(cpu_ir[rc], 0);
2120 else {
2121 if (islit)
2122 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2123 else
2124 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2125 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2126 }
2127 }
2128 break;
2129 case 0x20:
2130 /* MULQ */
2131 if (likely(rc != 31)) {
2132 if (ra == 31)
2133 tcg_gen_movi_i64(cpu_ir[rc], 0);
2134 else if (islit)
2135 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2136 else
2137 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2138 }
2139 break;
2140 case 0x30:
2141 /* UMULH */
2142 gen_umulh(ra, rb, rc, islit, lit);
2143 break;
2144 case 0x40:
2145 /* MULL/V */
2146 gen_mullv(ra, rb, rc, islit, lit);
2147 break;
2148 case 0x60:
2149 /* MULQ/V */
2150 gen_mulqv(ra, rb, rc, islit, lit);
2151 break;
2152 default:
2153 goto invalid_opc;
2154 }
2155 break;
2156 case 0x14:
2157 switch (fpfn) { /* fn11 & 0x3F */
2158 case 0x04:
2159 /* ITOFS */
2160 if (!(ctx->amask & AMASK_FIX))
2161 goto invalid_opc;
2162 if (likely(rc != 31)) {
2163 if (ra != 31) {
2164 TCGv_i32 tmp = tcg_temp_new_i32();
2165 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2166 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2167 tcg_temp_free_i32(tmp);
2168 } else
2169 tcg_gen_movi_i64(cpu_fir[rc], 0);
2170 }
2171 break;
2172 case 0x0A:
2173 /* SQRTF */
2174 if (!(ctx->amask & AMASK_FIX))
2175 goto invalid_opc;
2176 gen_fsqrtf(rb, rc);
2177 break;
2178 case 0x0B:
2179 /* SQRTS */
2180 if (!(ctx->amask & AMASK_FIX))
2181 goto invalid_opc;
2182 gen_fsqrts(ctx, rb, rc, fn11);
2183 break;
2184 case 0x14:
2185 /* ITOFF */
2186 if (!(ctx->amask & AMASK_FIX))
2187 goto invalid_opc;
2188 if (likely(rc != 31)) {
2189 if (ra != 31) {
2190 TCGv_i32 tmp = tcg_temp_new_i32();
2191 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2192 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2193 tcg_temp_free_i32(tmp);
2194 } else
2195 tcg_gen_movi_i64(cpu_fir[rc], 0);
2196 }
2197 break;
2198 case 0x24:
2199 /* ITOFT */
2200 if (!(ctx->amask & AMASK_FIX))
2201 goto invalid_opc;
2202 if (likely(rc != 31)) {
2203 if (ra != 31)
2204 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2205 else
2206 tcg_gen_movi_i64(cpu_fir[rc], 0);
2207 }
2208 break;
2209 case 0x2A:
2210 /* SQRTG */
2211 if (!(ctx->amask & AMASK_FIX))
2212 goto invalid_opc;
2213 gen_fsqrtg(rb, rc);
2214 break;
2215 case 0x02B:
2216 /* SQRTT */
2217 if (!(ctx->amask & AMASK_FIX))
2218 goto invalid_opc;
2219 gen_fsqrtt(ctx, rb, rc, fn11);
2220 break;
2221 default:
2222 goto invalid_opc;
2223 }
2224 break;
2225 case 0x15:
2226 /* VAX floating point */
2227 /* XXX: rounding mode and trapping qualifiers are ignored (!) */
2228 switch (fpfn) { /* fn11 & 0x3F */
2229 case 0x00:
2230 /* ADDF */
2231 gen_faddf(ra, rb, rc);
2232 break;
2233 case 0x01:
2234 /* SUBF */
2235 gen_fsubf(ra, rb, rc);
2236 break;
2237 case 0x02:
2238 /* MULF */
2239 gen_fmulf(ra, rb, rc);
2240 break;
2241 case 0x03:
2242 /* DIVF */
2243 gen_fdivf(ra, rb, rc);
2244 break;
2245 case 0x1E:
2246 /* CVTDG */
2247 #if 0 // TODO
2248 gen_fcvtdg(rb, rc);
2249 #else
2250 goto invalid_opc;
2251 #endif
2252 break;
2253 case 0x20:
2254 /* ADDG */
2255 gen_faddg(ra, rb, rc);
2256 break;
2257 case 0x21:
2258 /* SUBG */
2259 gen_fsubg(ra, rb, rc);
2260 break;
2261 case 0x22:
2262 /* MULG */
2263 gen_fmulg(ra, rb, rc);
2264 break;
2265 case 0x23:
2266 /* DIVG */
2267 gen_fdivg(ra, rb, rc);
2268 break;
2269 case 0x25:
2270 /* CMPGEQ */
2271 gen_fcmpgeq(ra, rb, rc);
2272 break;
2273 case 0x26:
2274 /* CMPGLT */
2275 gen_fcmpglt(ra, rb, rc);
2276 break;
2277 case 0x27:
2278 /* CMPGLE */
2279 gen_fcmpgle(ra, rb, rc);
2280 break;
2281 case 0x2C:
2282 /* CVTGF */
2283 gen_fcvtgf(rb, rc);
2284 break;
2285 case 0x2D:
2286 /* CVTGD */
2287 #if 0 // TODO
2288 gen_fcvtgd(rb, rc);
2289 #else
2290 goto invalid_opc;
2291 #endif
2292 break;
2293 case 0x2F:
2294 /* CVTGQ */
2295 gen_fcvtgq(rb, rc);
2296 break;
2297 case 0x3C:
2298 /* CVTQF */
2299 gen_fcvtqf(rb, rc);
2300 break;
2301 case 0x3E:
2302 /* CVTQG */
2303 gen_fcvtqg(rb, rc);
2304 break;
2305 default:
2306 goto invalid_opc;
2307 }
2308 break;
2309 case 0x16:
2310 /* IEEE floating-point */
2311 switch (fpfn) { /* fn11 & 0x3F */
2312 case 0x00:
2313 /* ADDS */
2314 gen_fadds(ctx, ra, rb, rc, fn11);
2315 break;
2316 case 0x01:
2317 /* SUBS */
2318 gen_fsubs(ctx, ra, rb, rc, fn11);
2319 break;
2320 case 0x02:
2321 /* MULS */
2322 gen_fmuls(ctx, ra, rb, rc, fn11);
2323 break;
2324 case 0x03:
2325 /* DIVS */
2326 gen_fdivs(ctx, ra, rb, rc, fn11);
2327 break;
2328 case 0x20:
2329 /* ADDT */
2330 gen_faddt(ctx, ra, rb, rc, fn11);
2331 break;
2332 case 0x21:
2333 /* SUBT */
2334 gen_fsubt(ctx, ra, rb, rc, fn11);
2335 break;
2336 case 0x22:
2337 /* MULT */
2338 gen_fmult(ctx, ra, rb, rc, fn11);
2339 break;
2340 case 0x23:
2341 /* DIVT */
2342 gen_fdivt(ctx, ra, rb, rc, fn11);
2343 break;
2344 case 0x24:
2345 /* CMPTUN */
2346 gen_fcmptun(ctx, ra, rb, rc, fn11);
2347 break;
2348 case 0x25:
2349 /* CMPTEQ */
2350 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2351 break;
2352 case 0x26:
2353 /* CMPTLT */
2354 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2355 break;
2356 case 0x27:
2357 /* CMPTLE */
2358 gen_fcmptle(ctx, ra, rb, rc, fn11);
2359 break;
2360 case 0x2C:
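/* Function code 0x2C is overloaded: fn11 0x2AC is CVTST and 0x6AC
   is CVTST/S; every other qualifier combination is CVTTS. */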
2361 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2362 /* CVTST */
2363 gen_fcvtst(ctx, rb, rc, fn11);
2364 } else {
2365 /* CVTTS */
2366 gen_fcvtts(ctx, rb, rc, fn11);
2367 }
2368 break;
2369 case 0x2F:
2370 /* CVTTQ */
2371 gen_fcvttq(ctx, rb, rc, fn11);
2372 break;
2373 case 0x3C:
2374 /* CVTQS */
2375 gen_fcvtqs(ctx, rb, rc, fn11);
2376 break;
2377 case 0x3E:
2378 /* CVTQT */
2379 gen_fcvtqt(ctx, rb, rc, fn11);
2380 break;
2381 default:
2382 goto invalid_opc;
2383 }
2384 break;
2385 case 0x17:
2386 switch (fn11) {
2387 case 0x010:
2388 /* CVTLQ */
2389 gen_fcvtlq(rb, rc);
2390 break;
2391 case 0x020:
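/* CPYS with ra == rb copies a value's own sign back onto it and so
   leaves it unchanged -- the canonical FMOV idiom -- which is why it
   is special-cased as a plain register move. */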
2392 if (likely(rc != 31)) {
2393 if (ra == rb) {
2394 /* FMOV */
2395 if (ra == 31)
2396 tcg_gen_movi_i64(cpu_fir[rc], 0);
2397 else
2398 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2399 } else {
2400 /* CPYS */
2401 gen_fcpys(ra, rb, rc);
2402 }
2403 }
2404 break;
2405 case 0x021:
2406 /* CPYSN */
2407 gen_fcpysn(ra, rb, rc);
2408 break;
2409 case 0x022:
2410 /* CPYSE */
2411 gen_fcpyse(ra, rb, rc);
2412 break;
2413 case 0x024:
2414 /* MT_FPCR */
2415 if (likely(ra != 31))
2416 gen_helper_store_fpcr(cpu_fir[ra]);
2417 else {
2418 TCGv tmp = tcg_const_i64(0);
2419 gen_helper_store_fpcr(tmp);
2420 tcg_temp_free(tmp);
2421 }
2422 break;
2423 case 0x025:
2424 /* MF_FPCR */
2425 if (likely(ra != 31))
2426 gen_helper_load_fpcr(cpu_fir[ra]);
2427 break;
2428 case 0x02A:
2429 /* FCMOVEQ */
2430 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2431 break;
2432 case 0x02B:
2433 /* FCMOVNE */
2434 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2435 break;
2436 case 0x02C:
2437 /* FCMOVLT */
2438 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2439 break;
2440 case 0x02D:
2441 /* FCMOVGE */
2442 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2443 break;
2444 case 0x02E:
2445 /* FCMOVLE */
2446 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2447 break;
2448 case 0x02F:
2449 /* FCMOVGT */
2450 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2451 break;
2452 case 0x030:
2453 /* CVTQL */
2454 gen_fcvtql(rb, rc);
2455 break;
2456 case 0x130:
2457 /* CVTQL/V */
2458 case 0x530:
2459 /* CVTQL/SV */
2460 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2461 /v doesn't do. The only thing I can think of is that /sv is a
2462 valid instruction merely for completeness in the ISA. */
2463 gen_fcvtql_v(ctx, rb, rc);
2464 break;
2465 default:
2466 goto invalid_opc;
2467 }
2468 break;
2469 case 0x18:
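/* Miscellaneous format: the function code lives in the displacement
   field. Most of these are memory-system hints; since this translator
   executes guest insns serially, the barriers need no generated code. */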
2470 switch ((uint16_t)disp16) {
2471 case 0x0000:
2472 /* TRAPB */
2473 /* No-op. */
2474 break;
2475 case 0x0400:
2476 /* EXCB */
2477 /* No-op. */
2478 break;
2479 case 0x4000:
2480 /* MB */
2481 /* No-op */
2482 break;
2483 case 0x4400:
2484 /* WMB */
2485 /* No-op */
2486 break;
2487 case 0x8000:
2488 /* FETCH */
2489 /* No-op */
2490 break;
2491 case 0xA000:
2492 /* FETCH_M */
2493 /* No-op */
2494 break;
2495 case 0xC000:
2496 /* RPCC */
2497 if (ra != 31)
2498 gen_helper_load_pcc(cpu_ir[ra]);
2499 break;
2500 case 0xE000:
2501 /* RC */
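/* RC and RS read the interrupt flag into ra and then clear (RC) or
   set (RS) it; gen_rx's second argument selects the new flag value. */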
2502 gen_rx(ra, 0);
2503 break;
2504 case 0xE800:
2505 /* ECB */
2506 break;
2507 case 0xF000:
2508 /* RS */
2509 gen_rx(ra, 1);
2510 break;
2511 case 0xF800:
2512 /* WH64 */
2513 /* No-op */
2514 break;
2515 default:
2516 goto invalid_opc;
2517 }
2518 break;
2519 case 0x19:
2520 /* HW_MFPR (PALcode) */
2521 #if defined (CONFIG_USER_ONLY)
2522 goto invalid_opc;
2523 #else
2524 if (!ctx->pal_mode)
2525 goto invalid_opc;
2526 if (ra != 31) {
2527 TCGv tmp = tcg_const_i64(insn & 0xFF);
2528 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
2529 tcg_temp_free(tmp);
2530 }
2531 break;
2532 #endif
2533 case 0x1A:
2534 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2535 prediction stack action, which of course we don't implement. */
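/* The target comes from rb with the two low (hint) bits masked off;
   ra, if not r31, receives the return address, i.e. ctx->pc, which
   has already been advanced past this insn. */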
2536 if (rb != 31) {
2537 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2538 } else {
2539 tcg_gen_movi_i64(cpu_pc, 0);
2540 }
2541 if (ra != 31) {
2542 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2543 }
2544 ret = EXIT_PC_UPDATED;
2545 break;
2546 case 0x1B:
2547 /* HW_LD (PALcode) */
2548 #if defined (CONFIG_USER_ONLY)
2549 goto invalid_opc;
2550 #else
2551 if (!ctx->pal_mode)
2552 goto invalid_opc;
2553 if (ra != 31) {
2554 TCGv addr = tcg_temp_new();
2555 if (rb != 31)
2556 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2557 else
2558 tcg_gen_movi_i64(addr, disp12);
2559 switch ((insn >> 12) & 0xF) {
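/* Insn bits <15:12> select the HW_LD variant: physical vs. virtual,
   locked, write-checked, and alternate-mode forms, per the cases below. */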
2560 case 0x0:
2561 /* Longword physical access (hw_ldl/p) */
2562 gen_helper_ldl_raw(cpu_ir[ra], addr);
2563 break;
2564 case 0x1:
2565 /* Quadword physical access (hw_ldq/p) */
2566 gen_helper_ldq_raw(cpu_ir[ra], addr);
2567 break;
2568 case 0x2:
2569 /* Longword physical access with lock (hw_ldl_l/p) */
2570 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
2571 break;
2572 case 0x3:
2573 /* Quadword physical access with lock (hw_ldq_l/p) */
2574 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
2575 break;
2576 case 0x4:
2577 /* Longword virtual PTE fetch (hw_ldl/v) */
2578 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2579 break;
2580 case 0x5:
2581 /* Quadword virtual PTE fetch (hw_ldq/v) */
2582 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2583 break;
2584 case 0x6:
2585 /* Invalid */
2586 goto invalid_opc;
2587 case 0x7:
2588 /* Invalid */
2589 goto invalid_opc;
2590 case 0x8:
2591 /* Longword virtual access (hw_ldl) */
2592 gen_helper_st_virt_to_phys(addr, addr);
2593 gen_helper_ldl_raw(cpu_ir[ra], addr);
2594 break;
2595 case 0x9:
2596 /* Quadword virtual access (hw_ldq) */
2597 gen_helper_st_virt_to_phys(addr, addr);
2598 gen_helper_ldq_raw(cpu_ir[ra], addr);
2599 break;
2600 case 0xA:
2601 /* Longword virtual access with protection check (hw_ldl/w) */
2602 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2603 break;
2604 case 0xB:
2605 /* Quadword virtual access with protection check (hw_ldq/w) */
2606 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2607 break;
2608 case 0xC:
2609 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2610 gen_helper_set_alt_mode();
2611 gen_helper_st_virt_to_phys(addr, addr);
2612 gen_helper_ldl_raw(cpu_ir[ra], addr);
2613 gen_helper_restore_mode();
2614 break;
2615 case 0xD:
2616 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2617 gen_helper_set_alt_mode();
2618 gen_helper_st_virt_to_phys(addr, addr);
2619 gen_helper_ldq_raw(cpu_ir[ra], addr);
2620 gen_helper_restore_mode();
2621 break;
2622 case 0xE:
2623 /* Longword virtual access with alternate access mode and
2624 * protection checks (hw_ldl/wa)
2625 */
2626 gen_helper_set_alt_mode();
2627 gen_helper_ldl_data(cpu_ir[ra], addr);
2628 gen_helper_restore_mode();
2629 break;
2630 case 0xF:
2631 /* Quadword virtual access with alternate access mode and
2632 * protection checks (hw_ldq/wa)
2633 */
2634 gen_helper_set_alt_mode();
2635 gen_helper_ldq_data(cpu_ir[ra], addr);
2636 gen_helper_restore_mode();
2637 break;
2638 }
2639 tcg_temp_free(addr);
2640 }
2641 break;
2642 #endif
2643 case 0x1C:
2644 switch (fn7) {
2645 case 0x00:
2646 /* SEXTB */
2647 if (!(ctx->amask & AMASK_BWX))
2648 goto invalid_opc;
2649 if (likely(rc != 31)) {
2650 if (islit)
2651 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2652 else
2653 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2654 }
2655 break;
2656 case 0x01:
2657 /* SEXTW */
2658 if (!(ctx->amask & AMASK_BWX))
2659 goto invalid_opc;
2660 if (likely(rc != 31)) {
2661 if (islit)
2662 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2663 else
2664 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2665 }
2666 break;
2667 case 0x30:
2668 /* CTPOP */
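/* With a literal operand the count is folded at translate time using
   the host's ctpop64; otherwise a helper does the work at run time. */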
2669 if (!(ctx->amask & AMASK_CIX))
2670 goto invalid_opc;
2671 if (likely(rc != 31)) {
2672 if (islit)
2673 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2674 else
2675 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2676 }
2677 break;
2678 case 0x31:
2679 /* PERR */
2680 if (!(ctx->amask & AMASK_MVI))
2681 goto invalid_opc;
2682 gen_perr(ra, rb, rc, islit, lit);
2683 break;
2684 case 0x32:
2685 /* CTLZ */
2686 if (!(ctx->amask & AMASK_CIX))
2687 goto invalid_opc;
2688 if (likely(rc != 31)) {
2689 if (islit)
2690 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2691 else
2692 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2693 }
2694 break;
2695 case 0x33:
2696 /* CTTZ */
2697 if (!(ctx->amask & AMASK_CIX))
2698 goto invalid_opc;
2699 if (likely(rc != 31)) {
2700 if (islit)
2701 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2702 else
2703 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2704 }
2705 break;
2706 case 0x34:
2707 /* UNPKBW */
2708 if (!(ctx->amask & AMASK_MVI))
2709 goto invalid_opc;
2710 if (real_islit || ra != 31)
2711 goto invalid_opc;
2712 gen_unpkbw (rb, rc);
2713 break;
2714 case 0x35:
2715 /* UNPKBL */
2716 if (!(ctx->amask & AMASK_MVI))
2717 goto invalid_opc;
2718 if (real_islit || ra != 31)
2719 goto invalid_opc;
2720 gen_unpkbl (rb, rc);
2721 break;
2722 case 0x36:
2723 /* PKWB */
2724 if (!(ctx->amask & AMASK_MVI))
2725 goto invalid_opc;
2726 if (real_islit || ra != 31)
2727 goto invalid_opc;
2728 gen_pkwb (rb, rc);
2729 break;
2730 case 0x37:
2731 /* PKLB */
2732 if (!(ctx->amask & AMASK_MVI))
2733 goto invalid_opc;
2734 if (real_islit || ra != 31)
2735 goto invalid_opc;
2736 gen_pklb (rb, rc);
2737 break;
2738 case 0x38:
2739 /* MINSB8 */
2740 if (!(ctx->amask & AMASK_MVI))
2741 goto invalid_opc;
2742 gen_minsb8 (ra, rb, rc, islit, lit);
2743 break;
2744 case 0x39:
2745 /* MINSW4 */
2746 if (!(ctx->amask & AMASK_MVI))
2747 goto invalid_opc;
2748 gen_minsw4 (ra, rb, rc, islit, lit);
2749 break;
2750 case 0x3A:
2751 /* MINUB8 */
2752 if (!(ctx->amask & AMASK_MVI))
2753 goto invalid_opc;
2754 gen_minub8 (ra, rb, rc, islit, lit);
2755 break;
2756 case 0x3B:
2757 /* MINUW4 */
2758 if (!(ctx->amask & AMASK_MVI))
2759 goto invalid_opc;
2760 gen_minuw4 (ra, rb, rc, islit, lit);
2761 break;
2762 case 0x3C:
2763 /* MAXUB8 */
2764 if (!(ctx->amask & AMASK_MVI))
2765 goto invalid_opc;
2766 gen_maxub8 (ra, rb, rc, islit, lit);
2767 break;
2768 case 0x3D:
2769 /* MAXUW4 */
2770 if (!(ctx->amask & AMASK_MVI))
2771 goto invalid_opc;
2772 gen_maxuw4 (ra, rb, rc, islit, lit);
2773 break;
2774 case 0x3E:
2775 /* MAXSB8 */
2776 if (!(ctx->amask & AMASK_MVI))
2777 goto invalid_opc;
2778 gen_maxsb8 (ra, rb, rc, islit, lit);
2779 break;
2780 case 0x3F:
2781 /* MAXSW4 */
2782 if (!(ctx->amask & AMASK_MVI))
2783 goto invalid_opc;
2784 gen_maxsw4 (ra, rb, rc, islit, lit);
2785 break;
2786 case 0x70:
2787 /* FTOIT */
2788 if (!(ctx->amask & AMASK_FIX))
2789 goto invalid_opc;
2790 if (likely(rc != 31)) {
2791 if (ra != 31)
2792 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2793 else
2794 tcg_gen_movi_i64(cpu_ir[rc], 0);
2795 }
2796 break;
2797 case 0x78:
2798 /* FTOIS */
2799 if (!(ctx->amask & AMASK_FIX))
2800 goto invalid_opc;
2801 if (rc != 31) {
2802 TCGv_i32 tmp1 = tcg_temp_new_i32();
2803 if (ra != 31)
2804 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2805 else {
2806 TCGv tmp2 = tcg_const_i64(0);
2807 gen_helper_s_to_memory(tmp1, tmp2);
2808 tcg_temp_free(tmp2);
2809 }
2810 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2811 tcg_temp_free_i32(tmp1);
2812 }
2813 break;
2814 default:
2815 goto invalid_opc;
2816 }
2817 break;
2818 case 0x1D:
2819 /* HW_MTPR (PALcode) */
2820 #if defined (CONFIG_USER_ONLY)
2821 goto invalid_opc;
2822 #else
2823 if (!ctx->pal_mode)
2824 goto invalid_opc;
2825 else {
2826 TCGv tmp1 = tcg_const_i64(insn & 0xFF);
2827 if (ra != 31)
2828 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2829 else {
2830 TCGv tmp2 = tcg_const_i64(0);
2831 gen_helper_mtpr(tmp1, tmp2);
2832 tcg_temp_free(tmp2);
2833 }
2834 tcg_temp_free(tmp1);
2835 ret = EXIT_PC_STALE;
2836 }
2837 break;
2838 #endif
2839 case 0x1E:
2840 /* HW_REI (PALcode) */
2841 #if defined (CONFIG_USER_ONLY)
2842 goto invalid_opc;
2843 #else
2844 if (!ctx->pal_mode)
2845 goto invalid_opc;
2846 if (rb == 31) {
2847 /* "Old" Alpha */
2848 gen_helper_hw_rei();
2849 } else {
2850 TCGv tmp;
2851
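/* (((int64_t)insn << 51) >> 51) sign-extends the insn's low 13 bits,
   the HW_RET displacement: e.g. low bits 0x1FFF yield -1. */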
2852 if (ra != 31) {
2853 tmp = tcg_temp_new();
2854 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2855 } else
2856 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2857 gen_helper_hw_ret(tmp);
2858 tcg_temp_free(tmp);
2859 }
2860 ret = EXIT_PC_UPDATED;
2861 break;
2862 #endif
2863 case 0x1F:
2864 /* HW_ST (PALcode) */
2865 #if defined (CONFIG_USER_ONLY)
2866 goto invalid_opc;
2867 #else
2868 if (!ctx->pal_mode)
2869 goto invalid_opc;
2870 else {
2871 TCGv addr, val;
2872 addr = tcg_temp_new();
2873 if (rb != 31)
2874 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2875 else
2876 tcg_gen_movi_i64(addr, disp12);
2877 if (ra != 31)
2878 val = cpu_ir[ra];
2879 else {
2880 val = tcg_temp_new();
2881 tcg_gen_movi_i64(val, 0);
2882 }
2883 switch ((insn >> 12) & 0xF) {
2884 case 0x0:
2885 /* Longword physical access */
2886 gen_helper_stl_raw(val, addr);
2887 break;
2888 case 0x1:
2889 /* Quadword physical access */
2890 gen_helper_stq_raw(val, addr);
2891 break;
2892 case 0x2:
2893 /* Longword physical access with lock */
2894 gen_helper_stl_c_raw(val, val, addr);
2895 break;
2896 case 0x3:
2897 /* Quadword physical access with lock */
2898 gen_helper_stq_c_raw(val, val, addr);
2899 break;
2900 case 0x4:
2901 /* Longword virtual access */
2902 gen_helper_st_virt_to_phys(addr, addr);
2903 gen_helper_stl_raw(val, addr);
2904 break;
2905 case 0x5:
2906 /* Quadword virtual access */
2907 gen_helper_st_virt_to_phys(addr, addr);
2908 gen_helper_stq_raw(val, addr);
2909 break;
2910 case 0x6:
2911 /* Invalid */
2912 goto invalid_opc;
2913 case 0x7:
2914 /* Invalid */
2915 goto invalid_opc;
2916 case 0x8:
2917 /* Invalid */
2918 goto invalid_opc;
2919 case 0x9:
2920 /* Invalid */
2921 goto invalid_opc;
2922 case 0xA:
2923 /* Invalid */
2924 goto invalid_opc;
2925 case 0xB:
2926 /* Invalid */
2927 goto invalid_opc;
2928 case 0xC:
2929 /* Longword virtual access with alternate access mode */
2930 gen_helper_set_alt_mode();
2931 gen_helper_st_virt_to_phys(addr, addr);
2932 gen_helper_stl_raw(val, addr);
2933 gen_helper_restore_mode();
2934 break;
2935 case 0xD:
2936 /* Quadword virtual access with alternate access mode */
2937 gen_helper_set_alt_mode();
2938 gen_helper_st_virt_to_phys(addr, addr);
2939 gen_helper_stq_raw(val, addr);
2940 gen_helper_restore_mode();
2941 break;
2942 case 0xE:
2943 /* Invalid */
2944 goto invalid_opc;
2945 case 0xF:
2946 /* Invalid */
2947 goto invalid_opc;
2948 }
2949 if (ra == 31)
2950 tcg_temp_free(val);
2951 tcg_temp_free(addr);
2952 }
2953 break;
2954 #endif
2955 case 0x20:
2956 /* LDF */
2957 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2958 break;
2959 case 0x21:
2960 /* LDG */
2961 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2962 break;
2963 case 0x22:
2964 /* LDS */
2965 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2966 break;
2967 case 0x23:
2968 /* LDT */
2969 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2970 break;
2971 case 0x24:
2972 /* STF */
2973 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2974 break;
2975 case 0x25:
2976 /* STG */
2977 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2978 break;
2979 case 0x26:
2980 /* STS */
2981 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2982 break;
2983 case 0x27:
2984 /* STT */
2985 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2986 break;
2987 case 0x28:
2988 /* LDL */
2989 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2990 break;
2991 case 0x29:
2992 /* LDQ */
2993 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2994 break;
2995 case 0x2A:
2996 /* LDL_L */
2997 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2998 break;
2999 case 0x2B:
3000 /* LDQ_L */
3001 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3002 break;
3003 case 0x2C:
3004 /* STL */
3005 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
3006 break;
3007 case 0x2D:
3008 /* STQ */
3009 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
3010 break;
3011 case 0x2E:
3012 /* STL_C */
3013 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
3014 break;
3015 case 0x2F:
3016 /* STQ_C */
3017 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
3018 break;
3019 case 0x30:
3020 /* BR */
3021 ret = gen_bdirect(ctx, ra, disp21);
3022 break;
3023 case 0x31: /* FBEQ */
3024 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3025 break;
3026 case 0x32: /* FBLT */
3027 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3028 break;
3029 case 0x33: /* FBLE */
3030 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3031 break;
3032 case 0x34:
3033 /* BSR */
3034 ret = gen_bdirect(ctx, ra, disp21);
3035 break;
3036 case 0x35: /* FBNE */
3037 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3038 break;
3039 case 0x36: /* FBGE */
3040 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3041 break;
3042 case 0x37: /* FBGT */
3043 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3044 break;
3045 case 0x38:
3046 /* BLBC */
3047 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3048 break;
3049 case 0x39:
3050 /* BEQ */
3051 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3052 break;
3053 case 0x3A:
3054 /* BLT */
3055 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3056 break;
3057 case 0x3B:
3058 /* BLE */
3059 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3060 break;
3061 case 0x3C:
3062 /* BLBS */
3063 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3064 break;
3065 case 0x3D:
3066 /* BNE */
3067 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3068 break;
3069 case 0x3E:
3070 /* BGE */
3071 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3072 break;
3073 case 0x3F:
3074 /* BGT */
3075 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3076 break;
3077 invalid_opc:
3078 gen_invalid(ctx);
3079 /* PC updated by gen_excp. */
3080 ret = EXIT_PC_UPDATED;
3081 break;
3082 }
3083
3084 return ret;
3085 }
3086
3087 static inline void gen_intermediate_code_internal(CPUState *env,
3088 TranslationBlock *tb,
3089 int search_pc)
3090 {
3091 DisasContext ctx, *ctxp = &ctx;
3092 target_ulong pc_start;
3093 uint32_t insn;
3094 uint16_t *gen_opc_end;
3095 CPUBreakpoint *bp;
3096 int j, lj = -1;
3097 ExitStatus ret;
3098 int num_insns;
3099 int max_insns;
3100
3101 pc_start = tb->pc;
3102 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3103
3104 ctx.tb = tb;
3105 ctx.env = env;
3106 ctx.pc = pc_start;
3107 ctx.amask = env->amask;
3108 #if defined (CONFIG_USER_ONLY)
3109 ctx.mem_idx = 0;
3110 #else
3111 ctx.mem_idx = ((env->ps >> 3) & 3);
3112 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
3113 #endif
3114
3115 /* ??? Every TB begins with unset rounding mode, to be initialized on
3116 the first fp insn of the TB. Alternatively we could define a proper
3117 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3118 to reset the FP_STATUS to that default at the end of any TB that
3119 changes the default. We could even (gasp) dynamically figure out
3120 what default would be most efficient given the running program. */
3121 ctx.tb_rm = -1;
3122 /* Similarly for flush-to-zero. */
3123 ctx.tb_ftz = -1;
3124
3125 num_insns = 0;
3126 max_insns = tb->cflags & CF_COUNT_MASK;
3127 if (max_insns == 0)
3128 max_insns = CF_COUNT_MASK;
3129
3130 gen_icount_start();
3131 do {
3132 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3133 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3134 if (bp->pc == ctx.pc) {
3135 gen_excp(&ctx, EXCP_DEBUG, 0);
3136 break;
3137 }
3138 }
3139 }
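/* For the search_pc pass, record each generated op's guest PC and
   insn count in gen_opc_pc[] / gen_opc_icount[], so that gen_pc_load
   can map a fault in generated code back to a guest PC. */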
3140 if (search_pc) {
3141 j = gen_opc_ptr - gen_opc_buf;
3142 if (lj < j) {
3143 lj++;
3144 while (lj < j)
3145 gen_opc_instr_start[lj++] = 0;
3146 }
3147 gen_opc_pc[lj] = ctx.pc;
3148 gen_opc_instr_start[lj] = 1;
3149 gen_opc_icount[lj] = num_insns;
3150 }
3151 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3152 gen_io_start();
3153 insn = ldl_code(ctx.pc);
3154 num_insns++;
3155
3156 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3157 tcg_gen_debug_insn_start(ctx.pc);
3158 }
3159
3160 ctx.pc += 4;
3161 ret = translate_one(ctxp, insn);
3162
3163 if (ret == NO_EXIT) {
3164 /* If we reach a page boundary, are single-stepping,
3165 or exhaust the instruction count, stop generation. */
3166 if (env->singlestep_enabled) {
3167 gen_excp(&ctx, EXCP_DEBUG, 0);
3168 ret = EXIT_PC_UPDATED;
3169 } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3170 || gen_opc_ptr >= gen_opc_end
3171 || num_insns >= max_insns
3172 || singlestep) {
3173 ret = EXIT_PC_STALE;
3174 }
3175 }
3176 } while (ret == NO_EXIT);
3177
3178 if (tb->cflags & CF_LAST_IO) {
3179 gen_io_end();
3180 }
3181
3182 switch (ret) {
3183 case EXIT_GOTO_TB:
3184 break;
3185 case EXIT_PC_STALE:
3186 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3187 /* FALLTHRU */
3188 case EXIT_PC_UPDATED:
3189 tcg_gen_exit_tb(0);
3190 break;
3191 default:
3192 abort();
3193 }
3194
3195 gen_icount_end(tb, num_insns);
3196 *gen_opc_ptr = INDEX_op_end;
3197 if (search_pc) {
3198 j = gen_opc_ptr - gen_opc_buf;
3199 lj++;
3200 while (lj <= j)
3201 gen_opc_instr_start[lj++] = 0;
3202 } else {
3203 tb->size = ctx.pc - pc_start;
3204 tb->icount = num_insns;
3205 }
3206
3207 #ifdef DEBUG_DISAS
3208 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3209 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3210 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3211 qemu_log("\n");
3212 }
3213 #endif
3214 }
3215
3216 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3217 {
3218 gen_intermediate_code_internal(env, tb, 0);
3219 }
3220
3221 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3222 {
3223 gen_intermediate_code_internal(env, tb, 1);
3224 }
3225
3226 struct cpu_def_t {
3227 const char *name;
3228 int implver, amask;
3229 };
3230
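/* Both the EV marketing names and the 21x64 part numbers are accepted;
   each entry supplies the implementation version and the AMASK
   architecture-extension bits advertised to the guest. */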
3231 static const struct cpu_def_t cpu_defs[] = {
3232 { "ev4", IMPLVER_2106x, 0 },
3233 { "ev5", IMPLVER_21164, 0 },
3234 { "ev56", IMPLVER_21164, AMASK_BWX },
3235 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3236 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3237 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3238 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3239 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3240 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3241 { "21064", IMPLVER_2106x, 0 },
3242 { "21164", IMPLVER_21164, 0 },
3243 { "21164a", IMPLVER_21164, AMASK_BWX },
3244 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3245 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3246 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3247 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3248 };
3249
3250 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3251 {
3252 CPUAlphaState *env;
3253 int implver, amask, i, max;
3254
3255 env = qemu_mallocz(sizeof(CPUAlphaState));
3256 cpu_exec_init(env);
3257 alpha_translate_init();
3258 tlb_flush(env, 1);
3259
3260 /* Default to ev67; no reason not to emulate insns by default. */
3261 implver = IMPLVER_21264;
3262 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3263 | AMASK_TRAP | AMASK_PREFETCH);
3264
3265 max = ARRAY_SIZE(cpu_defs);
3266 for (i = 0; i < max; i++) {
3267 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3268 implver = cpu_defs[i].implver;
3269 amask = cpu_defs[i].amask;
3270 break;
3271 }
3272 }
3273 env->implver = implver;
3274 env->amask = amask;
3275
3276 env->ps = 0x1F00;
3277 #if defined (CONFIG_USER_ONLY)
3278 env->ps |= 1 << 3;
3279 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3280 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3281 #else
3282 pal_init(env);
3283 #endif
3284
3285 /* Initialize IPR */
3286 #if defined (CONFIG_USER_ONLY)
3287 env->ipr[IPR_EXC_ADDR] = 0;
3288 env->ipr[IPR_EXC_SUM] = 0;
3289 env->ipr[IPR_EXC_MASK] = 0;
3290 #else
3291 {
3292 // uint64_t hwpcb;
3293 // hwpcb = env->ipr[IPR_PCBB];
3294 env->ipr[IPR_ASN] = 0;
3295 env->ipr[IPR_ASTEN] = 0;
3296 env->ipr[IPR_ASTSR] = 0;
3297 env->ipr[IPR_DATFX] = 0;
3298 /* XXX: fix this */
3299 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3300 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3301 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3302 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3303 env->ipr[IPR_FEN] = 0;
3304 env->ipr[IPR_IPL] = 31;
3305 env->ipr[IPR_MCES] = 0;
3306 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3307 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3308 env->ipr[IPR_SISR] = 0;
3309 env->ipr[IPR_VIRBND] = -1ULL;
3310 }
3311 #endif
3312
3313 qemu_init_vcpu(env);
3314 return env;
3315 }
3316
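/* Restore the guest PC recorded for op pc_pos during the search_pc
   translation pass (see gen_intermediate_code_pc above). */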
3317 void gen_pc_load(CPUState *env, TranslationBlock *tb,
3318 unsigned long searched_pc, int pc_pos, void *puc)
3319 {
3320 env->pc = gen_opc_pc[pc_pos];
3321 }