1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
37
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
43
44 typedef struct DisasContext DisasContext;
45 struct DisasContext {
46 struct TranslationBlock *tb;
47 CPUAlphaState *env;
48 uint64_t pc;
49 int mem_idx;
50 #if !defined (CONFIG_USER_ONLY)
51 int pal_mode;
52 #endif
53 uint32_t amask;
54
55 /* Current rounding mode for this TB. */
56 int tb_rm;
57 /* Current flush-to-zero setting for this TB. */
58 int tb_ftz;
59 };
60
61 /* Return values from translate_one, indicating the state of the TB.
62 Note that zero indicates that we are not exiting the TB. */
63
64 typedef enum {
65 NO_EXIT,
66
67 /* We have emitted one or more goto_tb. No fixup required. */
68 EXIT_GOTO_TB,
69
70 /* We are not using a goto_tb (for whatever reason), but have updated
71 the PC (for whatever reason), so there's no need to do it again on
72 exiting the TB. */
73 EXIT_PC_UPDATED,
74
75 /* We are exiting the TB, but have neither emitted a goto_tb, nor
76 updated the PC for the next instruction to be executed. */
77 EXIT_PC_STALE,
78
79 /* We are ending the TB with a noreturn function call, e.g. longjmp.
80 No following code will be executed. */
81 EXIT_NORETURN,
82 } ExitStatus;
83
84 /* global register indexes */
85 static TCGv_ptr cpu_env;
86 static TCGv cpu_ir[31];
87 static TCGv cpu_fir[31];
88 static TCGv cpu_pc;
89 static TCGv cpu_lock_addr;
90 static TCGv cpu_lock_st_addr;
91 static TCGv cpu_lock_value;
92 #ifdef CONFIG_USER_ONLY
93 static TCGv cpu_uniq;
94 #endif
95
96 /* register names */
97 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
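/* The size above is exact: the names "ir0".."ir9" take 10*4 bytes
   (three chars plus NUL), "ir10".."ir30" take 21*5, and the "fir"
   names likewise take 10*5 + 21*6, matching the sprintf calls below. */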
98
99 #include "gen-icount.h"
100
101 static void alpha_translate_init(void)
102 {
103 int i;
104 char *p;
105 static int done_init = 0;
106
107 if (done_init)
108 return;
109
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111
112 p = cpu_reg_names;
113 for (i = 0; i < 31; i++) {
114 sprintf(p, "ir%d", i);
115 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
116 offsetof(CPUState, ir[i]), p);
117 p += (i < 10) ? 4 : 5;
118
119 sprintf(p, "fir%d", i);
120 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
121 offsetof(CPUState, fir[i]), p);
122 p += (i < 10) ? 5 : 6;
123 }
124
125 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
126 offsetof(CPUState, pc), "pc");
127
128 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
129 offsetof(CPUState, lock_addr),
130 "lock_addr");
131 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
132 offsetof(CPUState, lock_st_addr),
133 "lock_st_addr");
134 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
135 offsetof(CPUState, lock_value),
136 "lock_value");
137
138 #ifdef CONFIG_USER_ONLY
139 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
140 offsetof(CPUState, unique), "uniq");
141 #endif
142
143 /* register helpers */
144 #define GEN_HELPER 2
145 #include "helper.h"
146
147 done_init = 1;
148 }
149
150 static void gen_excp_1(int exception, int error_code)
151 {
152 TCGv_i32 tmp1, tmp2;
153
154 tmp1 = tcg_const_i32(exception);
155 tmp2 = tcg_const_i32(error_code);
156 gen_helper_excp(tmp1, tmp2);
157 tcg_temp_free_i32(tmp2);
158 tcg_temp_free_i32(tmp1);
159 }
160
161 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
162 {
163 tcg_gen_movi_i64(cpu_pc, ctx->pc);
164 gen_excp_1(exception, error_code);
165 return EXIT_NORETURN;
166 }
167
168 static inline ExitStatus gen_invalid(DisasContext *ctx)
169 {
170 return gen_excp(ctx, EXCP_OPCDEC, 0);
171 }
172
173 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
174 {
175 TCGv tmp = tcg_temp_new();
176 TCGv_i32 tmp32 = tcg_temp_new_i32();
177 tcg_gen_qemu_ld32u(tmp, t1, flags);
178 tcg_gen_trunc_i64_i32(tmp32, tmp);
179 gen_helper_memory_to_f(t0, tmp32);
180 tcg_temp_free_i32(tmp32);
181 tcg_temp_free(tmp);
182 }
183
184 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
185 {
186 TCGv tmp = tcg_temp_new();
187 tcg_gen_qemu_ld64(tmp, t1, flags);
188 gen_helper_memory_to_g(t0, tmp);
189 tcg_temp_free(tmp);
190 }
191
192 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
193 {
194 TCGv tmp = tcg_temp_new();
195 TCGv_i32 tmp32 = tcg_temp_new_i32();
196 tcg_gen_qemu_ld32u(tmp, t1, flags);
197 tcg_gen_trunc_i64_i32(tmp32, tmp);
198 gen_helper_memory_to_s(t0, tmp32);
199 tcg_temp_free_i32(tmp32);
200 tcg_temp_free(tmp);
201 }
202
203 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
204 {
205 tcg_gen_qemu_ld32s(t0, t1, flags);
206 tcg_gen_mov_i64(cpu_lock_addr, t1);
207 tcg_gen_mov_i64(cpu_lock_value, t0);
208 }
209
210 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
211 {
212 tcg_gen_qemu_ld64(t0, t1, flags);
213 tcg_gen_mov_i64(cpu_lock_addr, t1);
214 tcg_gen_mov_i64(cpu_lock_value, t0);
215 }
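/* The two load-locked forms above record both the address and the
   value loaded; gen_store_conditional below compares against these
   to decide whether the paired STL_C/STQ_C succeeds. */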
216
217 static inline void gen_load_mem(DisasContext *ctx,
218 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
219 int flags),
220 int ra, int rb, int32_t disp16, int fp,
221 int clear)
222 {
223 TCGv addr, va;
224
225 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
226 prefetches, which we can treat as nops. No worries about
227 missed exceptions here. */
228 if (unlikely(ra == 31)) {
229 return;
230 }
231
232 addr = tcg_temp_new();
233 if (rb != 31) {
234 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
235 if (clear) {
236 tcg_gen_andi_i64(addr, addr, ~0x7);
237 }
238 } else {
239 if (clear) {
240 disp16 &= ~0x7;
241 }
242 tcg_gen_movi_i64(addr, disp16);
243 }
244
245 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
246 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
247
248 tcg_temp_free(addr);
249 }
250
251 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
252 {
253 TCGv_i32 tmp32 = tcg_temp_new_i32();
254 TCGv tmp = tcg_temp_new();
255 gen_helper_f_to_memory(tmp32, t0);
256 tcg_gen_extu_i32_i64(tmp, tmp32);
257 tcg_gen_qemu_st32(tmp, t1, flags);
258 tcg_temp_free(tmp);
259 tcg_temp_free_i32(tmp32);
260 }
261
262 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
263 {
264 TCGv tmp = tcg_temp_new();
265 gen_helper_g_to_memory(tmp, t0);
266 tcg_gen_qemu_st64(tmp, t1, flags);
267 tcg_temp_free(tmp);
268 }
269
270 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
271 {
272 TCGv_i32 tmp32 = tcg_temp_new_i32();
273 TCGv tmp = tcg_temp_new();
274 gen_helper_s_to_memory(tmp32, t0);
275 tcg_gen_extu_i32_i64(tmp, tmp32);
276 tcg_gen_qemu_st32(tmp, t1, flags);
277 tcg_temp_free(tmp);
278 tcg_temp_free_i32(tmp32);
279 }
280
281 static inline void gen_store_mem(DisasContext *ctx,
282 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
283 int flags),
284 int ra, int rb, int32_t disp16, int fp,
285 int clear)
286 {
287 TCGv addr, va;
288
289 addr = tcg_temp_new();
290 if (rb != 31) {
291 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
292 if (clear) {
293 tcg_gen_andi_i64(addr, addr, ~0x7);
294 }
295 } else {
296 if (clear) {
297 disp16 &= ~0x7;
298 }
299 tcg_gen_movi_i64(addr, disp16);
300 }
301
302 if (ra == 31) {
303 va = tcg_const_i64(0);
304 } else {
305 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
306 }
307 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
308
309 tcg_temp_free(addr);
310 if (ra == 31) {
311 tcg_temp_free(va);
312 }
313 }
314
315 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
316 int32_t disp16, int quad)
317 {
318 TCGv addr;
319
320 if (ra == 31) {
321 /* ??? Don't bother storing anything. The user can't tell
322 the difference, since the zero register always reads zero. */
323 return NO_EXIT;
324 }
325
326 #if defined(CONFIG_USER_ONLY)
327 addr = cpu_lock_st_addr;
328 #else
329 addr = tcg_temp_local_new();
330 #endif
331
332 if (rb != 31) {
333 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
334 } else {
335 tcg_gen_movi_i64(addr, disp16);
336 }
337
338 #if defined(CONFIG_USER_ONLY)
339 /* ??? This is handled via a complicated version of compare-and-swap
340 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
341 in TCG so that this isn't necessary. */
342 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
343 #else
344 /* ??? In system mode we are never multi-threaded, so CAS can be
345 implemented via a non-atomic load-compare-store sequence. */
346 {
347 int lab_fail, lab_done;
348 TCGv val;
349
350 lab_fail = gen_new_label();
351 lab_done = gen_new_label();
352 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
353
354 val = tcg_temp_new();
355 if (quad) {
356 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
357 } else {
358 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
359 }
360 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
361
362 if (quad) {
363 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
364 } else {
365 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
366 }
367 tcg_gen_movi_i64(cpu_ir[ra], 1);
368 tcg_gen_br(lab_done);
369
370 gen_set_label(lab_fail);
371 tcg_gen_movi_i64(cpu_ir[ra], 0);
372
373 gen_set_label(lab_done);
374 tcg_gen_movi_i64(cpu_lock_addr, -1);
375
376 tcg_temp_free(addr);
377 return NO_EXIT;
378 }
379 #endif
380 }
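/* For reference, the guest sequence these two paths implement is the
   classic LL/SC retry loop, e.g. an atomic increment:
       1: ldq_l  t0, 0(a0)     # load-locked, records lock_addr/value
          addq   t0, 1, t0
          stq_c  t0, 0(a0)     # t0 := 1 on success, 0 on failure
          beq    t0, 1b
   The non-atomic load-compare-store above suffices only because
   system mode is single-threaded, as noted. */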
381
382 static int use_goto_tb(DisasContext *ctx, uint64_t dest)
383 {
384 /* Check for the dest on the same page as the start of the TB. We
385 also want to suppress goto_tb in the case of single-stepping and IO. */
386 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
387 && !ctx->env->singlestep_enabled
388 && !(ctx->tb->cflags & CF_LAST_IO));
389 }
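/* The same-page condition is the usual requirement for direct TB
   chaining: a TB is invalidated along with the page containing it,
   so a goto_tb link into a different page could otherwise survive a
   remapping of that page and run stale code. */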
390
391 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
392 {
393 uint64_t dest = ctx->pc + (disp << 2);
394
395 if (ra != 31) {
396 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
397 }
398
399 /* Notice branch-to-next; used to initialize RA with the PC. */
400 if (disp == 0) {
401 return NO_EXIT;
402 } else if (use_goto_tb(ctx, dest)) {
403 tcg_gen_goto_tb(0);
404 tcg_gen_movi_i64(cpu_pc, dest);
405 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
406 return EXIT_GOTO_TB;
407 } else {
408 tcg_gen_movi_i64(cpu_pc, dest);
409 return EXIT_PC_UPDATED;
410 }
411 }
412
413 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
414 TCGv cmp, int32_t disp)
415 {
416 uint64_t dest = ctx->pc + (disp << 2);
417 int lab_true = gen_new_label();
418
419 if (use_goto_tb(ctx, dest)) {
420 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
421
422 tcg_gen_goto_tb(0);
423 tcg_gen_movi_i64(cpu_pc, ctx->pc);
424 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
425
426 gen_set_label(lab_true);
427 tcg_gen_goto_tb(1);
428 tcg_gen_movi_i64(cpu_pc, dest);
429 tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);
430
431 return EXIT_GOTO_TB;
432 } else {
433 int lab_over = gen_new_label();
434
435 /* ??? Consider using either
436 movi pc, next
437 addi tmp, pc, disp
438 movcond pc, cond, 0, tmp, pc
439 or
440 setcond tmp, cond, 0
441 movi pc, next
442 neg tmp, tmp
443 andi tmp, tmp, disp
444 add pc, pc, tmp
445 The current diamond subgraph surely isn't efficient. */
446
447 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
448 tcg_gen_movi_i64(cpu_pc, ctx->pc);
449 tcg_gen_br(lab_over);
450 gen_set_label(lab_true);
451 tcg_gen_movi_i64(cpu_pc, dest);
452 gen_set_label(lab_over);
453
454 return EXIT_PC_UPDATED;
455 }
456 }
457
458 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
459 int32_t disp, int mask)
460 {
461 TCGv cmp_tmp;
462
463 if (unlikely(ra == 31)) {
464 cmp_tmp = tcg_const_i64(0);
465 } else {
466 cmp_tmp = tcg_temp_new();
467 if (mask) {
468 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
469 } else {
470 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
471 }
472 }
473
474 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
475 }
476
477 /* Fold -0.0 for comparison with COND. */
478
479 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
480 {
481 uint64_t mzero = 1ull << 63;
482
483 switch (cond) {
484 case TCG_COND_LE:
485 case TCG_COND_GT:
486 /* For <= or >, the -0.0 value directly compares the way we want. */
487 tcg_gen_mov_i64(dest, src);
488 break;
489
490 case TCG_COND_EQ:
491 case TCG_COND_NE:
492 /* For == or !=, we can simply mask off the sign bit and compare. */
493 tcg_gen_andi_i64(dest, src, mzero - 1);
494 break;
495
496 case TCG_COND_GE:
497 case TCG_COND_LT:
498 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
499 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
500 tcg_gen_neg_i64(dest, dest);
501 tcg_gen_and_i64(dest, dest, src);
502 break;
503
504 default:
505 abort();
506 }
507 }
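/* Worked example for the >= / < case: src == -0.0 is the one value
   that must read as +0.0.  setcond_ne(src, mzero) yields 0 there,
   so the neg gives 0 and the AND produces +0.0; for any other src
   the setcond yields 1, the neg gives all-ones, and src passes
   through unchanged. */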
508
509 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
510 int32_t disp)
511 {
512 TCGv cmp_tmp;
513
514 if (unlikely(ra == 31)) {
515 /* Very uncommon case, but easier to optimize it to an integer
516 comparison than continuing with the floating point comparison. */
517 return gen_bcond(ctx, cond, ra, disp, 0);
518 }
519
520 cmp_tmp = tcg_temp_new();
521 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
522 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
523 }
524
525 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
526 int islit, uint8_t lit, int mask)
527 {
528 TCGCond inv_cond = tcg_invert_cond(cond);
529 int l1;
530
531 if (unlikely(rc == 31))
532 return;
533
534 l1 = gen_new_label();
535
536 if (ra != 31) {
537 if (mask) {
538 TCGv tmp = tcg_temp_new();
539 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
540 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
541 tcg_temp_free(tmp);
542 } else
543 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
544 } else {
545 /* Very uncommon case - Do not bother to optimize. */
546 TCGv tmp = tcg_const_i64(0);
547 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
548 tcg_temp_free(tmp);
549 }
550
551 if (islit)
552 tcg_gen_movi_i64(cpu_ir[rc], lit);
553 else
554 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
555 gen_set_label(l1);
556 }
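/* E.g. CMOVEQ ra, rb, rc copies rb (or the literal) into rc only
   when ra == 0; the mask forms CMOVLBS/CMOVLBC test just bit 0. */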
557
558 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
559 {
560 TCGv cmp_tmp;
561 int l1;
562
563 if (unlikely(rc == 31)) {
564 return;
565 }
566
567 cmp_tmp = tcg_temp_new();
568 if (unlikely(ra == 31)) {
569 tcg_gen_movi_i64(cmp_tmp, 0);
570 } else {
571 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
572 }
573
574 l1 = gen_new_label();
575 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
576 tcg_temp_free(cmp_tmp);
577
578 if (rb != 31)
579 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
580 else
581 tcg_gen_movi_i64(cpu_fir[rc], 0);
582 gen_set_label(l1);
583 }
584
585 #define QUAL_RM_N 0x080 /* Round mode nearest even */
586 #define QUAL_RM_C 0x000 /* Round mode chopped */
587 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
588 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
589 #define QUAL_RM_MASK 0x0c0
590
591 #define QUAL_U 0x100 /* Underflow enable (fp output) */
592 #define QUAL_V 0x100 /* Overflow enable (int output) */
593 #define QUAL_S 0x400 /* Software completion enable */
594 #define QUAL_I 0x200 /* Inexact detection enable */
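/* For example, an IEEE operation with the full /SUI qualifier set
   and dynamic rounding (written /SUID) carries
   QUAL_S | QUAL_U | QUAL_I | QUAL_RM_D == 0x7c0 in the upper bits
   of fn11; the low six bits (fpfn) select the base operation. */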
595
596 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
597 {
598 TCGv_i32 tmp;
599
600 fn11 &= QUAL_RM_MASK;
601 if (fn11 == ctx->tb_rm) {
602 return;
603 }
604 ctx->tb_rm = fn11;
605
606 tmp = tcg_temp_new_i32();
607 switch (fn11) {
608 case QUAL_RM_N:
609 tcg_gen_movi_i32(tmp, float_round_nearest_even);
610 break;
611 case QUAL_RM_C:
612 tcg_gen_movi_i32(tmp, float_round_to_zero);
613 break;
614 case QUAL_RM_M:
615 tcg_gen_movi_i32(tmp, float_round_down);
616 break;
617 case QUAL_RM_D:
618 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
619 break;
620 }
621
622 #if defined(CONFIG_SOFTFLOAT_INLINE)
623 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
624 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
625 sets the one field. */
626 tcg_gen_st8_i32(tmp, cpu_env,
627 offsetof(CPUState, fp_status.float_rounding_mode));
628 #else
629 gen_helper_setroundmode(tmp);
630 #endif
631
632 tcg_temp_free_i32(tmp);
633 }
634
635 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
636 {
637 TCGv_i32 tmp;
638
639 fn11 &= QUAL_U;
640 if (fn11 == ctx->tb_ftz) {
641 return;
642 }
643 ctx->tb_ftz = fn11;
644
645 tmp = tcg_temp_new_i32();
646 if (fn11) {
647 /* Underflow is enabled, use the FPCR setting. */
648 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
649 } else {
650 /* Underflow is disabled, force flush-to-zero. */
651 tcg_gen_movi_i32(tmp, 1);
652 }
653
654 #if defined(CONFIG_SOFTFLOAT_INLINE)
655 tcg_gen_st8_i32(tmp, cpu_env,
656 offsetof(CPUState, fp_status.flush_to_zero));
657 #else
658 gen_helper_setflushzero(tmp);
659 #endif
660
661 tcg_temp_free_i32(tmp);
662 }
663
664 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
665 {
666 TCGv val = tcg_temp_new();
667 if (reg == 31) {
668 tcg_gen_movi_i64(val, 0);
669 } else if (fn11 & QUAL_S) {
670 gen_helper_ieee_input_s(val, cpu_fir[reg]);
671 } else if (is_cmp) {
672 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
673 } else {
674 gen_helper_ieee_input(val, cpu_fir[reg]);
675 }
676 return val;
677 }
678
679 static void gen_fp_exc_clear(void)
680 {
681 #if defined(CONFIG_SOFTFLOAT_INLINE)
682 TCGv_i32 zero = tcg_const_i32(0);
683 tcg_gen_st8_i32(zero, cpu_env,
684 offsetof(CPUState, fp_status.float_exception_flags));
685 tcg_temp_free_i32(zero);
686 #else
687 gen_helper_fp_exc_clear();
688 #endif
689 }
690
691 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
692 {
693 /* ??? We ought to be able to do something with imprecise exceptions.
694 E.g. notice we're still in the trap shadow of something within the
695 TB and do not generate the code to signal the exception; end the TB
696 when an exception is forced to arrive, either by consumption of a
697 register value or TRAPB or EXCB. */
698 TCGv_i32 exc = tcg_temp_new_i32();
699 TCGv_i32 reg;
700
701 #if defined(CONFIG_SOFTFLOAT_INLINE)
702 tcg_gen_ld8u_i32(exc, cpu_env,
703 offsetof(CPUState, fp_status.float_exception_flags));
704 #else
705 gen_helper_fp_exc_get(exc);
706 #endif
707
708 if (ignore) {
709 tcg_gen_andi_i32(exc, exc, ~ignore);
710 }
711
712 /* ??? Pass in the regno of the destination so that the helper can
713 set EXC_MASK, which contains a bitmask of destination registers
714 that have caused arithmetic traps. A simple userspace emulation
715 does not require this. We do need it for a guest kernel's entArith,
716 or if we were to do something clever with imprecise exceptions. */
717 reg = tcg_const_i32(rc + 32);
718
719 if (fn11 & QUAL_S) {
720 gen_helper_fp_exc_raise_s(exc, reg);
721 } else {
722 gen_helper_fp_exc_raise(exc, reg);
723 }
724
725 tcg_temp_free_i32(reg);
726 tcg_temp_free_i32(exc);
727 }
728
729 static inline void gen_fp_exc_raise(int rc, int fn11)
730 {
731 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
732 }
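/* I.e. unless the /I qualifier was supplied, an inexact result is
   not an exception, so that flag is masked out before raising. */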
733
734 static void gen_fcvtlq(int rb, int rc)
735 {
736 if (unlikely(rc == 31)) {
737 return;
738 }
739 if (unlikely(rb == 31)) {
740 tcg_gen_movi_i64(cpu_fir[rc], 0);
741 } else {
742 TCGv tmp = tcg_temp_new();
743
744 /* The arithmetic right shift here, plus the sign-extended mask below
745 yields a sign-extended result without an explicit ext32s_i64. */
746 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
747 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
748 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
749 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
750 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
751
752 tcg_temp_free(tmp);
753 }
754 }
755
756 static void gen_fcvtql(int rb, int rc)
757 {
758 if (unlikely(rc == 31)) {
759 return;
760 }
761 if (unlikely(rb == 31)) {
762 tcg_gen_movi_i64(cpu_fir[rc], 0);
763 } else {
764 TCGv tmp = tcg_temp_new();
765
766 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
767 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
768 tcg_gen_shli_i64(tmp, tmp, 32);
769 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
770 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
771
772 tcg_temp_free(tmp);
773 }
774 }
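/* The two conversions above implement the in-register format of a
   longword held in an FP register: integer bits <31:30> live in
   register bits <63:62>, integer bits <29:0> in register bits
   <58:29>, and the remaining register bits read as zero. */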
775
776 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
777 {
778 if (rb != 31) {
779 int lab = gen_new_label();
780 TCGv tmp = tcg_temp_new();
781
782 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
783 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
784 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
785
786 gen_set_label(lab);
787 }
788 gen_fcvtql(rb, rc);
789 }
790
791 #define FARITH2(name) \
792 static inline void glue(gen_f, name)(int rb, int rc) \
793 { \
794 if (unlikely(rc == 31)) { \
795 return; \
796 } \
797 if (rb != 31) { \
798 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
799 } else { \
800 TCGv tmp = tcg_const_i64(0); \
801 gen_helper_ ## name (cpu_fir[rc], tmp); \
802 tcg_temp_free(tmp); \
803 } \
804 }
805
806 /* ??? VAX instruction qualifiers ignored. */
807 FARITH2(sqrtf)
808 FARITH2(sqrtg)
809 FARITH2(cvtgf)
810 FARITH2(cvtgq)
811 FARITH2(cvtqf)
812 FARITH2(cvtqg)
813
814 static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
815 int rb, int rc, int fn11)
816 {
817 TCGv vb;
818
819 /* ??? This is wrong: the instruction is not a nop, it still may
820 raise exceptions. */
821 if (unlikely(rc == 31)) {
822 return;
823 }
824
825 gen_qual_roundmode(ctx, fn11);
826 gen_qual_flushzero(ctx, fn11);
827 gen_fp_exc_clear();
828
829 vb = gen_ieee_input(rb, fn11, 0);
830 helper(cpu_fir[rc], vb);
831 tcg_temp_free(vb);
832
833 gen_fp_exc_raise(rc, fn11);
834 }
835
836 #define IEEE_ARITH2(name) \
837 static inline void glue(gen_f, name)(DisasContext *ctx, \
838 int rb, int rc, int fn11) \
839 { \
840 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
841 }
842 IEEE_ARITH2(sqrts)
843 IEEE_ARITH2(sqrtt)
844 IEEE_ARITH2(cvtst)
845 IEEE_ARITH2(cvtts)
846
847 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
848 {
849 TCGv vb;
850 int ignore = 0;
851
852 /* ??? This is wrong: the instruction is not a nop, it still may
853 raise exceptions. */
854 if (unlikely(rc == 31)) {
855 return;
856 }
857
858 /* No need to set flushzero, since we have an integer output. */
859 gen_fp_exc_clear();
860 vb = gen_ieee_input(rb, fn11, 0);
861
862 /* Almost all integer conversions use cropped rounding, and most
863 also do not have integer overflow enabled. Special case that. */
864 switch (fn11) {
865 case QUAL_RM_C:
866 gen_helper_cvttq_c(cpu_fir[rc], vb);
867 break;
868 case QUAL_V | QUAL_RM_C:
869 case QUAL_S | QUAL_V | QUAL_RM_C:
870 ignore = float_flag_inexact;
871 /* FALLTHRU */
872 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
873 gen_helper_cvttq_svic(cpu_fir[rc], vb);
874 break;
875 default:
876 gen_qual_roundmode(ctx, fn11);
877 gen_helper_cvttq(cpu_fir[rc], vb);
878 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
879 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
880 break;
881 }
882 tcg_temp_free(vb);
883
884 gen_fp_exc_raise_ignore(rc, fn11, ignore);
885 }
886
887 static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
888 int rb, int rc, int fn11)
889 {
890 TCGv vb;
891
892 /* ??? This is wrong: the instruction is not a nop, it still may
893 raise exceptions. */
894 if (unlikely(rc == 31)) {
895 return;
896 }
897
898 gen_qual_roundmode(ctx, fn11);
899
900 if (rb == 31) {
901 vb = tcg_const_i64(0);
902 } else {
903 vb = cpu_fir[rb];
904 }
905
906 /* The only exception that can be raised by integer conversion
907 is inexact. Thus we only need to worry about exceptions when
908 inexact handling is requested. */
909 if (fn11 & QUAL_I) {
910 gen_fp_exc_clear();
911 helper(cpu_fir[rc], vb);
912 gen_fp_exc_raise(rc, fn11);
913 } else {
914 helper(cpu_fir[rc], vb);
915 }
916
917 if (rb == 31) {
918 tcg_temp_free(vb);
919 }
920 }
921
922 #define IEEE_INTCVT(name) \
923 static inline void glue(gen_f, name)(DisasContext *ctx, \
924 int rb, int rc, int fn11) \
925 { \
926 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
927 }
928 IEEE_INTCVT(cvtqs)
929 IEEE_INTCVT(cvtqt)
930
931 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
932 {
933 TCGv va, vb, vmask;
934 int za = 0, zb = 0;
935
936 if (unlikely(rc == 31)) {
937 return;
938 }
939
940 vmask = tcg_const_i64(mask);
941
942 TCGV_UNUSED_I64(va);
943 if (ra == 31) {
944 if (inv_a) {
945 va = vmask;
946 } else {
947 za = 1;
948 }
949 } else {
950 va = tcg_temp_new_i64();
951 tcg_gen_mov_i64(va, cpu_fir[ra]);
952 if (inv_a) {
953 tcg_gen_andc_i64(va, vmask, va);
954 } else {
955 tcg_gen_and_i64(va, va, vmask);
956 }
957 }
958
959 TCGV_UNUSED_I64(vb);
960 if (rb == 31) {
961 zb = 1;
962 } else {
963 vb = tcg_temp_new_i64();
964 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
965 }
966
967 switch (za << 1 | zb) {
968 case 0 | 0:
969 tcg_gen_or_i64(cpu_fir[rc], va, vb);
970 break;
971 case 0 | 1:
972 tcg_gen_mov_i64(cpu_fir[rc], va);
973 break;
974 case 2 | 0:
975 tcg_gen_mov_i64(cpu_fir[rc], vb);
976 break;
977 case 2 | 1:
978 tcg_gen_movi_i64(cpu_fir[rc], 0);
979 break;
980 }
981
982 tcg_temp_free(vmask);
983 if (ra != 31) {
984 tcg_temp_free(va);
985 }
986 if (rb != 31) {
987 tcg_temp_free(vb);
988 }
989 }
990
991 static inline void gen_fcpys(int ra, int rb, int rc)
992 {
993 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
994 }
995
996 static inline void gen_fcpysn(int ra, int rb, int rc)
997 {
998 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
999 }
1000
1001 static inline void gen_fcpyse(int ra, int rb, int rc)
1002 {
1003 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
1004 }
1005
1006 #define FARITH3(name) \
1007 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1008 { \
1009 TCGv va, vb; \
1010 \
1011 if (unlikely(rc == 31)) { \
1012 return; \
1013 } \
1014 if (ra == 31) { \
1015 va = tcg_const_i64(0); \
1016 } else { \
1017 va = cpu_fir[ra]; \
1018 } \
1019 if (rb == 31) { \
1020 vb = tcg_const_i64(0); \
1021 } else { \
1022 vb = cpu_fir[rb]; \
1023 } \
1024 \
1025 gen_helper_ ## name (cpu_fir[rc], va, vb); \
1026 \
1027 if (ra == 31) { \
1028 tcg_temp_free(va); \
1029 } \
1030 if (rb == 31) { \
1031 tcg_temp_free(vb); \
1032 } \
1033 }
1034
1035 /* ??? VAX instruction qualifiers ignored. */
1036 FARITH3(addf)
1037 FARITH3(subf)
1038 FARITH3(mulf)
1039 FARITH3(divf)
1040 FARITH3(addg)
1041 FARITH3(subg)
1042 FARITH3(mulg)
1043 FARITH3(divg)
1044 FARITH3(cmpgeq)
1045 FARITH3(cmpglt)
1046 FARITH3(cmpgle)
1047
1048 static void gen_ieee_arith3(DisasContext *ctx,
1049 void (*helper)(TCGv, TCGv, TCGv),
1050 int ra, int rb, int rc, int fn11)
1051 {
1052 TCGv va, vb;
1053
1054 /* ??? This is wrong: the instruction is not a nop, it still may
1055 raise exceptions. */
1056 if (unlikely(rc == 31)) {
1057 return;
1058 }
1059
1060 gen_qual_roundmode(ctx, fn11);
1061 gen_qual_flushzero(ctx, fn11);
1062 gen_fp_exc_clear();
1063
1064 va = gen_ieee_input(ra, fn11, 0);
1065 vb = gen_ieee_input(rb, fn11, 0);
1066 helper(cpu_fir[rc], va, vb);
1067 tcg_temp_free(va);
1068 tcg_temp_free(vb);
1069
1070 gen_fp_exc_raise(rc, fn11);
1071 }
1072
1073 #define IEEE_ARITH3(name) \
1074 static inline void glue(gen_f, name)(DisasContext *ctx, \
1075 int ra, int rb, int rc, int fn11) \
1076 { \
1077 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1078 }
1079 IEEE_ARITH3(adds)
1080 IEEE_ARITH3(subs)
1081 IEEE_ARITH3(muls)
1082 IEEE_ARITH3(divs)
1083 IEEE_ARITH3(addt)
1084 IEEE_ARITH3(subt)
1085 IEEE_ARITH3(mult)
1086 IEEE_ARITH3(divt)
1087
1088 static void gen_ieee_compare(DisasContext *ctx,
1089 void (*helper)(TCGv, TCGv, TCGv),
1090 int ra, int rb, int rc, int fn11)
1091 {
1092 TCGv va, vb;
1093
1094 /* ??? This is wrong: the instruction is not a nop, it still may
1095 raise exceptions. */
1096 if (unlikely(rc == 31)) {
1097 return;
1098 }
1099
1100 gen_fp_exc_clear();
1101
1102 va = gen_ieee_input(ra, fn11, 1);
1103 vb = gen_ieee_input(rb, fn11, 1);
1104 helper(cpu_fir[rc], va, vb);
1105 tcg_temp_free(va);
1106 tcg_temp_free(vb);
1107
1108 gen_fp_exc_raise(rc, fn11);
1109 }
1110
1111 #define IEEE_CMP3(name) \
1112 static inline void glue(gen_f, name)(DisasContext *ctx, \
1113 int ra, int rb, int rc, int fn11) \
1114 { \
1115 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1116 }
1117 IEEE_CMP3(cmptun)
1118 IEEE_CMP3(cmpteq)
1119 IEEE_CMP3(cmptlt)
1120 IEEE_CMP3(cmptle)
1121
1122 static inline uint64_t zapnot_mask(uint8_t lit)
1123 {
1124 uint64_t mask = 0;
1125 int i;
1126
1127 for (i = 0; i < 8; ++i) {
1128 if ((lit >> i) & 1)
1129 mask |= 0xffull << (i * 8);
1130 }
1131 return mask;
1132 }
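/* E.g. zapnot_mask(0x0f) == 0x00000000ffffffffull: each set bit of
   the 8-bit literal preserves the corresponding byte of the operand. */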
1133
1134 /* Implement zapnot with an immediate operand, which expands to some
1135 form of immediate AND. This is a basic building block in the
1136 definition of many of the other byte manipulation instructions. */
1137 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1138 {
1139 switch (lit) {
1140 case 0x00:
1141 tcg_gen_movi_i64(dest, 0);
1142 break;
1143 case 0x01:
1144 tcg_gen_ext8u_i64(dest, src);
1145 break;
1146 case 0x03:
1147 tcg_gen_ext16u_i64(dest, src);
1148 break;
1149 case 0x0f:
1150 tcg_gen_ext32u_i64(dest, src);
1151 break;
1152 case 0xff:
1153 tcg_gen_mov_i64(dest, src);
1154 break;
1155 default:
1156 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1157 break;
1158 }
1159 }
1160
1161 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1162 {
1163 if (unlikely(rc == 31))
1164 return;
1165 else if (unlikely(ra == 31))
1166 tcg_gen_movi_i64(cpu_ir[rc], 0);
1167 else if (islit)
1168 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1169 else
1170 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1171 }
1172
1173 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1174 {
1175 if (unlikely(rc == 31))
1176 return;
1177 else if (unlikely(ra == 31))
1178 tcg_gen_movi_i64(cpu_ir[rc], 0);
1179 else if (islit)
1180 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1181 else
1182 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1183 }
1184
1185
1186 /* EXTWH, EXTLH, EXTQH */
1187 static void gen_ext_h(int ra, int rb, int rc, int islit,
1188 uint8_t lit, uint8_t byte_mask)
1189 {
1190 if (unlikely(rc == 31))
1191 return;
1192 else if (unlikely(ra == 31))
1193 tcg_gen_movi_i64(cpu_ir[rc], 0);
1194 else {
1195 if (islit) {
1196 lit = (64 - (lit & 7) * 8) & 0x3f;
1197 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1198 } else {
1199 TCGv tmp1 = tcg_temp_new();
1200 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1201 tcg_gen_shli_i64(tmp1, tmp1, 3);
1202 tcg_gen_neg_i64(tmp1, tmp1);
1203 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1204 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1205 tcg_temp_free(tmp1);
1206 }
1207 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1208 }
1209 }
1210
1211 /* EXTBL, EXTWL, EXTLL, EXTQL */
1212 static void gen_ext_l(int ra, int rb, int rc, int islit,
1213 uint8_t lit, uint8_t byte_mask)
1214 {
1215 if (unlikely(rc == 31))
1216 return;
1217 else if (unlikely(ra == 31))
1218 tcg_gen_movi_i64(cpu_ir[rc], 0);
1219 else {
1220 if (islit) {
1221 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1222 } else {
1223 TCGv tmp = tcg_temp_new();
1224 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1225 tcg_gen_shli_i64(tmp, tmp, 3);
1226 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1227 tcg_temp_free(tmp);
1228 }
1229 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1230 }
1231 }
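/* These extracts form the second half of the canonical unaligned
   access sequence; e.g. loading the byte addressed by a0:
       ldq_u  t0, 0(a0)     # fetch the aligned quadword around it
       extbl  t0, a0, v0    # shift right by 8*(a0 & 7) and zap */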
1232
1233 /* INSWH, INSLH, INSQH */
1234 static void gen_ins_h(int ra, int rb, int rc, int islit,
1235 uint8_t lit, uint8_t byte_mask)
1236 {
1237 if (unlikely(rc == 31))
1238 return;
1239 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1240 tcg_gen_movi_i64(cpu_ir[rc], 0);
1241 else {
1242 TCGv tmp = tcg_temp_new();
1243
1244 /* The instruction description has us left-shift the byte mask
1245 and extract bits <15:8> and apply that zap at the end. This
1246 is equivalent to simply performing the zap first and shifting
1247 afterward. */
1248 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1249
1250 if (islit) {
1251 /* Note that we have handled the lit==0 case above. */
1252 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1253 } else {
1254 TCGv shift = tcg_temp_new();
1255
1256 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1257 Do this portably by splitting the shift into two parts:
1258 shift_count-1 and 1. Arrange for the -1 by using
1259 ones-complement instead of twos-complement in the negation:
1260 ~((B & 7) * 8) & 63. */
1261
1262 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1263 tcg_gen_shli_i64(shift, shift, 3);
1264 tcg_gen_not_i64(shift, shift);
1265 tcg_gen_andi_i64(shift, shift, 0x3f);
1266
1267 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1268 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1269 tcg_temp_free(shift);
1270 }
1271 tcg_temp_free(tmp);
1272 }
1273 }
1274
1275 /* INSBL, INSWL, INSLL, INSQL */
1276 static void gen_ins_l(int ra, int rb, int rc, int islit,
1277 uint8_t lit, uint8_t byte_mask)
1278 {
1279 if (unlikely(rc == 31))
1280 return;
1281 else if (unlikely(ra == 31))
1282 tcg_gen_movi_i64(cpu_ir[rc], 0);
1283 else {
1284 TCGv tmp = tcg_temp_new();
1285
1286 /* The instruction description has us left-shift the byte mask
1287 the same number of byte slots as the data and apply the zap
1288 at the end. This is equivalent to simply performing the zap
1289 first and shifting afterward. */
1290 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1291
1292 if (islit) {
1293 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1294 } else {
1295 TCGv shift = tcg_temp_new();
1296 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1297 tcg_gen_shli_i64(shift, shift, 3);
1298 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1299 tcg_temp_free(shift);
1300 }
1301 tcg_temp_free(tmp);
1302 }
1303 }
1304
1305 /* MSKWH, MSKLH, MSKQH */
1306 static void gen_msk_h(int ra, int rb, int rc, int islit,
1307 uint8_t lit, uint8_t byte_mask)
1308 {
1309 if (unlikely(rc == 31))
1310 return;
1311 else if (unlikely(ra == 31))
1312 tcg_gen_movi_i64(cpu_ir[rc], 0);
1313 else if (islit) {
1314 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1315 } else {
1316 TCGv shift = tcg_temp_new();
1317 TCGv mask = tcg_temp_new();
1318
1319 /* The instruction description is as above, where the byte_mask
1320 is shifted left, and then we extract bits <15:8>. This can be
1321 emulated with a right-shift on the expanded byte mask. This
1322 requires extra care because for an input <2:0> == 0 we need a
1323 shift of 64 bits in order to generate a zero. This is done by
1324 splitting the shift into two parts, the variable shift - 1
1325 followed by a constant 1 shift. The code we expand below is
1326 equivalent to ~((B & 7) * 8) & 63. */
1327
1328 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1329 tcg_gen_shli_i64(shift, shift, 3);
1330 tcg_gen_not_i64(shift, shift);
1331 tcg_gen_andi_i64(shift, shift, 0x3f);
1332 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1333 tcg_gen_shr_i64(mask, mask, shift);
1334 tcg_gen_shri_i64(mask, mask, 1);
1335
1336 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1337
1338 tcg_temp_free(mask);
1339 tcg_temp_free(shift);
1340 }
1341 }
1342
1343 /* MSKBL, MSKWL, MSKLL, MSKQL */
1344 static void gen_msk_l(int ra, int rb, int rc, int islit,
1345 uint8_t lit, uint8_t byte_mask)
1346 {
1347 if (unlikely(rc == 31))
1348 return;
1349 else if (unlikely(ra == 31))
1350 tcg_gen_movi_i64(cpu_ir[rc], 0);
1351 else if (islit) {
1352 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1353 } else {
1354 TCGv shift = tcg_temp_new();
1355 TCGv mask = tcg_temp_new();
1356
1357 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1358 tcg_gen_shli_i64(shift, shift, 3);
1359 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1360 tcg_gen_shl_i64(mask, mask, shift);
1361
1362 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1363
1364 tcg_temp_free(mask);
1365 tcg_temp_free(shift);
1366 }
1367 }
1368
1369 /* Code to call arith3 helpers */
1370 #define ARITH3(name) \
1371 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1372 uint8_t lit) \
1373 { \
1374 if (unlikely(rc == 31)) \
1375 return; \
1376 \
1377 if (ra != 31) { \
1378 if (islit) { \
1379 TCGv tmp = tcg_const_i64(lit); \
1380 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1381 tcg_temp_free(tmp); \
1382 } else \
1383 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1384 } else { \
1385 TCGv tmp1 = tcg_const_i64(0); \
1386 if (islit) { \
1387 TCGv tmp2 = tcg_const_i64(lit); \
1388 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1389 tcg_temp_free(tmp2); \
1390 } else \
1391 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1392 tcg_temp_free(tmp1); \
1393 } \
1394 }
1395 ARITH3(cmpbge)
1396 ARITH3(addlv)
1397 ARITH3(sublv)
1398 ARITH3(addqv)
1399 ARITH3(subqv)
1400 ARITH3(umulh)
1401 ARITH3(mullv)
1402 ARITH3(mulqv)
1403 ARITH3(minub8)
1404 ARITH3(minsb8)
1405 ARITH3(minuw4)
1406 ARITH3(minsw4)
1407 ARITH3(maxub8)
1408 ARITH3(maxsb8)
1409 ARITH3(maxuw4)
1410 ARITH3(maxsw4)
1411 ARITH3(perr)
1412
1413 #define MVIOP2(name) \
1414 static inline void glue(gen_, name)(int rb, int rc) \
1415 { \
1416 if (unlikely(rc == 31)) \
1417 return; \
1418 if (unlikely(rb == 31)) \
1419 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1420 else \
1421 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1422 }
1423 MVIOP2(pklb)
1424 MVIOP2(pkwb)
1425 MVIOP2(unpkbl)
1426 MVIOP2(unpkbw)
1427
1428 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1429 int islit, uint8_t lit)
1430 {
1431 TCGv va, vb;
1432
1433 if (unlikely(rc == 31)) {
1434 return;
1435 }
1436
1437 if (ra == 31) {
1438 va = tcg_const_i64(0);
1439 } else {
1440 va = cpu_ir[ra];
1441 }
1442 if (islit) {
1443 vb = tcg_const_i64(lit);
1444 } else {
1445 vb = cpu_ir[rb];
1446 }
1447
1448 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1449
1450 if (ra == 31) {
1451 tcg_temp_free(va);
1452 }
1453 if (islit) {
1454 tcg_temp_free(vb);
1455 }
1456 }
1457
1458 static void gen_rx(int ra, int set)
1459 {
1460 TCGv_i32 tmp;
1461
1462 if (ra != 31) {
1463 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1464 }
1465
1466 tmp = tcg_const_i32(set);
1467 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1468 tcg_temp_free_i32(tmp);
1469 }
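/* Implements RC and RS ("read and clear/set"): the previous
   intr_flag is returned in ra (unless ra is $31), and the flag is
   then rewritten with the requested value. */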
1470
1471 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1472 {
1473 uint32_t palcode;
1474 int32_t disp21, disp16, disp12;
1475 uint16_t fn11;
1476 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
1477 uint8_t lit;
1478 ExitStatus ret;
1479
1480 /* Decode all instruction fields */
1481 opc = insn >> 26;
1482 ra = (insn >> 21) & 0x1F;
1483 rb = (insn >> 16) & 0x1F;
1484 rc = insn & 0x1F;
1485 real_islit = islit = (insn >> 12) & 1;
1486 if (rb == 31 && !islit) {
1487 islit = 1;
1488 lit = 0;
1489 } else
1490 lit = (insn >> 13) & 0xFF;
1491 palcode = insn & 0x03FFFFFF;
1492 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1493 disp16 = (int16_t)(insn & 0x0000FFFF);
1494 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1495 fn11 = (insn >> 5) & 0x000007FF;
1496 fpfn = fn11 & 0x3F;
1497 fn7 = (insn >> 5) & 0x0000007F;
1498 fn2 = (insn >> 5) & 0x00000003;
1499 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1500 opc, ra, rb, rc, disp16);
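/* Worked example: the canonical Alpha nop, "bis $31,$31,$31"
   (insn 0x47ff041f), decodes to opc 0x11, fn7 0x20 and
   ra = rb = rc = 31; the BIS case below then emits no TCG ops at
   all, since rc == 31. */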
1501
1502 ret = NO_EXIT;
1503 switch (opc) {
1504 case 0x00:
1505 /* CALL_PAL */
1506 #ifdef CONFIG_USER_ONLY
1507 if (palcode == 0x9E) {
1508 /* RDUNIQUE */
1509 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1510 break;
1511 } else if (palcode == 0x9F) {
1512 /* WRUNIQUE */
1513 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1514 break;
1515 }
1516 #endif
1517 if (palcode >= 0x80 && palcode < 0xC0) {
1518 /* Unprivileged PAL call */
1519 ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
1520 break;
1521 }
1522 #ifndef CONFIG_USER_ONLY
1523 if (palcode < 0x40) {
1524 /* Privileged PAL code */
1525 if (ctx->mem_idx != MMU_KERNEL_IDX) {
1526 goto invalid_opc;
1527 }
1528 ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
break;
1529 }
1530 #endif
1531 /* Invalid PAL call */
1532 goto invalid_opc;
1533 case 0x01:
1534 /* OPC01 */
1535 goto invalid_opc;
1536 case 0x02:
1537 /* OPC02 */
1538 goto invalid_opc;
1539 case 0x03:
1540 /* OPC03 */
1541 goto invalid_opc;
1542 case 0x04:
1543 /* OPC04 */
1544 goto invalid_opc;
1545 case 0x05:
1546 /* OPC05 */
1547 goto invalid_opc;
1548 case 0x06:
1549 /* OPC06 */
1550 goto invalid_opc;
1551 case 0x07:
1552 /* OPC07 */
1553 goto invalid_opc;
1554 case 0x08:
1555 /* LDA */
1556 if (likely(ra != 31)) {
1557 if (rb != 31)
1558 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1559 else
1560 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1561 }
1562 break;
1563 case 0x09:
1564 /* LDAH */
1565 if (likely(ra != 31)) {
1566 if (rb != 31)
1567 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1568 else
1569 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1570 }
1571 break;
1572 case 0x0A:
1573 /* LDBU */
1574 if (!(ctx->amask & AMASK_BWX))
1575 goto invalid_opc;
1576 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1577 break;
1578 case 0x0B:
1579 /* LDQ_U */
1580 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1581 break;
1582 case 0x0C:
1583 /* LDWU */
1584 if (!(ctx->amask & AMASK_BWX))
1585 goto invalid_opc;
1586 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1587 break;
1588 case 0x0D:
1589 /* STW */
1590 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1591 break;
1592 case 0x0E:
1593 /* STB */
1594 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1595 break;
1596 case 0x0F:
1597 /* STQ_U */
1598 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1599 break;
1600 case 0x10:
1601 switch (fn7) {
1602 case 0x00:
1603 /* ADDL */
1604 if (likely(rc != 31)) {
1605 if (ra != 31) {
1606 if (islit) {
1607 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1608 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1609 } else {
1610 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1611 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1612 }
1613 } else {
1614 if (islit)
1615 tcg_gen_movi_i64(cpu_ir[rc], lit);
1616 else
1617 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1618 }
1619 }
1620 break;
1621 case 0x02:
1622 /* S4ADDL */
1623 if (likely(rc != 31)) {
1624 if (ra != 31) {
1625 TCGv tmp = tcg_temp_new();
1626 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1627 if (islit)
1628 tcg_gen_addi_i64(tmp, tmp, lit);
1629 else
1630 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1631 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1632 tcg_temp_free(tmp);
1633 } else {
1634 if (islit)
1635 tcg_gen_movi_i64(cpu_ir[rc], lit);
1636 else
1637 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1638 }
1639 }
1640 break;
1641 case 0x09:
1642 /* SUBL */
1643 if (likely(rc != 31)) {
1644 if (ra != 31) {
1645 if (islit)
1646 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1647 else
1648 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1649 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1650 } else {
1651 if (islit)
1652 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1653 else {
1654 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1655 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1656 }
1657 }
}
1658 break;
1659 case 0x0B:
1660 /* S4SUBL */
1661 if (likely(rc != 31)) {
1662 if (ra != 31) {
1663 TCGv tmp = tcg_temp_new();
1664 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1665 if (islit)
1666 tcg_gen_subi_i64(tmp, tmp, lit);
1667 else
1668 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1669 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1670 tcg_temp_free(tmp);
1671 } else {
1672 if (islit)
1673 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1674 else {
1675 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1676 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1677 }
1678 }
1679 }
1680 break;
1681 case 0x0F:
1682 /* CMPBGE */
1683 gen_cmpbge(ra, rb, rc, islit, lit);
1684 break;
1685 case 0x12:
1686 /* S8ADDL */
1687 if (likely(rc != 31)) {
1688 if (ra != 31) {
1689 TCGv tmp = tcg_temp_new();
1690 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1691 if (islit)
1692 tcg_gen_addi_i64(tmp, tmp, lit);
1693 else
1694 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1695 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1696 tcg_temp_free(tmp);
1697 } else {
1698 if (islit)
1699 tcg_gen_movi_i64(cpu_ir[rc], lit);
1700 else
1701 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1702 }
1703 }
1704 break;
1705 case 0x1B:
1706 /* S8SUBL */
1707 if (likely(rc != 31)) {
1708 if (ra != 31) {
1709 TCGv tmp = tcg_temp_new();
1710 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1711 if (islit)
1712 tcg_gen_subi_i64(tmp, tmp, lit);
1713 else
1714 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1715 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1716 tcg_temp_free(tmp);
1717 } else {
1718 if (islit)
1719 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1720 else {
1721 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1722 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1723 }
1724 }
1725 }
1726 break;
1727 case 0x1D:
1728 /* CMPULT */
1729 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1730 break;
1731 case 0x20:
1732 /* ADDQ */
1733 if (likely(rc != 31)) {
1734 if (ra != 31) {
1735 if (islit)
1736 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1737 else
1738 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1739 } else {
1740 if (islit)
1741 tcg_gen_movi_i64(cpu_ir[rc], lit);
1742 else
1743 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1744 }
1745 }
1746 break;
1747 case 0x22:
1748 /* S4ADDQ */
1749 if (likely(rc != 31)) {
1750 if (ra != 31) {
1751 TCGv tmp = tcg_temp_new();
1752 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1753 if (islit)
1754 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1755 else
1756 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1757 tcg_temp_free(tmp);
1758 } else {
1759 if (islit)
1760 tcg_gen_movi_i64(cpu_ir[rc], lit);
1761 else
1762 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1763 }
1764 }
1765 break;
1766 case 0x29:
1767 /* SUBQ */
1768 if (likely(rc != 31)) {
1769 if (ra != 31) {
1770 if (islit)
1771 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1772 else
1773 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1774 } else {
1775 if (islit)
1776 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1777 else
1778 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1779 }
1780 }
1781 break;
1782 case 0x2B:
1783 /* S4SUBQ */
1784 if (likely(rc != 31)) {
1785 if (ra != 31) {
1786 TCGv tmp = tcg_temp_new();
1787 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1788 if (islit)
1789 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1790 else
1791 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1792 tcg_temp_free(tmp);
1793 } else {
1794 if (islit)
1795 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1796 else
1797 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1798 }
1799 }
1800 break;
1801 case 0x2D:
1802 /* CMPEQ */
1803 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1804 break;
1805 case 0x32:
1806 /* S8ADDQ */
1807 if (likely(rc != 31)) {
1808 if (ra != 31) {
1809 TCGv tmp = tcg_temp_new();
1810 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1811 if (islit)
1812 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1813 else
1814 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1815 tcg_temp_free(tmp);
1816 } else {
1817 if (islit)
1818 tcg_gen_movi_i64(cpu_ir[rc], lit);
1819 else
1820 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1821 }
1822 }
1823 break;
1824 case 0x3B:
1825 /* S8SUBQ */
1826 if (likely(rc != 31)) {
1827 if (ra != 31) {
1828 TCGv tmp = tcg_temp_new();
1829 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1830 if (islit)
1831 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1832 else
1833 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1834 tcg_temp_free(tmp);
1835 } else {
1836 if (islit)
1837 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1838 else
1839 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1840 }
1841 }
1842 break;
1843 case 0x3D:
1844 /* CMPULE */
1845 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1846 break;
1847 case 0x40:
1848 /* ADDL/V */
1849 gen_addlv(ra, rb, rc, islit, lit);
1850 break;
1851 case 0x49:
1852 /* SUBL/V */
1853 gen_sublv(ra, rb, rc, islit, lit);
1854 break;
1855 case 0x4D:
1856 /* CMPLT */
1857 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1858 break;
1859 case 0x60:
1860 /* ADDQ/V */
1861 gen_addqv(ra, rb, rc, islit, lit);
1862 break;
1863 case 0x69:
1864 /* SUBQ/V */
1865 gen_subqv(ra, rb, rc, islit, lit);
1866 break;
1867 case 0x6D:
1868 /* CMPLE */
1869 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1870 break;
1871 default:
1872 goto invalid_opc;
1873 }
1874 break;
1875 case 0x11:
1876 switch (fn7) {
1877 case 0x00:
1878 /* AND */
1879 if (likely(rc != 31)) {
1880 if (ra == 31)
1881 tcg_gen_movi_i64(cpu_ir[rc], 0);
1882 else if (islit)
1883 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1884 else
1885 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1886 }
1887 break;
1888 case 0x08:
1889 /* BIC */
1890 if (likely(rc != 31)) {
1891 if (ra != 31) {
1892 if (islit)
1893 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1894 else
1895 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1896 } else
1897 tcg_gen_movi_i64(cpu_ir[rc], 0);
1898 }
1899 break;
1900 case 0x14:
1901 /* CMOVLBS */
1902 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1903 break;
1904 case 0x16:
1905 /* CMOVLBC */
1906 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1907 break;
1908 case 0x20:
1909 /* BIS */
1910 if (likely(rc != 31)) {
1911 if (ra != 31) {
1912 if (islit)
1913 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1914 else
1915 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1916 } else {
1917 if (islit)
1918 tcg_gen_movi_i64(cpu_ir[rc], lit);
1919 else
1920 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1921 }
1922 }
1923 break;
1924 case 0x24:
1925 /* CMOVEQ */
1926 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1927 break;
1928 case 0x26:
1929 /* CMOVNE */
1930 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1931 break;
1932 case 0x28:
1933 /* ORNOT */
1934 if (likely(rc != 31)) {
1935 if (ra != 31) {
1936 if (islit)
1937 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1938 else
1939 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1940 } else {
1941 if (islit)
1942 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1943 else
1944 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1945 }
1946 }
1947 break;
1948 case 0x40:
1949 /* XOR */
1950 if (likely(rc != 31)) {
1951 if (ra != 31) {
1952 if (islit)
1953 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1954 else
1955 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1956 } else {
1957 if (islit)
1958 tcg_gen_movi_i64(cpu_ir[rc], lit);
1959 else
1960 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1961 }
1962 }
1963 break;
1964 case 0x44:
1965 /* CMOVLT */
1966 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1967 break;
1968 case 0x46:
1969 /* CMOVGE */
1970 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1971 break;
1972 case 0x48:
1973 /* EQV */
1974 if (likely(rc != 31)) {
1975 if (ra != 31) {
1976 if (islit)
1977 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1978 else
1979 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1980 } else {
1981 if (islit)
1982 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1983 else
1984 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1985 }
1986 }
1987 break;
1988 case 0x61:
1989 /* AMASK */
1990 if (likely(rc != 31)) {
1991 if (islit)
1992 tcg_gen_movi_i64(cpu_ir[rc], lit);
1993 else
1994 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1995 switch (ctx->env->implver) {
1996 case IMPLVER_2106x:
1997 /* EV4, EV45, LCA, LCA45 & EV5 */
1998 break;
1999 case IMPLVER_21164:
2000 case IMPLVER_21264:
2001 case IMPLVER_21364:
2002 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
2003 ~(uint64_t)ctx->amask);
2004 break;
2005 }
2006 }
2007 break;
2008 case 0x64:
2009 /* CMOVLE */
2010 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2011 break;
2012 case 0x66:
2013 /* CMOVGT */
2014 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2015 break;
2016 case 0x6C:
2017 /* IMPLVER */
2018 if (rc != 31)
2019 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2020 break;
2021 default:
2022 goto invalid_opc;
2023 }
2024 break;
2025 case 0x12:
2026 switch (fn7) {
2027 case 0x02:
2028 /* MSKBL */
2029 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2030 break;
2031 case 0x06:
2032 /* EXTBL */
2033 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2034 break;
2035 case 0x0B:
2036 /* INSBL */
2037 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2038 break;
2039 case 0x12:
2040 /* MSKWL */
2041 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2042 break;
2043 case 0x16:
2044 /* EXTWL */
2045 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2046 break;
2047 case 0x1B:
2048 /* INSWL */
2049 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2050 break;
2051 case 0x22:
2052 /* MSKLL */
2053 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2054 break;
2055 case 0x26:
2056 /* EXTLL */
2057 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2058 break;
2059 case 0x2B:
2060 /* INSLL */
2061 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2062 break;
2063 case 0x30:
2064 /* ZAP */
2065 gen_zap(ra, rb, rc, islit, lit);
2066 break;
2067 case 0x31:
2068 /* ZAPNOT */
2069 gen_zapnot(ra, rb, rc, islit, lit);
2070 break;
2071 case 0x32:
2072 /* MSKQL */
2073 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2074 break;
2075 case 0x34:
2076 /* SRL */
2077 if (likely(rc != 31)) {
2078 if (ra != 31) {
2079 if (islit)
2080 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2081 else {
2082 TCGv shift = tcg_temp_new();
2083 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2084 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2085 tcg_temp_free(shift);
2086 }
2087 } else
2088 tcg_gen_movi_i64(cpu_ir[rc], 0);
2089 }
2090 break;
2091 case 0x36:
2092 /* EXTQL */
2093 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2094 break;
2095 case 0x39:
2096 /* SLL */
2097 if (likely(rc != 31)) {
2098 if (ra != 31) {
2099 if (islit)
2100 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2101 else {
2102 TCGv shift = tcg_temp_new();
2103 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2104 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2105 tcg_temp_free(shift);
2106 }
2107 } else
2108 tcg_gen_movi_i64(cpu_ir[rc], 0);
2109 }
2110 break;
2111 case 0x3B:
2112 /* INSQL */
2113 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2114 break;
2115 case 0x3C:
2116 /* SRA */
2117 if (likely(rc != 31)) {
2118 if (ra != 31) {
2119 if (islit)
2120 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2121 else {
2122 TCGv shift = tcg_temp_new();
2123 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2124 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2125 tcg_temp_free(shift);
2126 }
2127 } else
2128 tcg_gen_movi_i64(cpu_ir[rc], 0);
2129 }
2130 break;
2131 case 0x52:
2132 /* MSKWH */
2133 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2134 break;
2135 case 0x57:
2136 /* INSWH */
2137 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2138 break;
2139 case 0x5A:
2140 /* EXTWH */
2141 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2142 break;
2143 case 0x62:
2144 /* MSKLH */
2145 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2146 break;
2147 case 0x67:
2148 /* INSLH */
2149 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2150 break;
2151 case 0x6A:
2152 /* EXTLH */
2153 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2154 break;
2155 case 0x72:
2156 /* MSKQH */
2157 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2158 break;
2159 case 0x77:
2160 /* INSQH */
2161 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2162 break;
2163 case 0x7A:
2164 /* EXTQH */
2165 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2166 break;
2167 default:
2168 goto invalid_opc;
2169 }
2170 break;
2171 case 0x13:
2172 switch (fn7) {
2173 case 0x00:
2174 /* MULL */
2175 if (likely(rc != 31)) {
2176 if (ra == 31)
2177 tcg_gen_movi_i64(cpu_ir[rc], 0);
2178 else {
2179 if (islit)
2180 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2181 else
2182 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2183 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2184 }
2185 }
2186 break;
2187 case 0x20:
2188 /* MULQ */
2189 if (likely(rc != 31)) {
2190 if (ra == 31)
2191 tcg_gen_movi_i64(cpu_ir[rc], 0);
2192 else if (islit)
2193 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2194 else
2195 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2196 }
2197 break;
2198 case 0x30:
2199 /* UMULH */
2200 gen_umulh(ra, rb, rc, islit, lit);
2201 break;
2202 case 0x40:
2203 /* MULL/V */
2204 gen_mullv(ra, rb, rc, islit, lit);
2205 break;
2206 case 0x60:
2207 /* MULQ/V */
2208 gen_mulqv(ra, rb, rc, islit, lit);
2209 break;
2210 default:
2211 goto invalid_opc;
2212 }
2213 break;
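/* Opcode 0x14: square root and integer-to-FP moves, all of which
   require the FIX extension, hence the AMASK_FIX checks. ITOFS,
   ITOFF and ITOFT move an integer register into the FP register
   file, reinterpreting rather than converting the bits. */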
2214 case 0x14:
2215 switch (fpfn) { /* fn11 & 0x3F */
2216 case 0x04:
2217 /* ITOFS */
2218 if (!(ctx->amask & AMASK_FIX))
2219 goto invalid_opc;
2220 if (likely(rc != 31)) {
2221 if (ra != 31) {
2222 TCGv_i32 tmp = tcg_temp_new_i32();
2223 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2224 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2225 tcg_temp_free_i32(tmp);
2226 } else
2227 tcg_gen_movi_i64(cpu_fir[rc], 0);
2228 }
2229 break;
2230 case 0x0A:
2231 /* SQRTF */
2232 if (!(ctx->amask & AMASK_FIX))
2233 goto invalid_opc;
2234 gen_fsqrtf(rb, rc);
2235 break;
2236 case 0x0B:
2237 /* SQRTS */
2238 if (!(ctx->amask & AMASK_FIX))
2239 goto invalid_opc;
2240 gen_fsqrts(ctx, rb, rc, fn11);
2241 break;
2242 case 0x14:
2243 /* ITOFF */
2244 if (!(ctx->amask & AMASK_FIX))
2245 goto invalid_opc;
2246 if (likely(rc != 31)) {
2247 if (ra != 31) {
2248 TCGv_i32 tmp = tcg_temp_new_i32();
2249 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2250 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2251 tcg_temp_free_i32(tmp);
2252 } else
2253 tcg_gen_movi_i64(cpu_fir[rc], 0);
2254 }
2255 break;
2256 case 0x24:
2257 /* ITOFT */
2258 if (!(ctx->amask & AMASK_FIX))
2259 goto invalid_opc;
2260 if (likely(rc != 31)) {
2261 if (ra != 31)
2262 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2263 else
2264 tcg_gen_movi_i64(cpu_fir[rc], 0);
2265 }
2266 break;
2267 case 0x2A:
2268 /* SQRTG */
2269 if (!(ctx->amask & AMASK_FIX))
2270 goto invalid_opc;
2271 gen_fsqrtg(rb, rc);
2272 break;
2273 case 0x2B:
2274 /* SQRTT */
2275 if (!(ctx->amask & AMASK_FIX))
2276 goto invalid_opc;
2277 gen_fsqrtt(ctx, rb, rc, fn11);
2278 break;
2279 default:
2280 goto invalid_opc;
2281 }
2282 break;
2283 case 0x15:
2284 /* VAX floating point */
2285 /* XXX: rounding mode and trap are ignored (!) */
2286 switch (fpfn) { /* fn11 & 0x3F */
2287 case 0x00:
2288 /* ADDF */
2289 gen_faddf(ra, rb, rc);
2290 break;
2291 case 0x01:
2292 /* SUBF */
2293 gen_fsubf(ra, rb, rc);
2294 break;
2295 case 0x02:
2296 /* MULF */
2297 gen_fmulf(ra, rb, rc);
2298 break;
2299 case 0x03:
2300 /* DIVF */
2301 gen_fdivf(ra, rb, rc);
2302 break;
2303 case 0x1E:
2304 /* CVTDG */
2305 #if 0 // TODO
2306 gen_fcvtdg(rb, rc);
2307 #else
2308 goto invalid_opc;
2309 #endif
2310 break;
2311 case 0x20:
2312 /* ADDG */
2313 gen_faddg(ra, rb, rc);
2314 break;
2315 case 0x21:
2316 /* SUBG */
2317 gen_fsubg(ra, rb, rc);
2318 break;
2319 case 0x22:
2320 /* MULG */
2321 gen_fmulg(ra, rb, rc);
2322 break;
2323 case 0x23:
2324 /* DIVG */
2325 gen_fdivg(ra, rb, rc);
2326 break;
2327 case 0x25:
2328 /* CMPGEQ */
2329 gen_fcmpgeq(ra, rb, rc);
2330 break;
2331 case 0x26:
2332 /* CMPGLT */
2333 gen_fcmpglt(ra, rb, rc);
2334 break;
2335 case 0x27:
2336 /* CMPGLE */
2337 gen_fcmpgle(ra, rb, rc);
2338 break;
2339 case 0x2C:
2340 /* CVTGF */
2341 gen_fcvtgf(rb, rc);
2342 break;
2343 case 0x2D:
2344 /* CVTGD */
2345 #if 0 // TODO
2346 gen_fcvtgd(rb, rc);
2347 #else
2348 goto invalid_opc;
2349 #endif
2350 break;
2351 case 0x2F:
2352 /* CVTGQ */
2353 gen_fcvtgq(rb, rc);
2354 break;
2355 case 0x3C:
2356 /* CVTQF */
2357 gen_fcvtqf(rb, rc);
2358 break;
2359 case 0x3E:
2360 /* CVTQG */
2361 gen_fcvtqg(rb, rc);
2362 break;
2363 default:
2364 goto invalid_opc;
2365 }
2366 break;
2367 case 0x16:
2368 /* IEEE floating-point */
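/* The fn11 qualifier bits select the rounding mode and trapping
   behavior, which is why these generators take ctx and fn11. */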
2369 switch (fpfn) { /* fn11 & 0x3F */
2370 case 0x00:
2371 /* ADDS */
2372 gen_fadds(ctx, ra, rb, rc, fn11);
2373 break;
2374 case 0x01:
2375 /* SUBS */
2376 gen_fsubs(ctx, ra, rb, rc, fn11);
2377 break;
2378 case 0x02:
2379 /* MULS */
2380 gen_fmuls(ctx, ra, rb, rc, fn11);
2381 break;
2382 case 0x03:
2383 /* DIVS */
2384 gen_fdivs(ctx, ra, rb, rc, fn11);
2385 break;
2386 case 0x20:
2387 /* ADDT */
2388 gen_faddt(ctx, ra, rb, rc, fn11);
2389 break;
2390 case 0x21:
2391 /* SUBT */
2392 gen_fsubt(ctx, ra, rb, rc, fn11);
2393 break;
2394 case 0x22:
2395 /* MULT */
2396 gen_fmult(ctx, ra, rb, rc, fn11);
2397 break;
2398 case 0x23:
2399 /* DIVT */
2400 gen_fdivt(ctx, ra, rb, rc, fn11);
2401 break;
2402 case 0x24:
2403 /* CMPTUN */
2404 gen_fcmptun(ctx, ra, rb, rc, fn11);
2405 break;
2406 case 0x25:
2407 /* CMPTEQ */
2408 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2409 break;
2410 case 0x26:
2411 /* CMPTLT */
2412 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2413 break;
2414 case 0x27:
2415 /* CMPTLE */
2416 gen_fcmptle(ctx, ra, rb, rc, fn11);
2417 break;
2418 case 0x2C:
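/* CVTST shares function value 0x2C with CVTTS; the qualifier bits
   distinguish them (0x2AC is CVTST, 0x6AC the /S form). */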
2419 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2420 /* CVTST */
2421 gen_fcvtst(ctx, rb, rc, fn11);
2422 } else {
2423 /* CVTTS */
2424 gen_fcvtts(ctx, rb, rc, fn11);
2425 }
2426 break;
2427 case 0x2F:
2428 /* CVTTQ */
2429 gen_fcvttq(ctx, rb, rc, fn11);
2430 break;
2431 case 0x3C:
2432 /* CVTQS */
2433 gen_fcvtqs(ctx, rb, rc, fn11);
2434 break;
2435 case 0x3E:
2436 /* CVTQT */
2437 gen_fcvtqt(ctx, rb, rc, fn11);
2438 break;
2439 default:
2440 goto invalid_opc;
2441 }
2442 break;
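/* Opcode 0x17: FP auxiliary operations, dispatched on the full
   11-bit function field; qualified forms such as CVTQL/V appear
   here as distinct fn11 values. */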
2443 case 0x17:
2444 switch (fn11) {
2445 case 0x010:
2446 /* CVTLQ */
2447 gen_fcvtlq(rb, rc);
2448 break;
2449 case 0x020:
2450 if (likely(rc != 31)) {
2451 if (ra == rb) {
2452 /* FMOV */
2453 if (ra == 31)
2454 tcg_gen_movi_i64(cpu_fir[rc], 0);
2455 else
2456 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2457 } else {
2458 /* CPYS */
2459 gen_fcpys(ra, rb, rc);
2460 }
2461 }
2462 break;
2463 case 0x021:
2464 /* CPYSN */
2465 gen_fcpysn(ra, rb, rc);
2466 break;
2467 case 0x022:
2468 /* CPYSE */
2469 gen_fcpyse(ra, rb, rc);
2470 break;
2471 case 0x024:
2472 /* MT_FPCR */
2473 if (likely(ra != 31))
2474 gen_helper_store_fpcr(cpu_fir[ra]);
2475 else {
2476 TCGv tmp = tcg_const_i64(0);
2477 gen_helper_store_fpcr(tmp);
2478 tcg_temp_free(tmp);
2479 }
2480 break;
2481 case 0x025:
2482 /* MF_FPCR */
2483 if (likely(ra != 31))
2484 gen_helper_load_fpcr(cpu_fir[ra]);
2485 break;
2486 case 0x02A:
2487 /* FCMOVEQ */
2488 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2489 break;
2490 case 0x02B:
2491 /* FCMOVNE */
2492 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2493 break;
2494 case 0x02C:
2495 /* FCMOVLT */
2496 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2497 break;
2498 case 0x02D:
2499 /* FCMOVGE */
2500 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2501 break;
2502 case 0x02E:
2503 /* FCMOVLE */
2504 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2505 break;
2506 case 0x02F:
2507 /* FCMOVGT */
2508 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2509 break;
2510 case 0x030:
2511 /* CVTQL */
2512 gen_fcvtql(rb, rc);
2513 break;
2514 case 0x130:
2515 /* CVTQL/V */
2516 case 0x530:
2517 /* CVTQL/SV */
2518 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2519 /v doesn't do. The only thing I can think of is that /sv is a
2520 valid instruction merely for completeness in the ISA. */
2521 gen_fcvtql_v(ctx, rb, rc);
2522 break;
2523 default:
2524 goto invalid_opc;
2525 }
2526 break;
2527 case 0x18:
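/* Miscellaneous instructions, selected by the 16-bit displacement
   field. The barriers and prefetch hints need no code under QEMU's
   sequential execution model. */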
2528 switch ((uint16_t)disp16) {
2529 case 0x0000:
2530 /* TRAPB */
2531 /* No-op */
2532 break;
2533 case 0x0400:
2534 /* EXCB */
2535 /* No-op */
2536 break;
2537 case 0x4000:
2538 /* MB */
2539 /* No-op */
2540 break;
2541 case 0x4400:
2542 /* WMB */
2543 /* No-op */
2544 break;
2545 case 0x8000:
2546 /* FETCH */
2547 /* No-op */
2548 break;
2549 case 0xA000:
2550 /* FETCH_M */
2551 /* No-op */
2552 break;
2553 case 0xC000:
2554 /* RPCC */
2555 if (ra != 31)
2556 gen_helper_load_pcc(cpu_ir[ra]);
2557 break;
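/* RC and RS read the per-cpu interrupt flag into ra and then clear
   or set it; gen_rx's second argument is the new flag value. */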
2558 case 0xE000:
2559 /* RC */
2560 gen_rx(ra, 0);
2561 break;
2562 case 0xE800:
2563 /* ECB */
2564 break;
2565 case 0xF000:
2566 /* RS */
2567 gen_rx(ra, 1);
2568 break;
2569 case 0xF800:
2570 /* WH64 */
2571 /* No-op */
2572 break;
2573 default:
2574 goto invalid_opc;
2575 }
2576 break;
2577 case 0x19:
2578 /* HW_MFPR (PALcode) */
2579 #if defined (CONFIG_USER_ONLY)
2580 goto invalid_opc;
2581 #else
2582 if (!ctx->pal_mode)
2583 goto invalid_opc;
2584 if (ra != 31) {
2585 TCGv_i32 tmp = tcg_const_i32(insn & 0xFF);
2586 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
2587 tcg_temp_free_i32(tmp);
2588 }
2589 break;
2590 #endif
2591 case 0x1A:
2592 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2593 prediction stack action, which of course we don't implement. */
2594 if (rb != 31) {
2595 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2596 } else {
2597 tcg_gen_movi_i64(cpu_pc, 0);
2598 }
2599 if (ra != 31) {
2600 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2601 }
2602 ret = EXIT_PC_UPDATED;
2603 break;
2604 case 0x1B:
2605 /* HW_LD (PALcode) */
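/* Bits <15:12> of the insn select the access type; bits <11:0>
   form the signed disp12 displacement. */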
2606 #if defined (CONFIG_USER_ONLY)
2607 goto invalid_opc;
2608 #else
2609 if (!ctx->pal_mode)
2610 goto invalid_opc;
2611 if (ra != 31) {
2612 TCGv addr = tcg_temp_new();
2613 if (rb != 31)
2614 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2615 else
2616 tcg_gen_movi_i64(addr, disp12);
2617 switch ((insn >> 12) & 0xF) {
2618 case 0x0:
2619 /* Longword physical access (hw_ldl/p) */
2620 gen_helper_ldl_phys(cpu_ir[ra], addr);
2621 break;
2622 case 0x1:
2623 /* Quadword physical access (hw_ldq/p) */
2624 gen_helper_ldq_phys(cpu_ir[ra], addr);
2625 break;
2626 case 0x2:
2627 /* Longword physical access with lock (hw_ldl_l/p) */
2628 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
2629 break;
2630 case 0x3:
2631 /* Quadword physical access with lock (hw_ldq_l/p) */
2632 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
2633 break;
2634 case 0x4:
2635 /* Longword virtual PTE fetch (hw_ldl/v) */
2636 goto invalid_opc;
2637 case 0x5:
2638 /* Quadword virtual PTE fetch (hw_ldq/v) */
2639 goto invalid_opc;
2641 case 0x6:
2642 /* Invalid */
2643 goto invalid_opc;
2644 case 0x7:
2645 /* Invalid */
2646 goto invalid_opc;
2647 case 0x8:
2648 /* Longword virtual access (hw_ldl) */
2649 goto invalid_opc;
2650 case 0x9:
2651 /* Quadword virtual access (hw_ldq) */
2652 goto invalid_opc;
2653 case 0xA:
2654 /* Longword virtual access with protection check (hw_ldl/w) */
2655 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2656 break;
2657 case 0xB:
2658 /* Quadword virtual access with protection check (hw_ldq/w) */
2659 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2660 break;
2661 case 0xC:
2662 /* Longword virtual access with alt access mode (hw_ldl/a) */
2663 goto invalid_opc;
2664 case 0xD:
2665 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2666 goto invalid_opc;
2667 case 0xE:
2668 /* Longword virtual access with alternate access mode and
2669 protection checks (hw_ldl/wa) */
2670 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2671 break;
2672 case 0xF:
2673 /* Quadword virtual access with alternate access mode and
2674 protection checks (hw_ldq/wa) */
2675 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2676 break;
2677 }
2678 tcg_temp_free(addr);
2679 }
2680 break;
2681 #endif
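/* Opcode 0x1C: extension instructions. SEXTB/SEXTW require BWX,
   the count insns require CIX, the multimedia insns require MVI,
   and FTOIT/FTOIS require FIX; each case checks the matching
   AMASK bit. */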
2682 case 0x1C:
2683 switch (fn7) {
2684 case 0x00:
2685 /* SEXTB */
2686 if (!(ctx->amask & AMASK_BWX))
2687 goto invalid_opc;
2688 if (likely(rc != 31)) {
2689 if (islit)
2690 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2691 else
2692 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2693 }
2694 break;
2695 case 0x01:
2696 /* SEXTW */
2697 if (!(ctx->amask & AMASK_BWX))
2698 goto invalid_opc;
2699 if (likely(rc != 31)) {
2700 if (islit)
2701 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2702 else
2703 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2704 }
2705 break;
2706 case 0x30:
2707 /* CTPOP */
2708 if (!(ctx->amask & AMASK_CIX))
2709 goto invalid_opc;
2710 if (likely(rc != 31)) {
2711 if (islit)
2712 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2713 else
2714 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2715 }
2716 break;
2717 case 0x31:
2718 /* PERR */
2719 if (!(ctx->amask & AMASK_MVI))
2720 goto invalid_opc;
2721 gen_perr(ra, rb, rc, islit, lit);
2722 break;
2723 case 0x32:
2724 /* CTLZ */
2725 if (!(ctx->amask & AMASK_CIX))
2726 goto invalid_opc;
2727 if (likely(rc != 31)) {
2728 if (islit)
2729 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2730 else
2731 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2732 }
2733 break;
2734 case 0x33:
2735 /* CTTZ */
2736 if (!(ctx->amask & AMASK_CIX))
2737 goto invalid_opc;
2738 if (likely(rc != 31)) {
2739 if (islit)
2740 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2741 else
2742 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2743 }
2744 break;
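/* The pack/unpack insns encode ra as R31; any other register, or
   a literal, is an invalid form, hence the real_islit/ra tests. */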
2745 case 0x34:
2746 /* UNPKBW */
2747 if (!(ctx->amask & AMASK_MVI))
2748 goto invalid_opc;
2749 if (real_islit || ra != 31)
2750 goto invalid_opc;
2751 gen_unpkbw(rb, rc);
2752 break;
2753 case 0x35:
2754 /* UNPKBL */
2755 if (!(ctx->amask & AMASK_MVI))
2756 goto invalid_opc;
2757 if (real_islit || ra != 31)
2758 goto invalid_opc;
2759 gen_unpkbl(rb, rc);
2760 break;
2761 case 0x36:
2762 /* PKWB */
2763 if (!(ctx->amask & AMASK_MVI))
2764 goto invalid_opc;
2765 if (real_islit || ra != 31)
2766 goto invalid_opc;
2767 gen_pkwb(rb, rc);
2768 break;
2769 case 0x37:
2770 /* PKLB */
2771 if (!(ctx->amask & AMASK_MVI))
2772 goto invalid_opc;
2773 if (real_islit || ra != 31)
2774 goto invalid_opc;
2775 gen_pklb(rb, rc);
2776 break;
2777 case 0x38:
2778 /* MINSB8 */
2779 if (!(ctx->amask & AMASK_MVI))
2780 goto invalid_opc;
2781 gen_minsb8(ra, rb, rc, islit, lit);
2782 break;
2783 case 0x39:
2784 /* MINSW4 */
2785 if (!(ctx->amask & AMASK_MVI))
2786 goto invalid_opc;
2787 gen_minsw4(ra, rb, rc, islit, lit);
2788 break;
2789 case 0x3A:
2790 /* MINUB8 */
2791 if (!(ctx->amask & AMASK_MVI))
2792 goto invalid_opc;
2793 gen_minub8(ra, rb, rc, islit, lit);
2794 break;
2795 case 0x3B:
2796 /* MINUW4 */
2797 if (!(ctx->amask & AMASK_MVI))
2798 goto invalid_opc;
2799 gen_minuw4(ra, rb, rc, islit, lit);
2800 break;
2801 case 0x3C:
2802 /* MAXUB8 */
2803 if (!(ctx->amask & AMASK_MVI))
2804 goto invalid_opc;
2805 gen_maxub8(ra, rb, rc, islit, lit);
2806 break;
2807 case 0x3D:
2808 /* MAXUW4 */
2809 if (!(ctx->amask & AMASK_MVI))
2810 goto invalid_opc;
2811 gen_maxuw4(ra, rb, rc, islit, lit);
2812 break;
2813 case 0x3E:
2814 /* MAXSB8 */
2815 if (!(ctx->amask & AMASK_MVI))
2816 goto invalid_opc;
2817 gen_maxsb8(ra, rb, rc, islit, lit);
2818 break;
2819 case 0x3F:
2820 /* MAXSW4 */
2821 if (!(ctx->amask & AMASK_MVI))
2822 goto invalid_opc;
2823 gen_maxsw4(ra, rb, rc, islit, lit);
2824 break;
2825 case 0x70:
2826 /* FTOIT */
2827 if (!(ctx->amask & AMASK_FIX))
2828 goto invalid_opc;
2829 if (likely(rc != 31)) {
2830 if (ra != 31)
2831 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2832 else
2833 tcg_gen_movi_i64(cpu_ir[rc], 0);
2834 }
2835 break;
2836 case 0x78:
2837 /* FTOIS */
2838 if (!(ctx->amask & AMASK_FIX))
2839 goto invalid_opc;
2840 if (rc != 31) {
2841 TCGv_i32 tmp1 = tcg_temp_new_i32();
2842 if (ra != 31)
2843 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2844 else {
2845 TCGv tmp2 = tcg_const_i64(0);
2846 gen_helper_s_to_memory(tmp1, tmp2);
2847 tcg_temp_free(tmp2);
2848 }
2849 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2850 tcg_temp_free_i32(tmp1);
2851 }
2852 break;
2853 default:
2854 goto invalid_opc;
2855 }
2856 break;
2857 case 0x1D:
2858 /* HW_MTPR (PALcode) */
2859 #if defined (CONFIG_USER_ONLY)
2860 goto invalid_opc;
2861 #else
2862 if (!ctx->pal_mode)
2863 goto invalid_opc;
2864 else {
2865 TCGv_i32 tmp1 = tcg_const_i32(insn & 0xFF);
2866 if (ra != 31)
2867 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2868 else {
2869 TCGv tmp2 = tcg_const_i64(0);
2870 gen_helper_mtpr(tmp1, tmp2);
2871 tcg_temp_free(tmp2);
2872 }
2873 tcg_temp_free_i32(tmp1);
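/* An IPR write may change state that translation depends on, so
   end the TB here; EXIT_PC_STALE makes the exit path store the
   updated PC first. */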
2874 ret = EXIT_PC_STALE;
2875 }
2876 break;
2877 #endif
2878 case 0x1E:
2879 /* HW_REI (PALcode) */
2880 #if defined (CONFIG_USER_ONLY)
2881 goto invalid_opc;
2882 #else
2883 if (!ctx->pal_mode)
2884 goto invalid_opc;
2885 if (rb == 31) {
2886 /* "Old" alpha */
2887 gen_helper_hw_rei();
2888 } else {
2889 TCGv tmp;
2890
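/* The (insn << 51) >> 51 pair sign-extends insn<12:0>, the
   displacement added to rb for hw_ret. */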
2891 if (ra != 31) {
2892 tmp = tcg_temp_new();
2893 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2894 } else
2895 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2896 gen_helper_hw_ret(tmp);
2897 tcg_temp_free(tmp);
2898 }
2899 ret = EXIT_PC_UPDATED;
2900 break;
2901 #endif
2902 case 0x1F:
2903 /* HW_ST (PALcode) */
2904 #if defined (CONFIG_USER_ONLY)
2905 goto invalid_opc;
2906 #else
2907 if (!ctx->pal_mode)
2908 goto invalid_opc;
2909 else {
2910 TCGv addr, val;
2911 addr = tcg_temp_new();
2912 if (rb != 31)
2913 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2914 else
2915 tcg_gen_movi_i64(addr, disp12);
2916 if (ra != 31)
2917 val = cpu_ir[ra];
2918 else {
2919 val = tcg_temp_new();
2920 tcg_gen_movi_i64(val, 0);
2921 }
2922 switch ((insn >> 12) & 0xF) {
2923 case 0x0:
2924 /* Longword physical access */
2925 gen_helper_stl_phys(addr, val);
2926 break;
2927 case 0x1:
2928 /* Quadword physical access */
2929 gen_helper_stq_phys(addr, val);
2930 break;
2931 case 0x2:
2932 /* Longword physical access with lock */
2933 gen_helper_stl_c_phys(val, addr, val);
2934 break;
2935 case 0x3:
2936 /* Quadword physical access with lock */
2937 gen_helper_stq_c_phys(val, addr, val);
2938 break;
2939 case 0x4:
2940 /* Longword virtual access */
2941 goto invalid_opc;
2942 case 0x5:
2943 /* Quadword virtual access */
2944 goto invalid_opc;
2945 case 0x6:
2946 /* Invalid */
2947 goto invalid_opc;
2948 case 0x7:
2949 /* Invalid */
2950 goto invalid_opc;
2951 case 0x8:
2952 /* Invalid */
2953 goto invalid_opc;
2954 case 0x9:
2955 /* Invalid */
2956 goto invalid_opc;
2957 case 0xA:
2958 /* Invalid */
2959 goto invalid_opc;
2960 case 0xB:
2961 /* Invalid */
2962 goto invalid_opc;
2963 case 0xC:
2964 /* Longword virtual access with alternate access mode */
2965 goto invalid_opc;
2966 case 0xD:
2967 /* Quadword virtual access with alternate access mode */
2968 goto invalid_opc;
2969 case 0xE:
2970 /* Invalid */
2971 goto invalid_opc;
2972 case 0xF:
2973 /* Invalid */
2974 goto invalid_opc;
2975 }
2976 if (ra == 31)
2977 tcg_temp_free(val);
2978 tcg_temp_free(addr);
2979 }
2980 break;
2981 #endif
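/* Opcodes 0x20..0x2F: memory-format loads and stores. The two
   trailing arguments to gen_load_mem/gen_store_mem select the FP
   register file and LDQ_U-style low-bit clearing, respectively. */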
2982 case 0x20:
2983 /* LDF */
2984 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2985 break;
2986 case 0x21:
2987 /* LDG */
2988 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2989 break;
2990 case 0x22:
2991 /* LDS */
2992 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2993 break;
2994 case 0x23:
2995 /* LDT */
2996 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2997 break;
2998 case 0x24:
2999 /* STF */
3000 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3001 break;
3002 case 0x25:
3003 /* STG */
3004 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3005 break;
3006 case 0x26:
3007 /* STS */
3008 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3009 break;
3010 case 0x27:
3011 /* STT */
3012 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3013 break;
3014 case 0x28:
3015 /* LDL */
3016 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3017 break;
3018 case 0x29:
3019 /* LDQ */
3020 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3021 break;
3022 case 0x2A:
3023 /* LDL_L */
3024 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3025 break;
3026 case 0x2B:
3027 /* LDQ_L */
3028 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3029 break;
3030 case 0x2C:
3031 /* STL */
3032 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3033 break;
3034 case 0x2D:
3035 /* STQ */
3036 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3037 break;
3038 case 0x2E:
3039 /* STL_C */
3040 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3041 break;
3042 case 0x2F:
3043 /* STQ_C */
3044 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3045 break;
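/* Opcodes 0x30..0x3F: branch formats with a signed 21-bit
   displacement. The final argument to gen_bcond requests a low-bit
   test (BLBC/BLBS) instead of a full-register comparison. */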
3046 case 0x30:
3047 /* BR */
3048 ret = gen_bdirect(ctx, ra, disp21);
3049 break;
3050 case 0x31: /* FBEQ */
3051 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3052 break;
3053 case 0x32: /* FBLT */
3054 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3055 break;
3056 case 0x33: /* FBLE */
3057 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3058 break;
3059 case 0x34:
3060 /* BSR */
3061 ret = gen_bdirect(ctx, ra, disp21);
3062 break;
3063 case 0x35: /* FBNE */
3064 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3065 break;
3066 case 0x36: /* FBGE */
3067 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3068 break;
3069 case 0x37: /* FBGT */
3070 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3071 break;
3072 case 0x38:
3073 /* BLBC */
3074 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3075 break;
3076 case 0x39:
3077 /* BEQ */
3078 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3079 break;
3080 case 0x3A:
3081 /* BLT */
3082 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3083 break;
3084 case 0x3B:
3085 /* BLE */
3086 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3087 break;
3088 case 0x3C:
3089 /* BLBS */
3090 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3091 break;
3092 case 0x3D:
3093 /* BNE */
3094 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3095 break;
3096 case 0x3E:
3097 /* BGE */
3098 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3099 break;
3100 case 0x3F:
3101 /* BGT */
3102 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3103 break;
3104 invalid_opc:
3105 ret = gen_invalid(ctx);
3106 break;
3107 }
3108
3109 return ret;
3110 }
3111
3112 static inline void gen_intermediate_code_internal(CPUState *env,
3113 TranslationBlock *tb,
3114 int search_pc)
3115 {
3116 DisasContext ctx, *ctxp = &ctx;
3117 target_ulong pc_start;
3118 uint32_t insn;
3119 uint16_t *gen_opc_end;
3120 CPUBreakpoint *bp;
3121 int j, lj = -1;
3122 ExitStatus ret;
3123 int num_insns;
3124 int max_insns;
3125
3126 pc_start = tb->pc;
3127 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3128
3129 ctx.tb = tb;
3130 ctx.env = env;
3131 ctx.pc = pc_start;
3132 ctx.amask = env->amask;
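/* mem_idx is the current privilege mode taken from PS<4:3>;
   PALmode is tracked via the low bit of the saved EXC_ADDR. */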
3133 #if defined (CONFIG_USER_ONLY)
3134 ctx.mem_idx = 0;
3135 #else
3136 ctx.mem_idx = ((env->ps >> 3) & 3);
3137 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
3138 #endif
3139
3140 /* ??? Every TB begins with unset rounding mode, to be initialized on
3141 the first fp insn of the TB. Alternatively we could define a proper
3142 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3143 to reset the FP_STATUS to that default at the end of any TB that
3144 changes the default. We could even (gasp) dynamically figure out
3145 what default would be most efficient given the running program. */
3146 ctx.tb_rm = -1;
3147 /* Similarly for flush-to-zero. */
3148 ctx.tb_ftz = -1;
3149
3150 num_insns = 0;
3151 max_insns = tb->cflags & CF_COUNT_MASK;
3152 if (max_insns == 0)
3153 max_insns = CF_COUNT_MASK;
3154
3155 gen_icount_start();
3156 do {
3157 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3158 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3159 if (bp->pc == ctx.pc) {
3160 gen_excp(&ctx, EXCP_DEBUG, 0);
3161 break;
3162 }
3163 }
3164 }
3165 if (search_pc) {
3166 j = gen_opc_ptr - gen_opc_buf;
3167 if (lj < j) {
3168 lj++;
3169 while (lj < j)
3170 gen_opc_instr_start[lj++] = 0;
3171 }
3172 gen_opc_pc[lj] = ctx.pc;
3173 gen_opc_instr_start[lj] = 1;
3174 gen_opc_icount[lj] = num_insns;
3175 }
3176 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3177 gen_io_start();
3178 insn = ldl_code(ctx.pc);
3179 num_insns++;
3180
3181 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3182 tcg_gen_debug_insn_start(ctx.pc);
3183 }
3184
3185 ctx.pc += 4;
3186 ret = translate_one(ctxp, insn);
3187
3188 /* If we reach a page boundary, are single stepping,
3189 or exhaust instruction count, stop generation. */
3190 if (ret == NO_EXIT
3191 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3192 || gen_opc_ptr >= gen_opc_end
3193 || num_insns >= max_insns
3194 || singlestep
3195 || env->singlestep_enabled)) {
3196 ret = EXIT_PC_STALE;
3197 }
3198 } while (ret == NO_EXIT);
3199
3200 if (tb->cflags & CF_LAST_IO) {
3201 gen_io_end();
3202 }
3203
3204 switch (ret) {
3205 case EXIT_GOTO_TB:
3206 case EXIT_NORETURN:
3207 break;
3208 case EXIT_PC_STALE:
3209 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3210 /* FALLTHRU */
3211 case EXIT_PC_UPDATED:
3212 if (env->singlestep_enabled) {
3213 gen_excp_1(EXCP_DEBUG, 0);
3214 } else {
3215 tcg_gen_exit_tb(0);
3216 }
3217 break;
3218 default:
3219 abort();
3220 }
3221
3222 gen_icount_end(tb, num_insns);
3223 *gen_opc_ptr = INDEX_op_end;
3224 if (search_pc) {
3225 j = gen_opc_ptr - gen_opc_buf;
3226 lj++;
3227 while (lj <= j)
3228 gen_opc_instr_start[lj++] = 0;
3229 } else {
3230 tb->size = ctx.pc - pc_start;
3231 tb->icount = num_insns;
3232 }
3233
3234 #ifdef DEBUG_DISAS
3235 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3236 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3237 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3238 qemu_log("\n");
3239 }
3240 #endif
3241 }
3242
3243 void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb)
3244 {
3245 gen_intermediate_code_internal(env, tb, 0);
3246 }
3247
3248 void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb)
3249 {
3250 gen_intermediate_code_internal(env, tb, 1);
3251 }
3252
3253 struct cpu_def_t {
3254 const char *name;
3255 int implver, amask;
3256 };
3257
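/* Table of recognized CPU models; the evN names are aliases for
   the 21x64 part numbers listed below them. */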
3258 static const struct cpu_def_t cpu_defs[] = {
3259 { "ev4", IMPLVER_2106x, 0 },
3260 { "ev5", IMPLVER_21164, 0 },
3261 { "ev56", IMPLVER_21164, AMASK_BWX },
3262 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3263 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3264 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3265 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3266 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3267 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3268 { "21064", IMPLVER_2106x, 0 },
3269 { "21164", IMPLVER_21164, 0 },
3270 { "21164a", IMPLVER_21164, AMASK_BWX },
3271 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3272 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3273 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3274 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3275 };
3276
3277 CPUAlphaState *cpu_alpha_init(const char *cpu_model)
3278 {
3279 CPUAlphaState *env;
3280 int implver, amask, i, max;
3281
3282 env = qemu_mallocz(sizeof(CPUAlphaState));
3283 cpu_exec_init(env);
3284 alpha_translate_init();
3285 tlb_flush(env, 1);
3286
3287 /* Default to ev67; no reason not to emulate insns by default. */
3288 implver = IMPLVER_21264;
3289 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3290 | AMASK_TRAP | AMASK_PREFETCH);
3291
3292 max = ARRAY_SIZE(cpu_defs);
3293 for (i = 0; i < max; i++) {
3294 if (strcmp(cpu_model, cpu_defs[i].name) == 0) {
3295 implver = cpu_defs[i].implver;
3296 amask = cpu_defs[i].amask;
3297 break;
3298 }
3299 }
3300 env->implver = implver;
3301 env->amask = amask;
3302
3303 env->ps = 0x1F00;
3304 #if defined (CONFIG_USER_ONLY)
3305 env->ps |= 1 << 3;
3306 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3307 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3308 #endif
3309 env->lock_addr = -1;
3310
3311 /* Initialize IPR */
3312 #if defined (CONFIG_USER_ONLY)
3313 env->ipr[IPR_EXC_ADDR] = 0;
3314 env->ipr[IPR_EXC_SUM] = 0;
3315 env->ipr[IPR_EXC_MASK] = 0;
3316 #else
3317 {
3318 // uint64_t hwpcb;
3319 // hwpcb = env->ipr[IPR_PCBB];
3320 env->ipr[IPR_ASN] = 0;
3321 env->ipr[IPR_ASTEN] = 0;
3322 env->ipr[IPR_ASTSR] = 0;
3323 env->ipr[IPR_DATFX] = 0;
3324 /* XXX: fix this */
3325 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3326 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3327 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3328 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3329 env->ipr[IPR_FEN] = 0;
3330 env->ipr[IPR_IPL] = 31;
3331 env->ipr[IPR_MCES] = 0;
3332 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3333 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3334 env->ipr[IPR_SISR] = 0;
3335 env->ipr[IPR_VIRBND] = -1ULL;
3336 }
3337 #endif
3338
3339 qemu_init_vcpu(env);
3340 return env;
3341 }
3342
3343 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
3344 {
3345 env->pc = gen_opc_pc[pc_pos];
3346 }