1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
37
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
43
44 typedef struct DisasContext DisasContext;
45 struct DisasContext {
46 struct TranslationBlock *tb;
47 CPUAlphaState *env;
48 uint64_t pc;
49 int mem_idx;
50
51 /* Current rounding mode for this TB. */
52 int tb_rm;
53 /* Current flush-to-zero setting for this TB. */
54 int tb_ftz;
55 };
56
57 /* Return values from translate_one, indicating the state of the TB.
58 Note that zero indicates that we are not exiting the TB. */
59
60 typedef enum {
61 NO_EXIT,
62
63 /* We have emitted one or more goto_tb. No fixup required. */
64 EXIT_GOTO_TB,
65
66 /* We are not using a goto_tb (for whatever reason), but have updated
67 the PC (for whatever reason), so there's no need to do it again on
68 exiting the TB. */
69 EXIT_PC_UPDATED,
70
71 /* We are exiting the TB, but have neither emitted a goto_tb, nor
72 updated the PC for the next instruction to be executed. */
73 EXIT_PC_STALE,
74
75 /* We are ending the TB with a noreturn function call, e.g. longjmp.
76 No following code will be executed. */
77 EXIT_NORETURN,
78 } ExitStatus;
79
80 /* global register indexes */
81 static TCGv_ptr cpu_env;
82 static TCGv cpu_ir[31];
83 static TCGv cpu_fir[31];
84 static TCGv cpu_pc;
85 static TCGv cpu_lock_addr;
86 static TCGv cpu_lock_st_addr;
87 static TCGv cpu_lock_value;
88 static TCGv cpu_unique;
89 #ifndef CONFIG_USER_ONLY
90 static TCGv cpu_sysval;
91 static TCGv cpu_usp;
92 #endif
93
94 /* register names */
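/* Buffer sizing note: "ir0".."ir9" take 4 bytes each including the NUL,
   "ir10".."ir30" take 5; "fir0".."fir9" take 5 and "fir10".."fir30" take 6,
   which is exactly the 10*4+21*5 + 10*5+21*6 expression below. */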
95 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
96
97 #include "gen-icount.h"
98
99 static void alpha_translate_init(void)
100 {
101 int i;
102 char *p;
103 static int done_init = 0;
104
105 if (done_init)
106 return;
107
108 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
109
110 p = cpu_reg_names;
111 for (i = 0; i < 31; i++) {
112 sprintf(p, "ir%d", i);
113 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
114 offsetof(CPUState, ir[i]), p);
115 p += (i < 10) ? 4 : 5;
116
117 sprintf(p, "fir%d", i);
118 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
119 offsetof(CPUState, fir[i]), p);
120 p += (i < 10) ? 5 : 6;
121 }
122
123 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
124 offsetof(CPUState, pc), "pc");
125
126 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
127 offsetof(CPUState, lock_addr),
128 "lock_addr");
129 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
130 offsetof(CPUState, lock_st_addr),
131 "lock_st_addr");
132 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
133 offsetof(CPUState, lock_value),
134 "lock_value");
135
136 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
137 offsetof(CPUState, unique), "unique");
138 #ifndef CONFIG_USER_ONLY
139 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
140 offsetof(CPUState, sysval), "sysval");
141 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
142 offsetof(CPUState, usp), "usp");
143 #endif
144
145 /* register helpers */
146 #define GEN_HELPER 2
147 #include "helper.h"
148
149 done_init = 1;
150 }
151
152 static void gen_excp_1(int exception, int error_code)
153 {
154 TCGv_i32 tmp1, tmp2;
155
156 tmp1 = tcg_const_i32(exception);
157 tmp2 = tcg_const_i32(error_code);
158 gen_helper_excp(tmp1, tmp2);
159 tcg_temp_free_i32(tmp2);
160 tcg_temp_free_i32(tmp1);
161 }
162
163 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
164 {
165 tcg_gen_movi_i64(cpu_pc, ctx->pc);
166 gen_excp_1(exception, error_code);
167 return EXIT_NORETURN;
168 }
169
170 static inline ExitStatus gen_invalid(DisasContext *ctx)
171 {
172 return gen_excp(ctx, EXCP_OPCDEC, 0);
173 }
174
175 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
176 {
177 TCGv tmp = tcg_temp_new();
178 TCGv_i32 tmp32 = tcg_temp_new_i32();
179 tcg_gen_qemu_ld32u(tmp, t1, flags);
180 tcg_gen_trunc_i64_i32(tmp32, tmp);
181 gen_helper_memory_to_f(t0, tmp32);
182 tcg_temp_free_i32(tmp32);
183 tcg_temp_free(tmp);
184 }
185
186 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
187 {
188 TCGv tmp = tcg_temp_new();
189 tcg_gen_qemu_ld64(tmp, t1, flags);
190 gen_helper_memory_to_g(t0, tmp);
191 tcg_temp_free(tmp);
192 }
193
194 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
195 {
196 TCGv tmp = tcg_temp_new();
197 TCGv_i32 tmp32 = tcg_temp_new_i32();
198 tcg_gen_qemu_ld32u(tmp, t1, flags);
199 tcg_gen_trunc_i64_i32(tmp32, tmp);
200 gen_helper_memory_to_s(t0, tmp32);
201 tcg_temp_free_i32(tmp32);
202 tcg_temp_free(tmp);
203 }
204
205 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
206 {
207 tcg_gen_qemu_ld32s(t0, t1, flags);
208 tcg_gen_mov_i64(cpu_lock_addr, t1);
209 tcg_gen_mov_i64(cpu_lock_value, t0);
210 }
211
212 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
213 {
214 tcg_gen_qemu_ld64(t0, t1, flags);
215 tcg_gen_mov_i64(cpu_lock_addr, t1);
216 tcg_gen_mov_i64(cpu_lock_value, t0);
217 }
218
219 static inline void gen_load_mem(DisasContext *ctx,
220 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
221 int flags),
222 int ra, int rb, int32_t disp16, int fp,
223 int clear)
224 {
225 TCGv addr, va;
226
227 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
228 prefetches, which we can treat as nops. No worries about
229 missed exceptions here. */
230 if (unlikely(ra == 31)) {
231 return;
232 }
233
234 addr = tcg_temp_new();
235 if (rb != 31) {
236 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
237 if (clear) {
238 tcg_gen_andi_i64(addr, addr, ~0x7);
239 }
240 } else {
241 if (clear) {
242 disp16 &= ~0x7;
243 }
244 tcg_gen_movi_i64(addr, disp16);
245 }
246
247 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
248 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
249
250 tcg_temp_free(addr);
251 }
252
253 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
254 {
255 TCGv_i32 tmp32 = tcg_temp_new_i32();
256 TCGv tmp = tcg_temp_new();
257 gen_helper_f_to_memory(tmp32, t0);
258 tcg_gen_extu_i32_i64(tmp, tmp32);
259 tcg_gen_qemu_st32(tmp, t1, flags);
260 tcg_temp_free(tmp);
261 tcg_temp_free_i32(tmp32);
262 }
263
264 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
265 {
266 TCGv tmp = tcg_temp_new();
267 gen_helper_g_to_memory(tmp, t0);
268 tcg_gen_qemu_st64(tmp, t1, flags);
269 tcg_temp_free(tmp);
270 }
271
272 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
273 {
274 TCGv_i32 tmp32 = tcg_temp_new_i32();
275 TCGv tmp = tcg_temp_new();
276 gen_helper_s_to_memory(tmp32, t0);
277 tcg_gen_extu_i32_i64(tmp, tmp32);
278 tcg_gen_qemu_st32(tmp, t1, flags);
279 tcg_temp_free(tmp);
280 tcg_temp_free_i32(tmp32);
281 }
282
283 static inline void gen_store_mem(DisasContext *ctx,
284 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
285 int flags),
286 int ra, int rb, int32_t disp16, int fp,
287 int clear)
288 {
289 TCGv addr, va;
290
291 addr = tcg_temp_new();
292 if (rb != 31) {
293 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
294 if (clear) {
295 tcg_gen_andi_i64(addr, addr, ~0x7);
296 }
297 } else {
298 if (clear) {
299 disp16 &= ~0x7;
300 }
301 tcg_gen_movi_i64(addr, disp16);
302 }
303
304 if (ra == 31) {
305 va = tcg_const_i64(0);
306 } else {
307 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
308 }
309 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
310
311 tcg_temp_free(addr);
312 if (ra == 31) {
313 tcg_temp_free(va);
314 }
315 }
316
317 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
318 int32_t disp16, int quad)
319 {
320 TCGv addr;
321
322 if (ra == 31) {
323 /* ??? Don't bother storing anything. The user can't tell
324 the difference, since the zero register always reads zero. */
325 return NO_EXIT;
326 }
327
328 #if defined(CONFIG_USER_ONLY)
329 addr = cpu_lock_st_addr;
330 #else
331 addr = tcg_temp_local_new();
332 #endif
333
334 if (rb != 31) {
335 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
336 } else {
337 tcg_gen_movi_i64(addr, disp16);
338 }
339
340 #if defined(CONFIG_USER_ONLY)
341 /* ??? This is handled via a complicated version of compare-and-swap
342 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
343 in TCG so that this isn't necessary. */
344 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
345 #else
346 /* ??? In system mode we are never multi-threaded, so CAS can be
347 implemented via a non-atomic load-compare-store sequence. */
348 {
349 int lab_fail, lab_done;
350 TCGv val;
351
352 lab_fail = gen_new_label();
353 lab_done = gen_new_label();
354 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
355
356 val = tcg_temp_new();
357 if (quad) {
358 tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
359 } else {
360 tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
361 }
362 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
363
364 if (quad) {
365 tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
366 } else {
367 tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
368 }
369 tcg_gen_movi_i64(cpu_ir[ra], 1);
370 tcg_gen_br(lab_done);
371
372 gen_set_label(lab_fail);
373 tcg_gen_movi_i64(cpu_ir[ra], 0);
374
375 gen_set_label(lab_done);
376 tcg_gen_movi_i64(cpu_lock_addr, -1);
377
378 tcg_temp_free(addr);
379 return NO_EXIT;
380 }
381 #endif
382 }
383
384 static int use_goto_tb(DisasContext *ctx, uint64_t dest)
385 {
386 /* Check for the dest on the same page as the start of the TB. We
387 also want to suppress goto_tb in the case of single-stepping and IO. */
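/* Roughly: chained TBs bypass the main execution loop, so single-stepping
   and I/O instruction counting would be skipped if we linked directly, and
   a link into a different page could go stale once that page's translations
   are flushed or the mapping changes. Hence the three checks below. */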
388 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
389 && !ctx->env->singlestep_enabled
390 && !(ctx->tb->cflags & CF_LAST_IO));
391 }
392
393 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
394 {
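/* Branch displacements are counted in 32-bit instruction words, hence the
   shift by 2 below.  ctx->pc has already been advanced past this instruction,
   so dest is relative to the updated PC, as the architecture specifies. */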
395 uint64_t dest = ctx->pc + (disp << 2);
396
397 if (ra != 31) {
398 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
399 }
400
401 /* Notice branch-to-next; used to initialize RA with the PC. */
402 if (disp == 0) {
403 return NO_EXIT;
404 } else if (use_goto_tb(ctx, dest)) {
405 tcg_gen_goto_tb(0);
406 tcg_gen_movi_i64(cpu_pc, dest);
407 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
408 return EXIT_GOTO_TB;
409 } else {
410 tcg_gen_movi_i64(cpu_pc, dest);
411 return EXIT_PC_UPDATED;
412 }
413 }
414
415 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
416 TCGv cmp, int32_t disp)
417 {
418 uint64_t dest = ctx->pc + (disp << 2);
419 int lab_true = gen_new_label();
420
421 if (use_goto_tb(ctx, dest)) {
422 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
423
424 tcg_gen_goto_tb(0);
425 tcg_gen_movi_i64(cpu_pc, ctx->pc);
426 tcg_gen_exit_tb((tcg_target_long)ctx->tb);
427
428 gen_set_label(lab_true);
429 tcg_gen_goto_tb(1);
430 tcg_gen_movi_i64(cpu_pc, dest);
431 tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);
432
433 return EXIT_GOTO_TB;
434 } else {
435 int lab_over = gen_new_label();
436
437 /* ??? Consider using either
438 movi pc, next
439 addi tmp, pc, disp
440 movcond pc, cond, 0, tmp, pc
441 or
442 setcond tmp, cond, 0
443 movi pc, next
444 neg tmp, tmp
445 andi tmp, tmp, disp
446 add pc, pc, tmp
447 The current diamond subgraph surely isn't efficient. */
448
449 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
450 tcg_gen_movi_i64(cpu_pc, ctx->pc);
451 tcg_gen_br(lab_over);
452 gen_set_label(lab_true);
453 tcg_gen_movi_i64(cpu_pc, dest);
454 gen_set_label(lab_over);
455
456 return EXIT_PC_UPDATED;
457 }
458 }
459
460 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
461 int32_t disp, int mask)
462 {
463 TCGv cmp_tmp;
464
465 if (unlikely(ra == 31)) {
466 cmp_tmp = tcg_const_i64(0);
467 } else {
468 cmp_tmp = tcg_temp_new();
469 if (mask) {
470 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
471 } else {
472 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
473 }
474 }
475
476 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
477 }
478
479 /* Fold -0.0 for comparison with COND. */
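/* IEEE -0.0 is the single bit pattern 1<<63 and must compare equal to +0.0.
   The folding below rewrites the operand so that a plain signed integer
   comparison against zero yields the IEEE-correct answer for COND; e.g. for
   >= or <, the value -0.0 is mapped to exactly 0. */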
480
481 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
482 {
483 uint64_t mzero = 1ull << 63;
484
485 switch (cond) {
486 case TCG_COND_LE:
487 case TCG_COND_GT:
488 /* For <= or >, the -0.0 value directly compares the way we want. */
489 tcg_gen_mov_i64(dest, src);
490 break;
491
492 case TCG_COND_EQ:
493 case TCG_COND_NE:
494 /* For == or !=, we can simply mask off the sign bit and compare. */
495 tcg_gen_andi_i64(dest, src, mzero - 1);
496 break;
497
498 case TCG_COND_GE:
499 case TCG_COND_LT:
500 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
501 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
502 tcg_gen_neg_i64(dest, dest);
503 tcg_gen_and_i64(dest, dest, src);
504 break;
505
506 default:
507 abort();
508 }
509 }
510
511 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
512 int32_t disp)
513 {
514 TCGv cmp_tmp;
515
516 if (unlikely(ra == 31)) {
517 /* Very uncommon case, but easier to optimize it to an integer
518 comparison than continuing with the floating point comparison. */
519 return gen_bcond(ctx, cond, ra, disp, 0);
520 }
521
522 cmp_tmp = tcg_temp_new();
523 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
524 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
525 }
526
527 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
528 int islit, uint8_t lit, int mask)
529 {
530 TCGCond inv_cond = tcg_invert_cond(cond);
531 int l1;
532
533 if (unlikely(rc == 31))
534 return;
535
536 l1 = gen_new_label();
537
538 if (ra != 31) {
539 if (mask) {
540 TCGv tmp = tcg_temp_new();
541 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
542 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
543 tcg_temp_free(tmp);
544 } else
545 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
546 } else {
547 /* Very uncommon case - Do not bother to optimize. */
548 TCGv tmp = tcg_const_i64(0);
549 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
550 tcg_temp_free(tmp);
551 }
552
553 if (islit)
554 tcg_gen_movi_i64(cpu_ir[rc], lit);
555 else
556 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
557 gen_set_label(l1);
558 }
559
560 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
561 {
562 TCGv cmp_tmp;
563 int l1;
564
565 if (unlikely(rc == 31)) {
566 return;
567 }
568
569 cmp_tmp = tcg_temp_new();
570 if (unlikely(ra == 31)) {
571 tcg_gen_movi_i64(cmp_tmp, 0);
572 } else {
573 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
574 }
575
576 l1 = gen_new_label();
577 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
578 tcg_temp_free(cmp_tmp);
579
580 if (rb != 31)
581 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
582 else
583 tcg_gen_movi_i64(cpu_fir[rc], 0);
584 gen_set_label(l1);
585 }
586
587 #define QUAL_RM_N 0x080 /* Round mode nearest even */
588 #define QUAL_RM_C 0x000 /* Round mode chopped */
589 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
590 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
591 #define QUAL_RM_MASK 0x0c0
592
593 #define QUAL_U 0x100 /* Underflow enable (fp output) */
594 #define QUAL_V 0x100 /* Overflow enable (int output) */
595 #define QUAL_S 0x400 /* Software completion enable */
596 #define QUAL_I 0x200 /* Inexact detection enable */
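/* Per the defines above, the fn11 function field of IEEE operate instructions
   carries the qualifiers in its high bits: bits <7:6> select the rounding
   mode, bit 8 is /U (or /V for integer results), bit 9 is /I, bit 10 is /S.
   For example, the common /SUI qualifier is QUAL_S | QUAL_U | QUAL_I. */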
597
598 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
599 {
600 TCGv_i32 tmp;
601
602 fn11 &= QUAL_RM_MASK;
603 if (fn11 == ctx->tb_rm) {
604 return;
605 }
606 ctx->tb_rm = fn11;
607
608 tmp = tcg_temp_new_i32();
609 switch (fn11) {
610 case QUAL_RM_N:
611 tcg_gen_movi_i32(tmp, float_round_nearest_even);
612 break;
613 case QUAL_RM_C:
614 tcg_gen_movi_i32(tmp, float_round_to_zero);
615 break;
616 case QUAL_RM_M:
617 tcg_gen_movi_i32(tmp, float_round_down);
618 break;
619 case QUAL_RM_D:
620 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
621 break;
622 }
623
624 #if defined(CONFIG_SOFTFLOAT_INLINE)
625 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
626 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
627 sets the one field. */
628 tcg_gen_st8_i32(tmp, cpu_env,
629 offsetof(CPUState, fp_status.float_rounding_mode));
630 #else
631 gen_helper_setroundmode(tmp);
632 #endif
633
634 tcg_temp_free_i32(tmp);
635 }
636
637 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
638 {
639 TCGv_i32 tmp;
640
641 fn11 &= QUAL_U;
642 if (fn11 == ctx->tb_ftz) {
643 return;
644 }
645 ctx->tb_ftz = fn11;
646
647 tmp = tcg_temp_new_i32();
648 if (fn11) {
649 /* Underflow is enabled, use the FPCR setting. */
650 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
651 } else {
652 /* Underflow is disabled, force flush-to-zero. */
653 tcg_gen_movi_i32(tmp, 1);
654 }
655
656 #if defined(CONFIG_SOFTFLOAT_INLINE)
657 tcg_gen_st8_i32(tmp, cpu_env,
658 offsetof(CPUState, fp_status.flush_to_zero));
659 #else
660 gen_helper_setflushzero(tmp);
661 #endif
662
663 tcg_temp_free_i32(tmp);
664 }
665
666 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
667 {
668 TCGv val = tcg_temp_new();
669 if (reg == 31) {
670 tcg_gen_movi_i64(val, 0);
671 } else if (fn11 & QUAL_S) {
672 gen_helper_ieee_input_s(val, cpu_fir[reg]);
673 } else if (is_cmp) {
674 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
675 } else {
676 gen_helper_ieee_input(val, cpu_fir[reg]);
677 }
678 return val;
679 }
680
681 static void gen_fp_exc_clear(void)
682 {
683 #if defined(CONFIG_SOFTFLOAT_INLINE)
684 TCGv_i32 zero = tcg_const_i32(0);
685 tcg_gen_st8_i32(zero, cpu_env,
686 offsetof(CPUState, fp_status.float_exception_flags));
687 tcg_temp_free_i32(zero);
688 #else
689 gen_helper_fp_exc_clear();
690 #endif
691 }
692
693 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
694 {
695 /* ??? We ought to be able to do something with imprecise exceptions.
696 E.g. notice we're still in the trap shadow of something within the
697 TB and do not generate the code to signal the exception; end the TB
698 when an exception is forced to arrive, either by consumption of a
699 register value or TRAPB or EXCB. */
700 TCGv_i32 exc = tcg_temp_new_i32();
701 TCGv_i32 reg;
702
703 #if defined(CONFIG_SOFTFLOAT_INLINE)
704 tcg_gen_ld8u_i32(exc, cpu_env,
705 offsetof(CPUState, fp_status.float_exception_flags));
706 #else
707 gen_helper_fp_exc_get(exc);
708 #endif
709
710 if (ignore) {
711 tcg_gen_andi_i32(exc, exc, ~ignore);
712 }
713
714 /* ??? Pass in the regno of the destination so that the helper can
715 set EXC_MASK, which contains a bitmask of destination registers
716 that have caused arithmetic traps. A simple userspace emulation
717 does not require this. We do need it for a guest kernel's entArith,
718 or if we were to do something clever with imprecise exceptions. */
719 reg = tcg_const_i32(rc + 32);
720
721 if (fn11 & QUAL_S) {
722 gen_helper_fp_exc_raise_s(exc, reg);
723 } else {
724 gen_helper_fp_exc_raise(exc, reg);
725 }
726
727 tcg_temp_free_i32(reg);
728 tcg_temp_free_i32(exc);
729 }
730
731 static inline void gen_fp_exc_raise(int rc, int fn11)
732 {
733 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
734 }
735
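/* CVTLQ/CVTQL below shuffle between a 32-bit value and the register format
   Alpha uses for a longword held in a floating-point register: value bits
   <31:30> live in FP register bits <63:62> and value bits <29:0> in FP
   register bits <58:29> (this reading is derived from the shift-and-mask
   sequences in the two functions). */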
736 static void gen_fcvtlq(int rb, int rc)
737 {
738 if (unlikely(rc == 31)) {
739 return;
740 }
741 if (unlikely(rb == 31)) {
742 tcg_gen_movi_i64(cpu_fir[rc], 0);
743 } else {
744 TCGv tmp = tcg_temp_new();
745
746 /* The arithmetic right shift here, plus the sign-extended mask below
747 yields a sign-extended result without an explicit ext32s_i64. */
748 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
749 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
750 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
751 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
752 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
753
754 tcg_temp_free(tmp);
755 }
756 }
757
758 static void gen_fcvtql(int rb, int rc)
759 {
760 if (unlikely(rc == 31)) {
761 return;
762 }
763 if (unlikely(rb == 31)) {
764 tcg_gen_movi_i64(cpu_fir[rc], 0);
765 } else {
766 TCGv tmp = tcg_temp_new();
767
768 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
769 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
770 tcg_gen_shli_i64(tmp, tmp, 32);
771 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
772 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
773
774 tcg_temp_free(tmp);
775 }
776 }
777
778 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
779 {
780 if (rb != 31) {
781 int lab = gen_new_label();
782 TCGv tmp = tcg_temp_new();
783
784 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
785 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
786 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
787
788 gen_set_label(lab);
789 }
790 gen_fcvtql(rb, rc);
791 }
792
793 #define FARITH2(name) \
794 static inline void glue(gen_f, name)(int rb, int rc) \
795 { \
796 if (unlikely(rc == 31)) { \
797 return; \
798 } \
799 if (rb != 31) { \
800 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
801 } else { \
802 TCGv tmp = tcg_const_i64(0); \
803 gen_helper_ ## name (cpu_fir[rc], tmp); \
804 tcg_temp_free(tmp); \
805 } \
806 }
807
808 /* ??? VAX instruction qualifiers ignored. */
809 FARITH2(sqrtf)
810 FARITH2(sqrtg)
811 FARITH2(cvtgf)
812 FARITH2(cvtgq)
813 FARITH2(cvtqf)
814 FARITH2(cvtqg)
815
816 static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
817 int rb, int rc, int fn11)
818 {
819 TCGv vb;
820
821 /* ??? This is wrong: the instruction is not a nop, it still may
822 raise exceptions. */
823 if (unlikely(rc == 31)) {
824 return;
825 }
826
827 gen_qual_roundmode(ctx, fn11);
828 gen_qual_flushzero(ctx, fn11);
829 gen_fp_exc_clear();
830
831 vb = gen_ieee_input(rb, fn11, 0);
832 helper(cpu_fir[rc], vb);
833 tcg_temp_free(vb);
834
835 gen_fp_exc_raise(rc, fn11);
836 }
837
838 #define IEEE_ARITH2(name) \
839 static inline void glue(gen_f, name)(DisasContext *ctx, \
840 int rb, int rc, int fn11) \
841 { \
842 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
843 }
844 IEEE_ARITH2(sqrts)
845 IEEE_ARITH2(sqrtt)
846 IEEE_ARITH2(cvtst)
847 IEEE_ARITH2(cvtts)
848
849 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
850 {
851 TCGv vb;
852 int ignore = 0;
853
854 /* ??? This is wrong: the instruction is not a nop, it still may
855 raise exceptions. */
856 if (unlikely(rc == 31)) {
857 return;
858 }
859
860 /* No need to set flushzero, since we have an integer output. */
861 gen_fp_exc_clear();
862 vb = gen_ieee_input(rb, fn11, 0);
863
864 /* Almost all integer conversions use chopped rounding, and most
865 also do not have integer overflow enabled. Special case that. */
866 switch (fn11) {
867 case QUAL_RM_C:
868 gen_helper_cvttq_c(cpu_fir[rc], vb);
869 break;
870 case QUAL_V | QUAL_RM_C:
871 case QUAL_S | QUAL_V | QUAL_RM_C:
872 ignore = float_flag_inexact;
873 /* FALLTHRU */
874 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
875 gen_helper_cvttq_svic(cpu_fir[rc], vb);
876 break;
877 default:
878 gen_qual_roundmode(ctx, fn11);
879 gen_helper_cvttq(cpu_fir[rc], vb);
880 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
881 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
882 break;
883 }
884 tcg_temp_free(vb);
885
886 gen_fp_exc_raise_ignore(rc, fn11, ignore);
887 }
888
889 static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
890 int rb, int rc, int fn11)
891 {
892 TCGv vb;
893
894 /* ??? This is wrong: the instruction is not a nop, it still may
895 raise exceptions. */
896 if (unlikely(rc == 31)) {
897 return;
898 }
899
900 gen_qual_roundmode(ctx, fn11);
901
902 if (rb == 31) {
903 vb = tcg_const_i64(0);
904 } else {
905 vb = cpu_fir[rb];
906 }
907
908 /* The only exception that can be raised by integer conversion
909 is inexact. Thus we only need to worry about exceptions when
910 inexact handling is requested. */
911 if (fn11 & QUAL_I) {
912 gen_fp_exc_clear();
913 helper(cpu_fir[rc], vb);
914 gen_fp_exc_raise(rc, fn11);
915 } else {
916 helper(cpu_fir[rc], vb);
917 }
918
919 if (rb == 31) {
920 tcg_temp_free(vb);
921 }
922 }
923
924 #define IEEE_INTCVT(name) \
925 static inline void glue(gen_f, name)(DisasContext *ctx, \
926 int rb, int rc, int fn11) \
927 { \
928 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
929 }
930 IEEE_INTCVT(cvtqs)
931 IEEE_INTCVT(cvtqt)
932
933 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
934 {
935 TCGv va, vb, vmask;
936 int za = 0, zb = 0;
937
938 if (unlikely(rc == 31)) {
939 return;
940 }
941
942 vmask = tcg_const_i64(mask);
943
944 TCGV_UNUSED_I64(va);
945 if (ra == 31) {
946 if (inv_a) {
947 va = vmask;
948 } else {
949 za = 1;
950 }
951 } else {
952 va = tcg_temp_new_i64();
953 tcg_gen_mov_i64(va, cpu_fir[ra]);
954 if (inv_a) {
955 tcg_gen_andc_i64(va, vmask, va);
956 } else {
957 tcg_gen_and_i64(va, va, vmask);
958 }
959 }
960
961 TCGV_UNUSED_I64(vb);
962 if (rb == 31) {
963 zb = 1;
964 } else {
965 vb = tcg_temp_new_i64();
966 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
967 }
968
969 switch (za << 1 | zb) {
970 case 0 | 0:
971 tcg_gen_or_i64(cpu_fir[rc], va, vb);
972 break;
973 case 0 | 1:
974 tcg_gen_mov_i64(cpu_fir[rc], va);
975 break;
976 case 2 | 0:
977 tcg_gen_mov_i64(cpu_fir[rc], vb);
978 break;
979 case 2 | 1:
980 tcg_gen_movi_i64(cpu_fir[rc], 0);
981 break;
982 }
983
984 tcg_temp_free(vmask);
985 if (ra != 31) {
986 tcg_temp_free(va);
987 }
988 if (rb != 31) {
989 tcg_temp_free(vb);
990 }
991 }
992
993 static inline void gen_fcpys(int ra, int rb, int rc)
994 {
995 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
996 }
997
998 static inline void gen_fcpysn(int ra, int rb, int rc)
999 {
1000 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
1001 }
1002
1003 static inline void gen_fcpyse(int ra, int rb, int rc)
1004 {
1005 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
1006 }
1007
1008 #define FARITH3(name) \
1009 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1010 { \
1011 TCGv va, vb; \
1012 \
1013 if (unlikely(rc == 31)) { \
1014 return; \
1015 } \
1016 if (ra == 31) { \
1017 va = tcg_const_i64(0); \
1018 } else { \
1019 va = cpu_fir[ra]; \
1020 } \
1021 if (rb == 31) { \
1022 vb = tcg_const_i64(0); \
1023 } else { \
1024 vb = cpu_fir[rb]; \
1025 } \
1026 \
1027 gen_helper_ ## name (cpu_fir[rc], va, vb); \
1028 \
1029 if (ra == 31) { \
1030 tcg_temp_free(va); \
1031 } \
1032 if (rb == 31) { \
1033 tcg_temp_free(vb); \
1034 } \
1035 }
1036
1037 /* ??? VAX instruction qualifiers ignored. */
1038 FARITH3(addf)
1039 FARITH3(subf)
1040 FARITH3(mulf)
1041 FARITH3(divf)
1042 FARITH3(addg)
1043 FARITH3(subg)
1044 FARITH3(mulg)
1045 FARITH3(divg)
1046 FARITH3(cmpgeq)
1047 FARITH3(cmpglt)
1048 FARITH3(cmpgle)
1049
1050 static void gen_ieee_arith3(DisasContext *ctx,
1051 void (*helper)(TCGv, TCGv, TCGv),
1052 int ra, int rb, int rc, int fn11)
1053 {
1054 TCGv va, vb;
1055
1056 /* ??? This is wrong: the instruction is not a nop, it still may
1057 raise exceptions. */
1058 if (unlikely(rc == 31)) {
1059 return;
1060 }
1061
1062 gen_qual_roundmode(ctx, fn11);
1063 gen_qual_flushzero(ctx, fn11);
1064 gen_fp_exc_clear();
1065
1066 va = gen_ieee_input(ra, fn11, 0);
1067 vb = gen_ieee_input(rb, fn11, 0);
1068 helper(cpu_fir[rc], va, vb);
1069 tcg_temp_free(va);
1070 tcg_temp_free(vb);
1071
1072 gen_fp_exc_raise(rc, fn11);
1073 }
1074
1075 #define IEEE_ARITH3(name) \
1076 static inline void glue(gen_f, name)(DisasContext *ctx, \
1077 int ra, int rb, int rc, int fn11) \
1078 { \
1079 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1080 }
1081 IEEE_ARITH3(adds)
1082 IEEE_ARITH3(subs)
1083 IEEE_ARITH3(muls)
1084 IEEE_ARITH3(divs)
1085 IEEE_ARITH3(addt)
1086 IEEE_ARITH3(subt)
1087 IEEE_ARITH3(mult)
1088 IEEE_ARITH3(divt)
1089
1090 static void gen_ieee_compare(DisasContext *ctx,
1091 void (*helper)(TCGv, TCGv, TCGv),
1092 int ra, int rb, int rc, int fn11)
1093 {
1094 TCGv va, vb;
1095
1096 /* ??? This is wrong: the instruction is not a nop, it still may
1097 raise exceptions. */
1098 if (unlikely(rc == 31)) {
1099 return;
1100 }
1101
1102 gen_fp_exc_clear();
1103
1104 va = gen_ieee_input(ra, fn11, 1);
1105 vb = gen_ieee_input(rb, fn11, 1);
1106 helper(cpu_fir[rc], va, vb);
1107 tcg_temp_free(va);
1108 tcg_temp_free(vb);
1109
1110 gen_fp_exc_raise(rc, fn11);
1111 }
1112
1113 #define IEEE_CMP3(name) \
1114 static inline void glue(gen_f, name)(DisasContext *ctx, \
1115 int ra, int rb, int rc, int fn11) \
1116 { \
1117 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1118 }
1119 IEEE_CMP3(cmptun)
1120 IEEE_CMP3(cmpteq)
1121 IEEE_CMP3(cmptlt)
1122 IEEE_CMP3(cmptle)
1123
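/* Expand an 8-bit byte-select literal into a 64-bit mask, one 0xff per set
   bit: e.g. 0x01 -> 0x00000000000000ff, 0x03 -> 0x000000000000ffff,
   0x0f -> 0x00000000ffffffff, 0xff -> all ones. */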
1124 static inline uint64_t zapnot_mask(uint8_t lit)
1125 {
1126 uint64_t mask = 0;
1127 int i;
1128
1129 for (i = 0; i < 8; ++i) {
1130 if ((lit >> i) & 1)
1131 mask |= 0xffull << (i * 8);
1132 }
1133 return mask;
1134 }
1135
1136 /* Implement zapnot with an immediate operand, which expands to some
1137 form of immediate AND. This is a basic building block in the
1138 definition of many of the other byte manipulation instructions. */
1139 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1140 {
1141 switch (lit) {
1142 case 0x00:
1143 tcg_gen_movi_i64(dest, 0);
1144 break;
1145 case 0x01:
1146 tcg_gen_ext8u_i64(dest, src);
1147 break;
1148 case 0x03:
1149 tcg_gen_ext16u_i64(dest, src);
1150 break;
1151 case 0x0f:
1152 tcg_gen_ext32u_i64(dest, src);
1153 break;
1154 case 0xff:
1155 tcg_gen_mov_i64(dest, src);
1156 break;
1157 default:
1158 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1159 break;
1160 }
1161 }
1162
1163 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1164 {
1165 if (unlikely(rc == 31))
1166 return;
1167 else if (unlikely(ra == 31))
1168 tcg_gen_movi_i64(cpu_ir[rc], 0);
1169 else if (islit)
1170 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1171 else
1172 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1173 }
1174
1175 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1176 {
1177 if (unlikely(rc == 31))
1178 return;
1179 else if (unlikely(ra == 31))
1180 tcg_gen_movi_i64(cpu_ir[rc], 0);
1181 else if (islit)
1182 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1183 else
1184 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1185 }
1186
1187
1188 /* EXTWH, EXTLH, EXTQH */
1189 static void gen_ext_h(int ra, int rb, int rc, int islit,
1190 uint8_t lit, uint8_t byte_mask)
1191 {
1192 if (unlikely(rc == 31))
1193 return;
1194 else if (unlikely(ra == 31))
1195 tcg_gen_movi_i64(cpu_ir[rc], 0);
1196 else {
1197 if (islit) {
1198 lit = (64 - (lit & 7) * 8) & 0x3f;
1199 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1200 } else {
1201 TCGv tmp1 = tcg_temp_new();
1202 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1203 tcg_gen_shli_i64(tmp1, tmp1, 3);
1204 tcg_gen_neg_i64(tmp1, tmp1);
1205 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1206 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1207 tcg_temp_free(tmp1);
1208 }
1209 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1210 }
1211 }
1212
1213 /* EXTBL, EXTWL, EXTLL, EXTQL */
1214 static void gen_ext_l(int ra, int rb, int rc, int islit,
1215 uint8_t lit, uint8_t byte_mask)
1216 {
1217 if (unlikely(rc == 31))
1218 return;
1219 else if (unlikely(ra == 31))
1220 tcg_gen_movi_i64(cpu_ir[rc], 0);
1221 else {
1222 if (islit) {
1223 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1224 } else {
1225 TCGv tmp = tcg_temp_new();
1226 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1227 tcg_gen_shli_i64(tmp, tmp, 3);
1228 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1229 tcg_temp_free(tmp);
1230 }
1231 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1232 }
1233 }
1234
1235 /* INSWH, INSLH, INSQH */
1236 static void gen_ins_h(int ra, int rb, int rc, int islit,
1237 uint8_t lit, uint8_t byte_mask)
1238 {
1239 if (unlikely(rc == 31))
1240 return;
1241 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1242 tcg_gen_movi_i64(cpu_ir[rc], 0);
1243 else {
1244 TCGv tmp = tcg_temp_new();
1245
1246 /* The instruction description has us left-shift the byte mask
1247 and extract bits <15:8> and apply that zap at the end. This
1248 is equivalent to simply performing the zap first and shifting
1249 afterward. */
1250 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1251
1252 if (islit) {
1253 /* Note that we have handled the lit==0 case above. */
1254 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1255 } else {
1256 TCGv shift = tcg_temp_new();
1257
1258 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1259 Do this portably by splitting the shift into two parts:
1260 shift_count-1 and 1. Arrange for the -1 by using
1261 ones-complement instead of twos-complement in the negation:
1262 ~((B & 7) * 8) & 63. */
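/* Worked example: (B & 7) == 2 gives ~16 & 63 == 47, and 47 + 1 == 48
   == 64 - 16, the desired shift; (B & 7) == 0 gives 63 + 1 == 64, so
   the result is correctly forced to zero. */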
1263
1264 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1265 tcg_gen_shli_i64(shift, shift, 3);
1266 tcg_gen_not_i64(shift, shift);
1267 tcg_gen_andi_i64(shift, shift, 0x3f);
1268
1269 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1270 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1271 tcg_temp_free(shift);
1272 }
1273 tcg_temp_free(tmp);
1274 }
1275 }
1276
1277 /* INSBL, INSWL, INSLL, INSQL */
1278 static void gen_ins_l(int ra, int rb, int rc, int islit,
1279 uint8_t lit, uint8_t byte_mask)
1280 {
1281 if (unlikely(rc == 31))
1282 return;
1283 else if (unlikely(ra == 31))
1284 tcg_gen_movi_i64(cpu_ir[rc], 0);
1285 else {
1286 TCGv tmp = tcg_temp_new();
1287
1288 /* The instruction description has us left-shift the byte mask
1289 the same number of byte slots as the data and apply the zap
1290 at the end. This is equivalent to simply performing the zap
1291 first and shifting afterward. */
1292 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1293
1294 if (islit) {
1295 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1296 } else {
1297 TCGv shift = tcg_temp_new();
1298 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1299 tcg_gen_shli_i64(shift, shift, 3);
1300 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1301 tcg_temp_free(shift);
1302 }
1303 tcg_temp_free(tmp);
1304 }
1305 }
1306
1307 /* MSKWH, MSKLH, MSKQH */
1308 static void gen_msk_h(int ra, int rb, int rc, int islit,
1309 uint8_t lit, uint8_t byte_mask)
1310 {
1311 if (unlikely(rc == 31))
1312 return;
1313 else if (unlikely(ra == 31))
1314 tcg_gen_movi_i64(cpu_ir[rc], 0);
1315 else if (islit) {
1316 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1317 } else {
1318 TCGv shift = tcg_temp_new();
1319 TCGv mask = tcg_temp_new();
1320
1321 /* The instruction description is as above, where the byte_mask
1322 is shifted left, and then we extract bits <15:8>. This can be
1323 emulated with a right-shift on the expanded byte mask. This
1324 requires extra care because for an input <2:0> == 0 we need a
1325 shift of 64 bits in order to generate a zero. This is done by
1326 splitting the shift into two parts, the variable shift - 1
1327 followed by a constant 1 shift. The code we expand below is
1328 equivalent to ~((B & 7) * 8) & 63. */
1329
1330 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1331 tcg_gen_shli_i64(shift, shift, 3);
1332 tcg_gen_not_i64(shift, shift);
1333 tcg_gen_andi_i64(shift, shift, 0x3f);
1334 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1335 tcg_gen_shr_i64(mask, mask, shift);
1336 tcg_gen_shri_i64(mask, mask, 1);
1337
1338 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1339
1340 tcg_temp_free(mask);
1341 tcg_temp_free(shift);
1342 }
1343 }
1344
1345 /* MSKBL, MSKWL, MSKLL, MSKQL */
1346 static void gen_msk_l(int ra, int rb, int rc, int islit,
1347 uint8_t lit, uint8_t byte_mask)
1348 {
1349 if (unlikely(rc == 31))
1350 return;
1351 else if (unlikely(ra == 31))
1352 tcg_gen_movi_i64(cpu_ir[rc], 0);
1353 else if (islit) {
1354 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1355 } else {
1356 TCGv shift = tcg_temp_new();
1357 TCGv mask = tcg_temp_new();
1358
1359 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1360 tcg_gen_shli_i64(shift, shift, 3);
1361 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1362 tcg_gen_shl_i64(mask, mask, shift);
1363
1364 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1365
1366 tcg_temp_free(mask);
1367 tcg_temp_free(shift);
1368 }
1369 }
1370
1371 /* Code to call arith3 helpers */
1372 #define ARITH3(name) \
1373 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1374 uint8_t lit) \
1375 { \
1376 if (unlikely(rc == 31)) \
1377 return; \
1378 \
1379 if (ra != 31) { \
1380 if (islit) { \
1381 TCGv tmp = tcg_const_i64(lit); \
1382 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1383 tcg_temp_free(tmp); \
1384 } else \
1385 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1386 } else { \
1387 TCGv tmp1 = tcg_const_i64(0); \
1388 if (islit) { \
1389 TCGv tmp2 = tcg_const_i64(lit); \
1390 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1391 tcg_temp_free(tmp2); \
1392 } else \
1393 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1394 tcg_temp_free(tmp1); \
1395 } \
1396 }
1397 ARITH3(cmpbge)
1398 ARITH3(addlv)
1399 ARITH3(sublv)
1400 ARITH3(addqv)
1401 ARITH3(subqv)
1402 ARITH3(umulh)
1403 ARITH3(mullv)
1404 ARITH3(mulqv)
1405 ARITH3(minub8)
1406 ARITH3(minsb8)
1407 ARITH3(minuw4)
1408 ARITH3(minsw4)
1409 ARITH3(maxub8)
1410 ARITH3(maxsb8)
1411 ARITH3(maxuw4)
1412 ARITH3(maxsw4)
1413 ARITH3(perr)
1414
1415 #define MVIOP2(name) \
1416 static inline void glue(gen_, name)(int rb, int rc) \
1417 { \
1418 if (unlikely(rc == 31)) \
1419 return; \
1420 if (unlikely(rb == 31)) \
1421 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1422 else \
1423 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1424 }
1425 MVIOP2(pklb)
1426 MVIOP2(pkwb)
1427 MVIOP2(unpkbl)
1428 MVIOP2(unpkbw)
1429
1430 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1431 int islit, uint8_t lit)
1432 {
1433 TCGv va, vb;
1434
1435 if (unlikely(rc == 31)) {
1436 return;
1437 }
1438
1439 if (ra == 31) {
1440 va = tcg_const_i64(0);
1441 } else {
1442 va = cpu_ir[ra];
1443 }
1444 if (islit) {
1445 vb = tcg_const_i64(lit);
1446 } else {
1447 vb = cpu_ir[rb];
1448 }
1449
1450 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1451
1452 if (ra == 31) {
1453 tcg_temp_free(va);
1454 }
1455 if (islit) {
1456 tcg_temp_free(vb);
1457 }
1458 }
1459
1460 static void gen_rx(int ra, int set)
1461 {
1462 TCGv_i32 tmp;
1463
1464 if (ra != 31) {
1465 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1466 }
1467
1468 tmp = tcg_const_i32(set);
1469 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1470 tcg_temp_free_i32(tmp);
1471 }
1472
1473 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1474 {
1475 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1476 to internal cpu registers. */
1477
1478 /* Unprivileged PAL call */
1479 if (palcode >= 0x80 && palcode < 0xC0) {
1480 switch (palcode) {
1481 case 0x86:
1482 /* IMB */
1483 /* No-op inside QEMU. */
1484 break;
1485 case 0x9E:
1486 /* RDUNIQUE */
1487 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1488 break;
1489 case 0x9F:
1490 /* WRUNIQUE */
1491 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1492 break;
1493 default:
1494 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
1495 }
1496 return NO_EXIT;
1497 }
1498
1499 #ifndef CONFIG_USER_ONLY
1500 /* Privileged PAL code */
1501 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1502 switch (palcode) {
1503 case 0x01:
1504 /* CFLUSH */
1505 /* No-op inside QEMU. */
1506 break;
1507 case 0x02:
1508 /* DRAINA */
1509 /* No-op inside QEMU. */
1510 break;
1511 case 0x2D:
1512 /* WRVPTPTR */
1513 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
1514 break;
1515 case 0x31:
1516 /* WRVAL */
1517 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1518 break;
1519 case 0x32:
1520 /* RDVAL */
1521 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1522 break;
1523
1524 case 0x35: {
1525 /* SWPIPL */
1526 TCGv tmp;
1527
1528 /* Note that we already know we're in kernel mode, so we know
1529 that PS only contains the 3 IPL bits. */
1530 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
1531
1532 /* But be sure to store only the 3 IPL bits from the user. */
1533 tmp = tcg_temp_new();
1534 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
1535 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
1536 tcg_temp_free(tmp);
1537 break;
1538 }
1539
1540 case 0x36:
1541 /* RDPS */
1542 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
1543 break;
1544 case 0x38:
1545 /* WRUSP */
1546 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1547 break;
1548 case 0x3A:
1549 /* RDUSP */
1550 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1551 break;
1552 case 0x3C:
1553 /* WHAMI */
1554 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
1555 offsetof(CPUState, cpu_index));
1556 break;
1557
1558 default:
1559 return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
1560 }
1561 return NO_EXIT;
1562 }
1563 #endif
1564
1565 return gen_invalid(ctx);
1566 }
1567
1568 #ifndef CONFIG_USER_ONLY
1569
1570 #define PR_BYTE 0x100000
1571 #define PR_LONG 0x200000
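/* cpu_pr_data() returns the offsetof() of the field backing a processor
   register, with PR_BYTE or PR_LONG ORed in when that field is narrower than
   64 bits; the flag bits sit well above any struct offset used here, so they
   are simply masked back off before the load or store. */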
1572
1573 static int cpu_pr_data(int pr)
1574 {
1575 switch (pr) {
1576 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1577 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1578 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1579 case 3: return offsetof(CPUAlphaState, trap_arg0);
1580 case 4: return offsetof(CPUAlphaState, trap_arg1);
1581 case 5: return offsetof(CPUAlphaState, trap_arg2);
1582 case 6: return offsetof(CPUAlphaState, exc_addr);
1583 case 7: return offsetof(CPUAlphaState, palbr);
1584 case 8: return offsetof(CPUAlphaState, ptbr);
1585 case 9: return offsetof(CPUAlphaState, vptptr);
1586 case 10: return offsetof(CPUAlphaState, unique);
1587 case 11: return offsetof(CPUAlphaState, sysval);
1588 case 12: return offsetof(CPUAlphaState, usp);
1589
1590 case 32 ... 39:
1591 return offsetof(CPUAlphaState, shadow[pr - 32]);
1592 case 40 ... 63:
1593 return offsetof(CPUAlphaState, scratch[pr - 40]);
1594 }
1595 return 0;
1596 }
1597
1598 static void gen_mfpr(int ra, int regno)
1599 {
1600 int data = cpu_pr_data(regno);
1601
1602 /* In our emulated PALcode, these processor registers have no
1603 side effects from reading. */
1604 if (ra == 31) {
1605 return;
1606 }
1607
1608 /* The basic registers are data only, and unknown registers
1609 are read-zero, write-ignore. */
1610 if (data == 0) {
1611 tcg_gen_movi_i64(cpu_ir[ra], 0);
1612 } else if (data & PR_BYTE) {
1613 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1614 } else if (data & PR_LONG) {
1615 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1616 } else {
1617 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1618 }
1619 }
1620
1621 static void gen_mtpr(int rb, int regno)
1622 {
1623 TCGv tmp;
1624
1625 if (rb == 31) {
1626 tmp = tcg_const_i64(0);
1627 } else {
1628 tmp = cpu_ir[rb];
1629 }
1630
1631 /* These two register numbers perform a TLB cache flush. Thankfully we
1632 can only do this inside PALmode, which means that the current basic
1633 block cannot be affected by the change in mappings. */
1634 if (regno == 255) {
1635 /* TBIA */
1636 gen_helper_tbia();
1637 } else if (regno == 254) {
1638 /* TBIS */
1639 gen_helper_tbis(tmp);
1640 } else {
1641 /* The basic registers are data only, and unknown registers
1642 are read-zero, write-ignore. */
1643 int data = cpu_pr_data(regno);
1644 if (data != 0) {
1645 if (data & PR_BYTE) {
1646 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1647 } else if (data & PR_LONG) {
1648 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1649 } else {
1650 tcg_gen_st_i64(tmp, cpu_env, data);
1651 }
1652 }
1653 }
1654
1655 if (rb == 31) {
1656 tcg_temp_free(tmp);
1657 }
1658 }
1659 #endif /* !CONFIG_USER_ONLY */
1660
1661 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1662 {
1663 uint32_t palcode;
1664 int32_t disp21, disp16;
1665 #ifndef CONFIG_USER_ONLY
1666 int32_t disp12;
1667 #endif
1668 uint16_t fn11;
1669 uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
1670 uint8_t lit;
1671 ExitStatus ret;
1672
1673 /* Decode all instruction fields */
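/* All Alpha instructions are 32 bits: the opcode is in bits <31:26> and ra
   in <25:21>.  Memory-format insns put rb in <20:16> and a 16-bit
   displacement in <15:0>; branch-format insns carry a 21-bit displacement in
   <20:0>; operate-format insns put rc in <4:0>, the function code in <11:5>
   (integer, 7 bits) or <15:5> (FP, 11 bits), and either rb in <20:16> or,
   when bit 12 is set, an 8-bit literal in <20:13>.  The extractions below
   follow that layout. */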
1674 opc = insn >> 26;
1675 ra = (insn >> 21) & 0x1F;
1676 rb = (insn >> 16) & 0x1F;
1677 rc = insn & 0x1F;
1678 real_islit = islit = (insn >> 12) & 1;
1679 if (rb == 31 && !islit) {
1680 islit = 1;
1681 lit = 0;
1682 } else
1683 lit = (insn >> 13) & 0xFF;
1684 palcode = insn & 0x03FFFFFF;
1685 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1686 disp16 = (int16_t)(insn & 0x0000FFFF);
1687 #ifndef CONFIG_USER_ONLY
1688 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1689 #endif
1690 fn11 = (insn >> 5) & 0x000007FF;
1691 fpfn = fn11 & 0x3F;
1692 fn7 = (insn >> 5) & 0x0000007F;
1693 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1694 opc, ra, rb, rc, disp16);
1695
1696 ret = NO_EXIT;
1697 switch (opc) {
1698 case 0x00:
1699 /* CALL_PAL */
1700 ret = gen_call_pal(ctx, palcode);
1701 break;
1702 case 0x01:
1703 /* OPC01 */
1704 goto invalid_opc;
1705 case 0x02:
1706 /* OPC02 */
1707 goto invalid_opc;
1708 case 0x03:
1709 /* OPC03 */
1710 goto invalid_opc;
1711 case 0x04:
1712 /* OPC04 */
1713 goto invalid_opc;
1714 case 0x05:
1715 /* OPC05 */
1716 goto invalid_opc;
1717 case 0x06:
1718 /* OPC06 */
1719 goto invalid_opc;
1720 case 0x07:
1721 /* OPC07 */
1722 goto invalid_opc;
1723 case 0x08:
1724 /* LDA */
1725 if (likely(ra != 31)) {
1726 if (rb != 31)
1727 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1728 else
1729 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1730 }
1731 break;
1732 case 0x09:
1733 /* LDAH */
1734 if (likely(ra != 31)) {
1735 if (rb != 31)
1736 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1737 else
1738 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1739 }
1740 break;
1741 case 0x0A:
1742 /* LDBU */
1743 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1744 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1745 break;
1746 }
1747 goto invalid_opc;
1748 case 0x0B:
1749 /* LDQ_U */
1750 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1751 break;
1752 case 0x0C:
1753 /* LDWU */
1754 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1755 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1756 break;
1757 }
1758 goto invalid_opc;
1759 case 0x0D:
1760 /* STW */
1761 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1762 break;
1763 case 0x0E:
1764 /* STB */
1765 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1766 break;
1767 case 0x0F:
1768 /* STQ_U */
1769 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1770 break;
1771 case 0x10:
1772 switch (fn7) {
1773 case 0x00:
1774 /* ADDL */
1775 if (likely(rc != 31)) {
1776 if (ra != 31) {
1777 if (islit) {
1778 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1779 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1780 } else {
1781 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1782 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1783 }
1784 } else {
1785 if (islit)
1786 tcg_gen_movi_i64(cpu_ir[rc], lit);
1787 else
1788 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1789 }
1790 }
1791 break;
1792 case 0x02:
1793 /* S4ADDL */
1794 if (likely(rc != 31)) {
1795 if (ra != 31) {
1796 TCGv tmp = tcg_temp_new();
1797 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1798 if (islit)
1799 tcg_gen_addi_i64(tmp, tmp, lit);
1800 else
1801 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1802 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1803 tcg_temp_free(tmp);
1804 } else {
1805 if (islit)
1806 tcg_gen_movi_i64(cpu_ir[rc], lit);
1807 else
1808 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1809 }
1810 }
1811 break;
1812 case 0x09:
1813 /* SUBL */
1814 if (likely(rc != 31)) {
1815 if (ra != 31) {
1816 if (islit)
1817 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1818 else
1819 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1820 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1821 } else {
1822 if (islit)
1823 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1824 else {
1825 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1826 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1827 }
1828 }
}
1829 break;
1830 case 0x0B:
1831 /* S4SUBL */
1832 if (likely(rc != 31)) {
1833 if (ra != 31) {
1834 TCGv tmp = tcg_temp_new();
1835 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1836 if (islit)
1837 tcg_gen_subi_i64(tmp, tmp, lit);
1838 else
1839 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1840 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1841 tcg_temp_free(tmp);
1842 } else {
1843 if (islit)
1844 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1845 else {
1846 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1847 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1848 }
1849 }
1850 }
1851 break;
1852 case 0x0F:
1853 /* CMPBGE */
1854 gen_cmpbge(ra, rb, rc, islit, lit);
1855 break;
1856 case 0x12:
1857 /* S8ADDL */
1858 if (likely(rc != 31)) {
1859 if (ra != 31) {
1860 TCGv tmp = tcg_temp_new();
1861 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1862 if (islit)
1863 tcg_gen_addi_i64(tmp, tmp, lit);
1864 else
1865 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1866 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1867 tcg_temp_free(tmp);
1868 } else {
1869 if (islit)
1870 tcg_gen_movi_i64(cpu_ir[rc], lit);
1871 else
1872 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1873 }
1874 }
1875 break;
1876 case 0x1B:
1877 /* S8SUBL */
1878 if (likely(rc != 31)) {
1879 if (ra != 31) {
1880 TCGv tmp = tcg_temp_new();
1881 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1882 if (islit)
1883 tcg_gen_subi_i64(tmp, tmp, lit);
1884 else
1885 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1886 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1887 tcg_temp_free(tmp);
1888 } else {
1889 if (islit)
1890 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1891 else
1892 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1893 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1894 }
1895 }
1897 break;
1898 case 0x1D:
1899 /* CMPULT */
1900 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1901 break;
1902 case 0x20:
1903 /* ADDQ */
1904 if (likely(rc != 31)) {
1905 if (ra != 31) {
1906 if (islit)
1907 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1908 else
1909 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1910 } else {
1911 if (islit)
1912 tcg_gen_movi_i64(cpu_ir[rc], lit);
1913 else
1914 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1915 }
1916 }
1917 break;
1918 case 0x22:
1919 /* S4ADDQ */
1920 if (likely(rc != 31)) {
1921 if (ra != 31) {
1922 TCGv tmp = tcg_temp_new();
1923 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1924 if (islit)
1925 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1926 else
1927 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1928 tcg_temp_free(tmp);
1929 } else {
1930 if (islit)
1931 tcg_gen_movi_i64(cpu_ir[rc], lit);
1932 else
1933 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1934 }
1935 }
1936 break;
1937 case 0x29:
1938 /* SUBQ */
1939 if (likely(rc != 31)) {
1940 if (ra != 31) {
1941 if (islit)
1942 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1943 else
1944 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1945 } else {
1946 if (islit)
1947 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1948 else
1949 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1950 }
1951 }
1952 break;
1953 case 0x2B:
1954 /* S4SUBQ */
1955 if (likely(rc != 31)) {
1956 if (ra != 31) {
1957 TCGv tmp = tcg_temp_new();
1958 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1959 if (islit)
1960 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1961 else
1962 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1963 tcg_temp_free(tmp);
1964 } else {
1965 if (islit)
1966 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1967 else
1968 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1969 }
1970 }
1971 break;
1972 case 0x2D:
1973 /* CMPEQ */
1974 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1975 break;
1976 case 0x32:
1977 /* S8ADDQ */
1978 if (likely(rc != 31)) {
1979 if (ra != 31) {
1980 TCGv tmp = tcg_temp_new();
1981 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1982 if (islit)
1983 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1984 else
1985 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1986 tcg_temp_free(tmp);
1987 } else {
1988 if (islit)
1989 tcg_gen_movi_i64(cpu_ir[rc], lit);
1990 else
1991 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1992 }
1993 }
1994 break;
1995 case 0x3B:
1996 /* S8SUBQ */
1997 if (likely(rc != 31)) {
1998 if (ra != 31) {
1999 TCGv tmp = tcg_temp_new();
2000 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2001 if (islit)
2002 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2003 else
2004 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2005 tcg_temp_free(tmp);
2006 } else {
2007 if (islit)
2008 tcg_gen_movi_i64(cpu_ir[rc], -lit);
2009 else
2010 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2011 }
2012 }
2013 break;
2014 case 0x3D:
2015 /* CMPULE */
2016 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2017 break;
2018 case 0x40:
2019 /* ADDL/V */
2020 gen_addlv(ra, rb, rc, islit, lit);
2021 break;
2022 case 0x49:
2023 /* SUBL/V */
2024 gen_sublv(ra, rb, rc, islit, lit);
2025 break;
2026 case 0x4D:
2027 /* CMPLT */
2028 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2029 break;
2030 case 0x60:
2031 /* ADDQ/V */
2032 gen_addqv(ra, rb, rc, islit, lit);
2033 break;
2034 case 0x69:
2035 /* SUBQ/V */
2036 gen_subqv(ra, rb, rc, islit, lit);
2037 break;
2038 case 0x6D:
2039 /* CMPLE */
2040 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2041 break;
2042 default:
2043 goto invalid_opc;
2044 }
2045 break;
2046 case 0x11:
2047 switch (fn7) {
2048 case 0x00:
2049 /* AND */
2050 if (likely(rc != 31)) {
2051 if (ra == 31)
2052 tcg_gen_movi_i64(cpu_ir[rc], 0);
2053 else if (islit)
2054 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2055 else
2056 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2057 }
2058 break;
2059 case 0x08:
2060 /* BIC */
2061 if (likely(rc != 31)) {
2062 if (ra != 31) {
2063 if (islit)
2064 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2065 else
2066 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2067 } else
2068 tcg_gen_movi_i64(cpu_ir[rc], 0);
2069 }
2070 break;
2071 case 0x14:
2072 /* CMOVLBS */
2073 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2074 break;
2075 case 0x16:
2076 /* CMOVLBC */
2077 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2078 break;
2079 case 0x20:
2080 /* BIS */
2081 if (likely(rc != 31)) {
2082 if (ra != 31) {
2083 if (islit)
2084 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2085 else
2086 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2087 } else {
2088 if (islit)
2089 tcg_gen_movi_i64(cpu_ir[rc], lit);
2090 else
2091 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2092 }
2093 }
2094 break;
2095 case 0x24:
2096 /* CMOVEQ */
2097 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2098 break;
2099 case 0x26:
2100 /* CMOVNE */
2101 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2102 break;
2103 case 0x28:
2104 /* ORNOT */
2105 if (likely(rc != 31)) {
2106 if (ra != 31) {
2107 if (islit)
2108 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2109 else
2110 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2111 } else {
2112 if (islit)
2113 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2114 else
2115 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2116 }
2117 }
2118 break;
2119 case 0x40:
2120 /* XOR */
2121 if (likely(rc != 31)) {
2122 if (ra != 31) {
2123 if (islit)
2124 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2125 else
2126 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2127 } else {
2128 if (islit)
2129 tcg_gen_movi_i64(cpu_ir[rc], lit);
2130 else
2131 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2132 }
2133 }
2134 break;
2135 case 0x44:
2136 /* CMOVLT */
2137 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2138 break;
2139 case 0x46:
2140 /* CMOVGE */
2141 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2142 break;
2143 case 0x48:
2144 /* EQV */
2145 if (likely(rc != 31)) {
2146 if (ra != 31) {
2147 if (islit)
2148 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2149 else
2150 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2151 } else {
2152 if (islit)
2153 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2154 else
2155 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2156 }
2157 }
2158 break;
2159 case 0x61:
2160 /* AMASK */
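/* AMASK clears the operand bits that correspond to implemented architecture
   extensions (bit 0 is BWX, etc.); here the implemented set is taken from
   the TB flags. */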
2161 if (likely(rc != 31)) {
2162 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2163
2164 if (islit) {
2165 tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2166 } else {
2167 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2168 }
2169 }
2170 break;
2171 case 0x64:
2172 /* CMOVLE */
2173 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2174 break;
2175 case 0x66:
2176 /* CMOVGT */
2177 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2178 break;
2179 case 0x6C:
2180 /* IMPLVER */
2181 if (rc != 31)
2182 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2183 break;
2184 default:
2185 goto invalid_opc;
2186 }
2187 break;
2188 case 0x12:
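/* Shift and byte-manipulation instructions (MSK/EXT/INS, ZAP, SLL/SRL/SRA). */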
2189 switch (fn7) {
2190 case 0x02:
2191 /* MSKBL */
2192 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2193 break;
2194 case 0x06:
2195 /* EXTBL */
2196 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2197 break;
2198 case 0x0B:
2199 /* INSBL */
2200 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2201 break;
2202 case 0x12:
2203 /* MSKWL */
2204 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2205 break;
2206 case 0x16:
2207 /* EXTWL */
2208 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2209 break;
2210 case 0x1B:
2211 /* INSWL */
2212 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2213 break;
2214 case 0x22:
2215 /* MSKLL */
2216 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2217 break;
2218 case 0x26:
2219 /* EXTLL */
2220 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2221 break;
2222 case 0x2B:
2223 /* INSLL */
2224 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2225 break;
2226 case 0x30:
2227 /* ZAP */
2228 gen_zap(ra, rb, rc, islit, lit);
2229 break;
2230 case 0x31:
2231 /* ZAPNOT */
2232 gen_zapnot(ra, rb, rc, islit, lit);
2233 break;
2234 case 0x32:
2235 /* MSKQL */
2236 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2237 break;
2238 case 0x34:
2239 /* SRL */
2240 if (likely(rc != 31)) {
2241 if (ra != 31) {
2242 if (islit)
2243 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2244 else {
2245 TCGv shift = tcg_temp_new();
2246 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2247 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2248 tcg_temp_free(shift);
2249 }
2250 } else
2251 tcg_gen_movi_i64(cpu_ir[rc], 0);
2252 }
2253 break;
2254 case 0x36:
2255 /* EXTQL */
2256 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2257 break;
2258 case 0x39:
2259 /* SLL */
2260 if (likely(rc != 31)) {
2261 if (ra != 31) {
2262 if (islit)
2263 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2264 else {
2265 TCGv shift = tcg_temp_new();
2266 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2267 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2268 tcg_temp_free(shift);
2269 }
2270 } else
2271 tcg_gen_movi_i64(cpu_ir[rc], 0);
2272 }
2273 break;
2274 case 0x3B:
2275 /* INSQL */
2276 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2277 break;
2278 case 0x3C:
2279 /* SRA */
2280 if (likely(rc != 31)) {
2281 if (ra != 31) {
2282 if (islit)
2283 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2284 else {
2285 TCGv shift = tcg_temp_new();
2286 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2287 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2288 tcg_temp_free(shift);
2289 }
2290 } else
2291 tcg_gen_movi_i64(cpu_ir[rc], 0);
2292 }
2293 break;
2294 case 0x52:
2295 /* MSKWH */
2296 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2297 break;
2298 case 0x57:
2299 /* INSWH */
2300 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2301 break;
2302 case 0x5A:
2303 /* EXTWH */
2304 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2305 break;
2306 case 0x62:
2307 /* MSKLH */
2308 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2309 break;
2310 case 0x67:
2311 /* INSLH */
2312 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2313 break;
2314 case 0x6A:
2315 /* EXTLH */
2316 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2317 break;
2318 case 0x72:
2319 /* MSKQH */
2320 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2321 break;
2322 case 0x77:
2323 /* INSQH */
2324 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2325 break;
2326 case 0x7A:
2327 /* EXTQH */
2328 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2329 break;
2330 default:
2331 goto invalid_opc;
2332 }
2333 break;
2334 case 0x13:
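/* Integer multiply instructions. */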
2335 switch (fn7) {
2336 case 0x00:
2337 /* MULL */
2338 if (likely(rc != 31)) {
2339 if (ra == 31)
2340 tcg_gen_movi_i64(cpu_ir[rc], 0);
2341 else {
2342 if (islit)
2343 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2344 else
2345 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2346 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2347 }
2348 }
2349 break;
2350 case 0x20:
2351 /* MULQ */
2352 if (likely(rc != 31)) {
2353 if (ra == 31)
2354 tcg_gen_movi_i64(cpu_ir[rc], 0);
2355 else if (islit)
2356 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2357 else
2358 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2359 }
2360 break;
2361 case 0x30:
2362 /* UMULH */
2363 gen_umulh(ra, rb, rc, islit, lit);
2364 break;
2365 case 0x40:
2366 /* MULL/V */
2367 gen_mullv(ra, rb, rc, islit, lit);
2368 break;
2369 case 0x60:
2370 /* MULQ/V */
2371 gen_mulqv(ra, rb, rc, islit, lit);
2372 break;
2373 default:
2374 goto invalid_opc;
2375 }
2376 break;
2377 case 0x14:
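/* Integer-to-FP register moves and square roots; all of these are
   gated on the FIX extension. */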
2378 switch (fpfn) { /* fn11 & 0x3F */
2379 case 0x04:
2380 /* ITOFS */
2381 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2382 goto invalid_opc;
2383 }
2384 if (likely(rc != 31)) {
2385 if (ra != 31) {
2386 TCGv_i32 tmp = tcg_temp_new_i32();
2387 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2388 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2389 tcg_temp_free_i32(tmp);
2390 } else
2391 tcg_gen_movi_i64(cpu_fir[rc], 0);
2392 }
2393 break;
2394 case 0x0A:
2395 /* SQRTF */
2396 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2397 gen_fsqrtf(rb, rc);
2398 break;
2399 }
2400 goto invalid_opc;
2401 case 0x0B:
2402 /* SQRTS */
2403 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2404 gen_fsqrts(ctx, rb, rc, fn11);
2405 break;
2406 }
2407 goto invalid_opc;
2408 case 0x14:
2409 /* ITOFF */
2410 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2411 goto invalid_opc;
2412 }
2413 if (likely(rc != 31)) {
2414 if (ra != 31) {
2415 TCGv_i32 tmp = tcg_temp_new_i32();
2416 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2417 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2418 tcg_temp_free_i32(tmp);
2419 } else
2420 tcg_gen_movi_i64(cpu_fir[rc], 0);
2421 }
2422 break;
2423 case 0x24:
2424 /* ITOFT */
2425 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2426 goto invalid_opc;
2427 }
2428 if (likely(rc != 31)) {
2429 if (ra != 31)
2430 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2431 else
2432 tcg_gen_movi_i64(cpu_fir[rc], 0);
2433 }
2434 break;
2435 case 0x2A:
2436 /* SQRTG */
2437 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2438 gen_fsqrtg(rb, rc);
2439 break;
2440 }
2441 goto invalid_opc;
2442 case 0x2B:
2443 /* SQRTT */
2444 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2445 gen_fsqrtt(ctx, rb, rc, fn11);
2446 break;
2447 }
2448 goto invalid_opc;
2449 default:
2450 goto invalid_opc;
2451 }
2452 break;
2453 case 0x15:
2454 /* VAX floating point */
2455 /* XXX: rounding mode and trap are ignored (!) */
2456 switch (fpfn) { /* fn11 & 0x3F */
2457 case 0x00:
2458 /* ADDF */
2459 gen_faddf(ra, rb, rc);
2460 break;
2461 case 0x01:
2462 /* SUBF */
2463 gen_fsubf(ra, rb, rc);
2464 break;
2465 case 0x02:
2466 /* MULF */
2467 gen_fmulf(ra, rb, rc);
2468 break;
2469 case 0x03:
2470 /* DIVF */
2471 gen_fdivf(ra, rb, rc);
2472 break;
2473 case 0x1E:
2474 /* CVTDG */
2475 #if 0 // TODO
2476 gen_fcvtdg(rb, rc);
2477 #else
2478 goto invalid_opc;
2479 #endif
2480 break;
2481 case 0x20:
2482 /* ADDG */
2483 gen_faddg(ra, rb, rc);
2484 break;
2485 case 0x21:
2486 /* SUBG */
2487 gen_fsubg(ra, rb, rc);
2488 break;
2489 case 0x22:
2490 /* MULG */
2491 gen_fmulg(ra, rb, rc);
2492 break;
2493 case 0x23:
2494 /* DIVG */
2495 gen_fdivg(ra, rb, rc);
2496 break;
2497 case 0x25:
2498 /* CMPGEQ */
2499 gen_fcmpgeq(ra, rb, rc);
2500 break;
2501 case 0x26:
2502 /* CMPGLT */
2503 gen_fcmpglt(ra, rb, rc);
2504 break;
2505 case 0x27:
2506 /* CMPGLE */
2507 gen_fcmpgle(ra, rb, rc);
2508 break;
2509 case 0x2C:
2510 /* CVTGF */
2511 gen_fcvtgf(rb, rc);
2512 break;
2513 case 0x2D:
2514 /* CVTGD */
2515 #if 0 // TODO
2516 gen_fcvtgd(rb, rc);
2517 #else
2518 goto invalid_opc;
2519 #endif
2520 break;
2521 case 0x2F:
2522 /* CVTGQ */
2523 gen_fcvtgq(rb, rc);
2524 break;
2525 case 0x3C:
2526 /* CVTQF */
2527 gen_fcvtqf(rb, rc);
2528 break;
2529 case 0x3E:
2530 /* CVTQG */
2531 gen_fcvtqg(rb, rc);
2532 break;
2533 default:
2534 goto invalid_opc;
2535 }
2536 break;
2537 case 0x16:
2538 /* IEEE floating-point */
2539 switch (fpfn) { /* fn11 & 0x3F */
2540 case 0x00:
2541 /* ADDS */
2542 gen_fadds(ctx, ra, rb, rc, fn11);
2543 break;
2544 case 0x01:
2545 /* SUBS */
2546 gen_fsubs(ctx, ra, rb, rc, fn11);
2547 break;
2548 case 0x02:
2549 /* MULS */
2550 gen_fmuls(ctx, ra, rb, rc, fn11);
2551 break;
2552 case 0x03:
2553 /* DIVS */
2554 gen_fdivs(ctx, ra, rb, rc, fn11);
2555 break;
2556 case 0x20:
2557 /* ADDT */
2558 gen_faddt(ctx, ra, rb, rc, fn11);
2559 break;
2560 case 0x21:
2561 /* SUBT */
2562 gen_fsubt(ctx, ra, rb, rc, fn11);
2563 break;
2564 case 0x22:
2565 /* MULT */
2566 gen_fmult(ctx, ra, rb, rc, fn11);
2567 break;
2568 case 0x23:
2569 /* DIVT */
2570 gen_fdivt(ctx, ra, rb, rc, fn11);
2571 break;
2572 case 0x24:
2573 /* CMPTUN */
2574 gen_fcmptun(ctx, ra, rb, rc, fn11);
2575 break;
2576 case 0x25:
2577 /* CMPTEQ */
2578 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2579 break;
2580 case 0x26:
2581 /* CMPTLT */
2582 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2583 break;
2584 case 0x27:
2585 /* CMPTLE */
2586 gen_fcmptle(ctx, ra, rb, rc, fn11);
2587 break;
2588 case 0x2C:
2589 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2590 /* CVTST */
2591 gen_fcvtst(ctx, rb, rc, fn11);
2592 } else {
2593 /* CVTTS */
2594 gen_fcvtts(ctx, rb, rc, fn11);
2595 }
2596 break;
2597 case 0x2F:
2598 /* CVTTQ */
2599 gen_fcvttq(ctx, rb, rc, fn11);
2600 break;
2601 case 0x3C:
2602 /* CVTQS */
2603 gen_fcvtqs(ctx, rb, rc, fn11);
2604 break;
2605 case 0x3E:
2606 /* CVTQT */
2607 gen_fcvtqt(ctx, rb, rc, fn11);
2608 break;
2609 default:
2610 goto invalid_opc;
2611 }
2612 break;
2613 case 0x17:
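/* FP copy-sign, FP conditional moves, FPCR access, and CVTLQ/CVTQL. */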
2614 switch (fn11) {
2615 case 0x010:
2616 /* CVTLQ */
2617 gen_fcvtlq(rb, rc);
2618 break;
2619 case 0x020:
2620 if (likely(rc != 31)) {
2621 if (ra == rb) {
2622 /* FMOV */
2623 if (ra == 31)
2624 tcg_gen_movi_i64(cpu_fir[rc], 0);
2625 else
2626 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2627 } else {
2628 /* CPYS */
2629 gen_fcpys(ra, rb, rc);
2630 }
2631 }
2632 break;
2633 case 0x021:
2634 /* CPYSN */
2635 gen_fcpysn(ra, rb, rc);
2636 break;
2637 case 0x022:
2638 /* CPYSE */
2639 gen_fcpyse(ra, rb, rc);
2640 break;
2641 case 0x024:
2642 /* MT_FPCR */
2643 if (likely(ra != 31))
2644 gen_helper_store_fpcr(cpu_fir[ra]);
2645 else {
2646 TCGv tmp = tcg_const_i64(0);
2647 gen_helper_store_fpcr(tmp);
2648 tcg_temp_free(tmp);
2649 }
2650 break;
2651 case 0x025:
2652 /* MF_FPCR */
2653 if (likely(ra != 31))
2654 gen_helper_load_fpcr(cpu_fir[ra]);
2655 break;
2656 case 0x02A:
2657 /* FCMOVEQ */
2658 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2659 break;
2660 case 0x02B:
2661 /* FCMOVNE */
2662 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2663 break;
2664 case 0x02C:
2665 /* FCMOVLT */
2666 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2667 break;
2668 case 0x02D:
2669 /* FCMOVGE */
2670 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2671 break;
2672 case 0x02E:
2673 /* FCMOVLE */
2674 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2675 break;
2676 case 0x02F:
2677 /* FCMOVGT */
2678 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2679 break;
2680 case 0x030:
2681 /* CVTQL */
2682 gen_fcvtql(rb, rc);
2683 break;
2684 case 0x130:
2685 /* CVTQL/V */
2686 case 0x530:
2687 /* CVTQL/SV */
2688 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2689 /v doesn't do. The only thing I can think of is that /sv is a
2690 valid instruction merely for completeness in the ISA. */
2691 gen_fcvtql_v(ctx, rb, rc);
2692 break;
2693 default:
2694 goto invalid_opc;
2695 }
2696 break;
2697 case 0x18:
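/* Miscellaneous memory-format instructions, selected by the displacement field. */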
2698 switch ((uint16_t)disp16) {
2699 case 0x0000:
2700 /* TRAPB */
2701 /* No-op. */
2702 break;
2703 case 0x0400:
2704 /* EXCB */
2705 /* No-op. */
2706 break;
2707 case 0x4000:
2708 /* MB */
2709 /* No-op */
2710 break;
2711 case 0x4400:
2712 /* WMB */
2713 /* No-op */
2714 break;
2715 case 0x8000:
2716 /* FETCH */
2717 /* No-op */
2718 break;
2719 case 0xA000:
2720 /* FETCH_M */
2721 /* No-op */
2722 break;
2723 case 0xC000:
2724 /* RPCC */
2725 if (ra != 31)
2726 gen_helper_load_pcc(cpu_ir[ra]);
2727 break;
2728 case 0xE000:
2729 /* RC */
2730 gen_rx(ra, 0);
2731 break;
2732 case 0xE800:
2733 /* ECB */
2734 break;
2735 case 0xF000:
2736 /* RS */
2737 gen_rx(ra, 1);
2738 break;
2739 case 0xF800:
2740 /* WH64 */
2741 /* No-op */
2742 break;
2743 default:
2744 goto invalid_opc;
2745 }
2746 break;
2747 case 0x19:
2748 /* HW_MFPR (PALcode) */
2749 #ifndef CONFIG_USER_ONLY
2750 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2751 gen_mfpr(ra, insn & 0xffff);
2752 break;
2753 }
2754 #endif
2755 goto invalid_opc;
2756 case 0x1A:
2757 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2758 prediction stack action, which of course we don't implement. */
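/* The target's low two bits are forced clear; ctx->pc already points at
   the following insn and is written to Ra as the return address. */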
2759 if (rb != 31) {
2760 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2761 } else {
2762 tcg_gen_movi_i64(cpu_pc, 0);
2763 }
2764 if (ra != 31) {
2765 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2766 }
2767 ret = EXIT_PC_UPDATED;
2768 break;
2769 case 0x1B:
2770 /* HW_LD (PALcode) */
2771 #ifndef CONFIG_USER_ONLY
2772 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2773 TCGv addr;
2774
2775 if (ra == 31) {
2776 break;
2777 }
2778
2779 addr = tcg_temp_new();
2780 if (rb != 31)
2781 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2782 else
2783 tcg_gen_movi_i64(addr, disp12);
2784 switch ((insn >> 12) & 0xF) {
2785 case 0x0:
2786 /* Longword physical access (hw_ldl/p) */
2787 gen_helper_ldl_phys(cpu_ir[ra], addr);
2788 break;
2789 case 0x1:
2790 /* Quadword physical access (hw_ldq/p) */
2791 gen_helper_ldq_phys(cpu_ir[ra], addr);
2792 break;
2793 case 0x2:
2794 /* Longword physical access with lock (hw_ldl_l/p) */
2795 gen_helper_ldl_l_phys(cpu_ir[ra], addr);
2796 break;
2797 case 0x3:
2798 /* Quadword physical access with lock (hw_ldq_l/p) */
2799 gen_helper_ldq_l_phys(cpu_ir[ra], addr);
2800 break;
2801 case 0x4:
2802 /* Longword virtual PTE fetch (hw_ldl/v) */
2803 goto invalid_opc;
2804 case 0x5:
2805 /* Quadword virtual PTE fetch (hw_ldq/v) */
2806 goto invalid_opc;
2808 case 0x6:
2809 /* Invalid */
2810 goto invalid_opc;
2811 case 0x7:
2812 /* Invalid */
2813 goto invalid_opc;
2814 case 0x8:
2815 /* Longword virtual access (hw_ldl) */
2816 goto invalid_opc;
2817 case 0x9:
2818 /* Quadword virtual access (hw_ldq) */
2819 goto invalid_opc;
2820 case 0xA:
2821 /* Longword virtual access with protection check (hw_ldl/w) */
2822 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2823 break;
2824 case 0xB:
2825 /* Quadword virtual access with protection check (hw_ldq/w) */
2826 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2827 break;
2828 case 0xC:
2829 /* Longword virtual access with alt access mode (hw_ldl/a) */
2830 goto invalid_opc;
2831 case 0xD:
2832 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2833 goto invalid_opc;
2834 case 0xE:
2835 /* Longword virtual access with alternate access mode and
2836 protection checks (hw_ldl/wa) */
2837 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2838 break;
2839 case 0xF:
2840 /* Quadword virtual access with alternate access mode and
2841 protection checks (hw_ldq/wa) */
2842 tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2843 break;
2844 }
2845 tcg_temp_free(addr);
2846 break;
2847 }
2848 #endif
2849 goto invalid_opc;
2850 case 0x1C:
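/* Sign extension (BWX), counting (CIX), multimedia (MVI) and FP-to-integer
   move (FIX) instructions, each gated on the corresponding AMASK bit. */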
2851 switch (fn7) {
2852 case 0x00:
2853 /* SEXTB */
2854 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2855 goto invalid_opc;
2856 }
2857 if (likely(rc != 31)) {
2858 if (islit)
2859 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2860 else
2861 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2862 }
2863 break;
2864 case 0x01:
2865 /* SEXTW */
2866 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2867 if (likely(rc != 31)) {
2868 if (islit) {
2869 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2870 } else {
2871 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2872 }
2873 }
2874 break;
2875 }
2876 goto invalid_opc;
2877 case 0x30:
2878 /* CTPOP */
2879 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2880 if (likely(rc != 31)) {
2881 if (islit) {
2882 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2883 } else {
2884 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2885 }
2886 }
2887 break;
2888 }
2889 goto invalid_opc;
2890 case 0x31:
2891 /* PERR */
2892 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2893 gen_perr(ra, rb, rc, islit, lit);
2894 break;
2895 }
2896 goto invalid_opc;
2897 case 0x32:
2898 /* CTLZ */
2899 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2900 if (likely(rc != 31)) {
2901 if (islit) {
2902 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2903 } else {
2904 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2905 }
2906 }
2907 break;
2908 }
2909 goto invalid_opc;
2910 case 0x33:
2911 /* CTTZ */
2912 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2913 if (likely(rc != 31)) {
2914 if (islit) {
2915 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2916 } else {
2917 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2918 }
2919 }
2920 break;
2921 }
2922 goto invalid_opc;
2923 case 0x34:
2924 /* UNPKBW */
2925 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2926 if (real_islit || ra != 31) {
2927 goto invalid_opc;
2928 }
2929 gen_unpkbw(rb, rc);
2930 break;
2931 }
2932 goto invalid_opc;
2933 case 0x35:
2934 /* UNPKBL */
2935 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2936 if (real_islit || ra != 31) {
2937 goto invalid_opc;
2938 }
2939 gen_unpkbl(rb, rc);
2940 break;
2941 }
2942 goto invalid_opc;
2943 case 0x36:
2944 /* PKWB */
2945 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2946 if (real_islit || ra != 31) {
2947 goto invalid_opc;
2948 }
2949 gen_pkwb(rb, rc);
2950 break;
2951 }
2952 goto invalid_opc;
2953 case 0x37:
2954 /* PKLB */
2955 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2956 if (real_islit || ra != 31) {
2957 goto invalid_opc;
2958 }
2959 gen_pklb(rb, rc);
2960 break;
2961 }
2962 goto invalid_opc;
2963 case 0x38:
2964 /* MINSB8 */
2965 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2966 gen_minsb8(ra, rb, rc, islit, lit);
2967 break;
2968 }
2969 goto invalid_opc;
2970 case 0x39:
2971 /* MINSW4 */
2972 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2973 gen_minsw4(ra, rb, rc, islit, lit);
2974 break;
2975 }
2976 goto invalid_opc;
2977 case 0x3A:
2978 /* MINUB8 */
2979 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2980 gen_minub8(ra, rb, rc, islit, lit);
2981 break;
2982 }
2983 goto invalid_opc;
2984 case 0x3B:
2985 /* MINUW4 */
2986 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2987 gen_minuw4(ra, rb, rc, islit, lit);
2988 break;
2989 }
2990 goto invalid_opc;
2991 case 0x3C:
2992 /* MAXUB8 */
2993 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2994 gen_maxub8(ra, rb, rc, islit, lit);
2995 break;
2996 }
2997 goto invalid_opc;
2998 case 0x3D:
2999 /* MAXUW4 */
3000 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3001 gen_maxuw4(ra, rb, rc, islit, lit);
3002 break;
3003 }
3004 goto invalid_opc;
3005 case 0x3E:
3006 /* MAXSB8 */
3007 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3008 gen_maxsb8(ra, rb, rc, islit, lit);
3009 break;
3010 }
3011 goto invalid_opc;
3012 case 0x3F:
3013 /* MAXSW4 */
3014 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3015 gen_maxsw4(ra, rb, rc, islit, lit);
3016 break;
3017 }
3018 goto invalid_opc;
3019 case 0x70:
3020 /* FTOIT */
3021 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3022 goto invalid_opc;
3023 }
3024 if (likely(rc != 31)) {
3025 if (ra != 31)
3026 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3027 else
3028 tcg_gen_movi_i64(cpu_ir[rc], 0);
3029 }
3030 break;
3031 case 0x78:
3032 /* FTOIS */
3033 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3034 goto invalid_opc;
3035 }
3036 if (rc != 31) {
3037 TCGv_i32 tmp1 = tcg_temp_new_i32();
3038 if (ra != 31)
3039 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3040 else {
3041 TCGv tmp2 = tcg_const_i64(0);
3042 gen_helper_s_to_memory(tmp1, tmp2);
3043 tcg_temp_free(tmp2);
3044 }
3045 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3046 tcg_temp_free_i32(tmp1);
3047 }
3048 break;
3049 default:
3050 goto invalid_opc;
3051 }
3052 break;
3053 case 0x1D:
3054 /* HW_MTPR (PALcode) */
3055 #ifndef CONFIG_USER_ONLY
3056 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3057 gen_mtpr(rb, insn & 0xffff);
3058 break;
3059 }
3060 #endif
3061 goto invalid_opc;
3062 case 0x1E:
3063 /* HW_RET (PALcode) */
3064 #ifndef CONFIG_USER_ONLY
3065 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3066 if (rb == 31) {
3067 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3068 address from EXC_ADDR. This turns out to be useful for our
3069 emulation PALcode, so continue to accept it. */
3070 TCGv tmp = tcg_temp_new();
3071 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
3072 gen_helper_hw_ret(tmp);
3073 tcg_temp_free(tmp);
3074 } else {
3075 gen_helper_hw_ret(cpu_ir[rb]);
3076 }
3077 ret = EXIT_PC_UPDATED;
3078 break;
3079 }
3080 #endif
3081 goto invalid_opc;
3082 case 0x1F:
3083 /* HW_ST (PALcode) */
3084 #ifndef CONFIG_USER_ONLY
3085 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3086 TCGv addr, val;
3087 addr = tcg_temp_new();
3088 if (rb != 31)
3089 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3090 else
3091 tcg_gen_movi_i64(addr, disp12);
3092 if (ra != 31)
3093 val = cpu_ir[ra];
3094 else {
3095 val = tcg_temp_new();
3096 tcg_gen_movi_i64(val, 0);
3097 }
3098 switch ((insn >> 12) & 0xF) {
3099 case 0x0:
3100 /* Longword physical access */
3101 gen_helper_stl_phys(addr, val);
3102 break;
3103 case 0x1:
3104 /* Quadword physical access */
3105 gen_helper_stq_phys(addr, val);
3106 break;
3107 case 0x2:
3108 /* Longword physical access with lock */
3109 gen_helper_stl_c_phys(val, addr, val);
3110 break;
3111 case 0x3:
3112 /* Quadword physical access with lock */
3113 gen_helper_stq_c_phys(val, addr, val);
3114 break;
3115 case 0x4:
3116 /* Longword virtual access */
3117 goto invalid_opc;
3118 case 0x5:
3119 /* Quadword virtual access */
3120 goto invalid_opc;
3121 case 0x6:
3122 /* Invalid */
3123 goto invalid_opc;
3124 case 0x7:
3125 /* Invalid */
3126 goto invalid_opc;
3127 case 0x8:
3128 /* Invalid */
3129 goto invalid_opc;
3130 case 0x9:
3131 /* Invalid */
3132 goto invalid_opc;
3133 case 0xA:
3134 /* Invalid */
3135 goto invalid_opc;
3136 case 0xB:
3137 /* Invalid */
3138 goto invalid_opc;
3139 case 0xC:
3140 /* Longword virtual access with alternate access mode */
3141 goto invalid_opc;
3142 case 0xD:
3143 /* Quadword virtual access with alternate access mode */
3144 goto invalid_opc;
3145 case 0xE:
3146 /* Invalid */
3147 goto invalid_opc;
3148 case 0xF:
3149 /* Invalid */
3150 goto invalid_opc;
3151 }
3152 if (ra == 31)
3153 tcg_temp_free(val);
3154 tcg_temp_free(addr);
3155 break;
3156 }
3157 #endif
3158 goto invalid_opc;
3159 case 0x20:
3160 /* LDF */
3161 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3162 break;
3163 case 0x21:
3164 /* LDG */
3165 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3166 break;
3167 case 0x22:
3168 /* LDS */
3169 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3170 break;
3171 case 0x23:
3172 /* LDT */
3173 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3174 break;
3175 case 0x24:
3176 /* STF */
3177 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3178 break;
3179 case 0x25:
3180 /* STG */
3181 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3182 break;
3183 case 0x26:
3184 /* STS */
3185 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3186 break;
3187 case 0x27:
3188 /* STT */
3189 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3190 break;
3191 case 0x28:
3192 /* LDL */
3193 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3194 break;
3195 case 0x29:
3196 /* LDQ */
3197 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3198 break;
3199 case 0x2A:
3200 /* LDL_L */
3201 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3202 break;
3203 case 0x2B:
3204 /* LDQ_L */
3205 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3206 break;
3207 case 0x2C:
3208 /* STL */
3209 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3210 break;
3211 case 0x2D:
3212 /* STQ */
3213 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3214 break;
3215 case 0x2E:
3216 /* STL_C */
3217 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3218 break;
3219 case 0x2F:
3220 /* STQ_C */
3221 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3222 break;
3223 case 0x30:
3224 /* BR */
3225 ret = gen_bdirect(ctx, ra, disp21);
3226 break;
3227 case 0x31: /* FBEQ */
3228 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3229 break;
3230 case 0x32: /* FBLT */
3231 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3232 break;
3233 case 0x33: /* FBLE */
3234 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3235 break;
3236 case 0x34:
3237 /* BSR */
3238 ret = gen_bdirect(ctx, ra, disp21);
3239 break;
3240 case 0x35: /* FBNE */
3241 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3242 break;
3243 case 0x36: /* FBGE */
3244 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3245 break;
3246 case 0x37: /* FBGT */
3247 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3248 break;
3249 case 0x38:
3250 /* BLBC */
3251 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3252 break;
3253 case 0x39:
3254 /* BEQ */
3255 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3256 break;
3257 case 0x3A:
3258 /* BLT */
3259 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3260 break;
3261 case 0x3B:
3262 /* BLE */
3263 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3264 break;
3265 case 0x3C:
3266 /* BLBS */
3267 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3268 break;
3269 case 0x3D:
3270 /* BNE */
3271 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3272 break;
3273 case 0x3E:
3274 /* BGE */
3275 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3276 break;
3277 case 0x3F:
3278 /* BGT */
3279 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3280 break;
3281 invalid_opc:
3282 ret = gen_invalid(ctx);
3283 break;
3284 }
3285
3286 return ret;
3287 }
3288
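/* Translate a block of guest code starting at tb->pc into TCG ops. When
   search_pc is set, per-insn PC and icount values are recorded so that
   CPU state can be restored at an arbitrary instruction. */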
3289 static inline void gen_intermediate_code_internal(CPUState *env,
3290 TranslationBlock *tb,
3291 int search_pc)
3292 {
3293 DisasContext ctx, *ctxp = &ctx;
3294 target_ulong pc_start;
3295 uint32_t insn;
3296 uint16_t *gen_opc_end;
3297 CPUBreakpoint *bp;
3298 int j, lj = -1;
3299 ExitStatus ret;
3300 int num_insns;
3301 int max_insns;
3302
3303 pc_start = tb->pc;
3304 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3305
3306 ctx.tb = tb;
3307 ctx.env = env;
3308 ctx.pc = pc_start;
3309 ctx.mem_idx = cpu_mmu_index(env);
3310
3311 /* ??? Every TB begins with unset rounding mode, to be initialized on
3312 the first fp insn of the TB. Alternatively we could define a proper
3313 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3314 to reset the FP_STATUS to that default at the end of any TB that
3315 changes the default. We could even (gasp) dynamically figure out
3316 what default would be most efficient given the running program. */
3317 ctx.tb_rm = -1;
3318 /* Similarly for flush-to-zero. */
3319 ctx.tb_ftz = -1;
3320
3321 num_insns = 0;
3322 max_insns = tb->cflags & CF_COUNT_MASK;
3323 if (max_insns == 0)
3324 max_insns = CF_COUNT_MASK;
3325
3326 gen_icount_start();
3327 do {
3328 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3329 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3330 if (bp->pc == ctx.pc) {
3331 gen_excp(&ctx, EXCP_DEBUG, 0);
3332 break;
3333 }
3334 }
3335 }
3336 if (search_pc) {
3337 j = gen_opc_ptr - gen_opc_buf;
3338 if (lj < j) {
3339 lj++;
3340 while (lj < j)
3341 gen_opc_instr_start[lj++] = 0;
3342 }
3343 gen_opc_pc[lj] = ctx.pc;
3344 gen_opc_instr_start[lj] = 1;
3345 gen_opc_icount[lj] = num_insns;
3346 }
3347 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3348 gen_io_start();
3349 insn = ldl_code(ctx.pc);
3350 num_insns++;
3351
3352 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3353 tcg_gen_debug_insn_start(ctx.pc);
3354 }
3355
3356 ctx.pc += 4;
3357 ret = translate_one(ctxp, insn);
3358
3359 /* If we reach a page boundary, are single stepping,
3360 or exhaust instruction count, stop generation. */
3361 if (ret == NO_EXIT
3362 && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3363 || gen_opc_ptr >= gen_opc_end
3364 || num_insns >= max_insns
3365 || singlestep
3366 || env->singlestep_enabled)) {
3367 ret = EXIT_PC_STALE;
3368 }
3369 } while (ret == NO_EXIT);
3370
3371 if (tb->cflags & CF_LAST_IO) {
3372 gen_io_end();
3373 }
3374
3375 switch (ret) {
3376 case EXIT_GOTO_TB:
3377 case EXIT_NORETURN:
3378 break;
3379 case EXIT_PC_STALE:
3380 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3381 /* FALLTHRU */
3382 case EXIT_PC_UPDATED:
3383 if (env->singlestep_enabled) {
3384 gen_excp_1(EXCP_DEBUG, 0);
3385 } else {
3386 tcg_gen_exit_tb(0);
3387 }
3388 break;
3389 default:
3390 abort();
3391 }
3392
3393 gen_icount_end(tb, num_insns);
3394 *gen_opc_ptr = INDEX_op_end;
3395 if (search_pc) {
3396 j = gen_opc_ptr - gen_opc_buf;
3397 lj++;
3398 while (lj <= j)
3399 gen_opc_instr_start[lj++] = 0;
3400 } else {
3401 tb->size = ctx.pc - pc_start;
3402 tb->icount = num_insns;
3403 }
3404
3405 #ifdef DEBUG_DISAS
3406 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3407 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3408 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3409 qemu_log("\n");
3410 }
3411 #endif
3412 }
3413
3414 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3415 {
3416 gen_intermediate_code_internal(env, tb, 0);
3417 }
3418
3419 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3420 {
3421 gen_intermediate_code_internal(env, tb, 1);
3422 }
3423
3424 struct cpu_def_t {
3425 const char *name;
3426 int implver, amask;
3427 };
3428
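/* Known CPU models with their IMPLVER and AMASK feature bits. */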
3429 static const struct cpu_def_t cpu_defs[] = {
3430 { "ev4", IMPLVER_2106x, 0 },
3431 { "ev5", IMPLVER_21164, 0 },
3432 { "ev56", IMPLVER_21164, AMASK_BWX },
3433 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3434 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3435 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3436 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3437 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3438 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3439 { "21064", IMPLVER_2106x, 0 },
3440 { "21164", IMPLVER_21164, 0 },
3441 { "21164a", IMPLVER_21164, AMASK_BWX },
3442 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3443 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3444 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3445 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3446 };
3447
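/* Create and initialize a CPU of the given model; unrecognized model
   names fall back to the ev67 feature set. */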
3448 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3449 {
3450 CPUAlphaState *env;
3451 int implver, amask, i, max;
3452
3453 env = qemu_mallocz(sizeof(CPUAlphaState));
3454 cpu_exec_init(env);
3455 alpha_translate_init();
3456 tlb_flush(env, 1);
3457
3458 /* Default to ev67; no reason not to emulate insns by default. */
3459 implver = IMPLVER_21264;
3460 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3461 | AMASK_TRAP | AMASK_PREFETCH);
3462
3463 max = ARRAY_SIZE(cpu_defs);
3464 for (i = 0; i < max; i++) {
3465 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3466 implver = cpu_defs[i].implver;
3467 amask = cpu_defs[i].amask;
3468 break;
3469 }
3470 }
3471 env->implver = implver;
3472 env->amask = amask;
3473
3474 #if defined (CONFIG_USER_ONLY)
3475 env->ps = PS_USER_MODE;
3476 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3477 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3478 #endif
3479 env->lock_addr = -1;
3480 env->fen = 1;
3481
3482 qemu_init_vcpu(env);
3483 return env;
3484 }
3485
3486 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
3487 {
3488 env->pc = gen_opc_pc[pc_pos];
3489 }