target-alpha: Fix fbcond branch offset.
[qemu.git] / target-alpha / translate.c
1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 #else
40 # define LOG_DISAS(...) do { } while (0)
41 #endif
42
43 typedef struct DisasContext DisasContext;
44 struct DisasContext {
45 uint64_t pc;
46 int mem_idx;
47 #if !defined (CONFIG_USER_ONLY)
48 int pal_mode;
49 #endif
50 CPUAlphaState *env;
51 uint32_t amask;
52 };
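/* Per-TB disassembly state.  pc is the guest PC of the instruction
   being translated, mem_idx the MMU index used for memory accesses,
   pal_mode whether we are translating PALcode, and amask a copy of
   env->amask so the decoder can reject instructions the selected CPU
   model does not implement. */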
53
54 /* global register indexes */
55 static TCGv_ptr cpu_env;
56 static TCGv cpu_ir[31];
57 static TCGv cpu_fir[31];
58 static TCGv cpu_pc;
59 static TCGv cpu_lock;
60
61 /* register names */
62 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
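/* The buffer is sized exactly: "ir0".."ir9" take 4 bytes each
   including the NUL, "ir10".."ir30" take 5, and the "fir" names one
   byte more, hence 10*4+21*5 + 10*5+21*6. */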
63
64 #include "gen-icount.h"
65
66 static void alpha_translate_init(void)
67 {
68 int i;
69 char *p;
70 static int done_init = 0;
71
72 if (done_init)
73 return;
74
75 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
76
77 p = cpu_reg_names;
78 for (i = 0; i < 31; i++) {
79 sprintf(p, "ir%d", i);
80 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
81 offsetof(CPUState, ir[i]), p);
82 p += (i < 10) ? 4 : 5;
83
84 sprintf(p, "fir%d", i);
85 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
86 offsetof(CPUState, fir[i]), p);
87 p += (i < 10) ? 5 : 6;
88 }
89
90 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
91 offsetof(CPUState, pc), "pc");
92
93 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
94 offsetof(CPUState, lock), "lock");
95
96 /* register helpers */
97 #define GEN_HELPER 2
98 #include "helper.h"
99
100 done_init = 1;
101 }
102
103 static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
104 {
105 TCGv_i32 tmp1, tmp2;
106
107 tcg_gen_movi_i64(cpu_pc, ctx->pc);
108 tmp1 = tcg_const_i32(exception);
109 tmp2 = tcg_const_i32(error_code);
110 gen_helper_excp(tmp1, tmp2);
111 tcg_temp_free_i32(tmp2);
112 tcg_temp_free_i32(tmp1);
113 }
114
115 static inline void gen_invalid(DisasContext *ctx)
116 {
117 gen_excp(ctx, EXCP_OPCDEC, 0);
118 }
119
120 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
121 {
122 TCGv tmp = tcg_temp_new();
123 TCGv_i32 tmp32 = tcg_temp_new_i32();
124 tcg_gen_qemu_ld32u(tmp, t1, flags);
125 tcg_gen_trunc_i64_i32(tmp32, tmp);
126 gen_helper_memory_to_f(t0, tmp32);
127 tcg_temp_free_i32(tmp32);
128 tcg_temp_free(tmp);
129 }
130
131 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
132 {
133 TCGv tmp = tcg_temp_new();
134 tcg_gen_qemu_ld64(tmp, t1, flags);
135 gen_helper_memory_to_g(t0, tmp);
136 tcg_temp_free(tmp);
137 }
138
139 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
140 {
141 TCGv tmp = tcg_temp_new();
142 TCGv_i32 tmp32 = tcg_temp_new_i32();
143 tcg_gen_qemu_ld32u(tmp, t1, flags);
144 tcg_gen_trunc_i64_i32(tmp32, tmp);
145 gen_helper_memory_to_s(t0, tmp32);
146 tcg_temp_free_i32(tmp32);
147 tcg_temp_free(tmp);
148 }
149
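/* Load-locked/store-conditional emulation: the locked loads below
   record the lock address in cpu_lock, and the matching
   gen_qemu_stl_c/gen_qemu_stq_c store only while cpu_lock still
   equals the store address, writing 1 on success or 0 on failure
   into the source register and then invalidating the lock with -1.
   This models a single CPU; nothing can intervene between the load
   and the store within one translation block. */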
150 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
151 {
152 tcg_gen_mov_i64(cpu_lock, t1);
153 tcg_gen_qemu_ld32s(t0, t1, flags);
154 }
155
156 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
157 {
158 tcg_gen_mov_i64(cpu_lock, t1);
159 tcg_gen_qemu_ld64(t0, t1, flags);
160 }
161
162 static inline void gen_load_mem(DisasContext *ctx,
163 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
164 int flags),
165 int ra, int rb, int32_t disp16, int fp,
166 int clear)
167 {
168 TCGv addr;
169
170 if (unlikely(ra == 31))
171 return;
172
173 addr = tcg_temp_new();
174 if (rb != 31) {
175 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
176 if (clear)
177 tcg_gen_andi_i64(addr, addr, ~0x7);
178 } else {
179 if (clear)
180 disp16 &= ~0x7;
181 tcg_gen_movi_i64(addr, disp16);
182 }
183 if (fp)
184 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
185 else
186 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
187 tcg_temp_free(addr);
188 }
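/* A load whose destination is $31 (or $f31) is architecturally a
   no-op, so the access is skipped entirely; any fault the access
   would have raised is not modelled.  The "clear" flag implements
   the LDQ_U-style masking of the low three address bits. */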
189
190 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
191 {
192 TCGv_i32 tmp32 = tcg_temp_new_i32();
193 TCGv tmp = tcg_temp_new();
194 gen_helper_f_to_memory(tmp32, t0);
195 tcg_gen_extu_i32_i64(tmp, tmp32);
196 tcg_gen_qemu_st32(tmp, t1, flags);
197 tcg_temp_free(tmp);
198 tcg_temp_free_i32(tmp32);
199 }
200
201 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
202 {
203 TCGv tmp = tcg_temp_new();
204 gen_helper_g_to_memory(tmp, t0);
205 tcg_gen_qemu_st64(tmp, t1, flags);
206 tcg_temp_free(tmp);
207 }
208
209 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
210 {
211 TCGv_i32 tmp32 = tcg_temp_new_i32();
212 TCGv tmp = tcg_temp_new();
213 gen_helper_s_to_memory(tmp32, t0);
214 tcg_gen_extu_i32_i64(tmp, tmp32);
215 tcg_gen_qemu_st32(tmp, t1, flags);
216 tcg_temp_free(tmp);
217 tcg_temp_free_i32(tmp32);
218 }
219
220 static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
221 {
222 int l1, l2;
223
224 l1 = gen_new_label();
225 l2 = gen_new_label();
226 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
227 tcg_gen_qemu_st32(t0, t1, flags);
228 tcg_gen_movi_i64(t0, 1);
229 tcg_gen_br(l2);
230 gen_set_label(l1);
231 tcg_gen_movi_i64(t0, 0);
232 gen_set_label(l2);
233 tcg_gen_movi_i64(cpu_lock, -1);
234 }
235
236 static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
237 {
238 int l1, l2;
239
240 l1 = gen_new_label();
241 l2 = gen_new_label();
242 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
243 tcg_gen_qemu_st64(t0, t1, flags);
244 tcg_gen_movi_i64(t0, 1);
245 tcg_gen_br(l2);
246 gen_set_label(l1);
247 tcg_gen_movi_i64(t0, 0);
248 gen_set_label(l2);
249 tcg_gen_movi_i64(cpu_lock, -1);
250 }
251
252 static inline void gen_store_mem(DisasContext *ctx,
253 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
254 int flags),
255 int ra, int rb, int32_t disp16, int fp,
256 int clear, int local)
257 {
258 TCGv addr;
259 if (local)
260 addr = tcg_temp_local_new();
261 else
262 addr = tcg_temp_new();
263 if (rb != 31) {
264 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
265 if (clear)
266 tcg_gen_andi_i64(addr, addr, ~0x7);
267 } else {
268 if (clear)
269 disp16 &= ~0x7;
270 tcg_gen_movi_i64(addr, disp16);
271 }
272 if (ra != 31) {
273 if (fp)
274 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
275 else
276 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
277 } else {
278 TCGv zero;
279 if (local)
280 zero = tcg_const_local_i64(0);
281 else
282 zero = tcg_const_i64(0);
283 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
284 tcg_temp_free(zero);
285 }
286 tcg_temp_free(addr);
287 }
288
289 static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
290 int32_t disp, int mask)
291 {
292 int l1, l2;
293
294 l1 = gen_new_label();
295 l2 = gen_new_label();
296 if (likely(ra != 31)) {
297 if (mask) {
298 TCGv tmp = tcg_temp_new();
299 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
300 tcg_gen_brcondi_i64(cond, tmp, 0, l1);
301 tcg_temp_free(tmp);
302 } else
303 tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
304 } else {
305 /* Very uncommon case - Do not bother to optimize. */
306 TCGv tmp = tcg_const_i64(0);
307 tcg_gen_brcondi_i64(cond, tmp, 0, l1);
308 tcg_temp_free(tmp);
309 }
310 tcg_gen_movi_i64(cpu_pc, ctx->pc);
311 tcg_gen_br(l2);
312 gen_set_label(l1);
313 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
314 gen_set_label(l2);
315 }
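/* ctx->pc was already advanced past the branch by the translation
   loop, and Alpha branch displacements count 32-bit instruction
   words, hence target = ctx->pc + (disp << 2) above. */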
316
317 static inline void gen_fbcond(DisasContext *ctx, int opc, int ra, int32_t disp)
318 {
319 int l1, l2;
320 TCGv tmp;
321 TCGv src;
322
323 l1 = gen_new_label();
324 l2 = gen_new_label();
325 if (ra != 31) {
326 tmp = tcg_temp_new();
327 src = cpu_fir[ra];
328 } else {
329 tmp = tcg_const_i64(0);
330 src = tmp;
331 }
332 switch (opc) {
333 case 0x31: /* FBEQ */
334 gen_helper_cmpfeq(tmp, src);
335 break;
336 case 0x32: /* FBLT */
337 gen_helper_cmpflt(tmp, src);
338 break;
339 case 0x33: /* FBLE */
340 gen_helper_cmpfle(tmp, src);
341 break;
342 case 0x35: /* FBNE */
343 gen_helper_cmpfne(tmp, src);
344 break;
345 case 0x36: /* FBGE */
346 gen_helper_cmpfge(tmp, src);
347 break;
348 case 0x37: /* FBGT */
349 gen_helper_cmpfgt(tmp, src);
350 break;
351 default:
352 abort();
353 }
354 tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
355 tcg_gen_movi_i64(cpu_pc, ctx->pc);
356 tcg_gen_br(l2);
357 gen_set_label(l1);
358 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
359 gen_set_label(l2);
360 }
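/* Both call sites pass the 21-bit branch-format displacement disp21,
   which is the fbcond branch-offset fix this revision carries; the
   target computation mirrors gen_bcond above. */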
361
362 static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
363 int islit, uint8_t lit, int mask)
364 {
365 int l1;
366
367 if (unlikely(rc == 31))
368 return;
369
370 l1 = gen_new_label();
371
372 if (ra != 31) {
373 if (mask) {
374 TCGv tmp = tcg_temp_new();
375 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
376 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
377 tcg_temp_free(tmp);
378 } else
379 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
380 } else {
381 /* Very uncommon case - Do not bother to optimize. */
382 TCGv tmp = tcg_const_i64(0);
383 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
384 tcg_temp_free(tmp);
385 }
386
387 if (islit)
388 tcg_gen_movi_i64(cpu_ir[rc], lit);
389 else
390 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
391 gen_set_label(l1);
392 }
393
394 #define FARITH2(name) \
395 static inline void glue(gen_f, name)(int rb, int rc) \
396 { \
397 if (unlikely(rc == 31)) \
398 return; \
399 \
400 if (rb != 31) \
401 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
402 else { \
403 TCGv tmp = tcg_const_i64(0); \
404 gen_helper_ ## name (cpu_fir[rc], tmp); \
405 tcg_temp_free(tmp); \
406 } \
407 }
408 FARITH2(sqrts)
409 FARITH2(sqrtf)
410 FARITH2(sqrtg)
411 FARITH2(sqrtt)
412 FARITH2(cvtgf)
413 FARITH2(cvtgq)
414 FARITH2(cvtqf)
415 FARITH2(cvtqg)
416 FARITH2(cvtst)
417 FARITH2(cvtts)
418 FARITH2(cvttq)
419 FARITH2(cvtqs)
420 FARITH2(cvtqt)
421 FARITH2(cvtlq)
422 FARITH2(cvtql)
423 FARITH2(cvtqlv)
424 FARITH2(cvtqlsv)
425
426 #define FARITH3(name) \
427 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
428 { \
429 if (unlikely(rc == 31)) \
430 return; \
431 \
432 if (ra != 31) { \
433 if (rb != 31) \
434 gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
435 else { \
436 TCGv tmp = tcg_const_i64(0); \
437 gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp); \
438 tcg_temp_free(tmp); \
439 } \
440 } else { \
441 TCGv tmp = tcg_const_i64(0); \
442 if (rb != 31) \
443 gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]); \
444 else \
445 gen_helper_ ## name (cpu_fir[rc], tmp, tmp); \
446 tcg_temp_free(tmp); \
447 } \
448 }
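/* Each FARITH3 instance expands to a gen_f<name>(ra, rb, rc) wrapper
   around the corresponding gen_helper_<name> call.  Reads of the
   always-zero register $f31 are replaced by a zero temporary, since
   the cpu_fir[] array has no slot for register 31. */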
449
450 FARITH3(addf)
451 FARITH3(subf)
452 FARITH3(mulf)
453 FARITH3(divf)
454 FARITH3(addg)
455 FARITH3(subg)
456 FARITH3(mulg)
457 FARITH3(divg)
458 FARITH3(cmpgeq)
459 FARITH3(cmpglt)
460 FARITH3(cmpgle)
461 FARITH3(adds)
462 FARITH3(subs)
463 FARITH3(muls)
464 FARITH3(divs)
465 FARITH3(addt)
466 FARITH3(subt)
467 FARITH3(mult)
468 FARITH3(divt)
469 FARITH3(cmptun)
470 FARITH3(cmpteq)
471 FARITH3(cmptlt)
472 FARITH3(cmptle)
473 FARITH3(cpys)
474 FARITH3(cpysn)
475 FARITH3(cpyse)
476
477 #define FCMOV(name) \
478 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
479 { \
480 int l1; \
481 TCGv tmp; \
482 \
483 if (unlikely(rc == 31)) \
484 return; \
485 \
486 l1 = gen_new_label(); \
488 if (ra != 31) { \
489 tmp = tcg_temp_new(); \
490 gen_helper_ ## name (tmp, cpu_fir[ra]); \
491 } else { \
492 tmp = tcg_const_i64(0); \
493 gen_helper_ ## name (tmp, tmp); \
494 } \
495 tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1); \
496 if (rb != 31) \
tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]); \
498 else \
499 tcg_gen_movi_i64(cpu_fir[rc], 0); \
500 gen_set_label(l1); \
501 }
502 FCMOV(cmpfeq)
503 FCMOV(cmpfne)
504 FCMOV(cmpflt)
505 FCMOV(cmpfge)
506 FCMOV(cmpfle)
507 FCMOV(cmpfgt)
508
509 /* Implement zapnot with an immediate operand, which expands to some
510 form of immediate AND. This is a basic building block in the
511 definition of many of the other byte manipulation instructions. */
512 static inline void gen_zapnoti(int ra, int rc, uint8_t lit)
513 {
514 uint64_t mask;
515 int i;
516
517 switch (lit) {
518 case 0x00:
519 tcg_gen_movi_i64(cpu_ir[rc], 0);
520 break;
521 case 0x01:
522 tcg_gen_ext8u_i64(cpu_ir[rc], cpu_ir[ra]);
523 break;
524 case 0x03:
525 tcg_gen_ext16u_i64(cpu_ir[rc], cpu_ir[ra]);
526 break;
527 case 0x0f:
528 tcg_gen_ext32u_i64(cpu_ir[rc], cpu_ir[ra]);
529 break;
530 case 0xff:
531 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
532 break;
533 default:
534 for (mask = i = 0; i < 8; ++i) {
535 if ((lit >> i) & 1)
536 mask |= 0xffull << (i * 8);
537 }
538 tcg_gen_andi_i64 (cpu_ir[rc], cpu_ir[ra], mask);
539 break;
540 }
541 }
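/* Example: lit = 0x0f selects bytes 0-3, i.e. the mask
   0x00000000ffffffff handled by the ext32u special case above, while
   lit = 0x05 (bytes 0 and 2) falls through to the loop and yields
   mask = 0x0000000000ff00ff. */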
542
543 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
544 {
545 if (unlikely(rc == 31))
546 return;
547 else if (unlikely(ra == 31))
548 tcg_gen_movi_i64(cpu_ir[rc], 0);
549 else if (islit)
550 gen_zapnoti(ra, rc, lit);
551 else
552 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
553 }
554
555 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
556 {
557 if (unlikely(rc == 31))
558 return;
559 else if (unlikely(ra == 31))
560 tcg_gen_movi_i64(cpu_ir[rc], 0);
561 else if (islit)
562 gen_zapnoti(ra, rc, ~lit);
563 else
564 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
565 }
566
567
/* EXTWH, EXTLH, EXTQH */
569 static inline void gen_ext_h(int ra, int rb, int rc, int islit,
570 uint8_t lit, uint8_t byte_mask)
571 {
572 if (unlikely(rc == 31))
573 return;
574 else if (unlikely(ra == 31))
575 tcg_gen_movi_i64(cpu_ir[rc], 0);
576 else {
577 if (islit) {
578 lit = (64 - (lit & 7) * 8) & 0x3f;
579 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
580 } else {
581 TCGv tmp1 = tcg_temp_new();
582 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
583 tcg_gen_shli_i64(tmp1, tmp1, 3);
584 tcg_gen_neg_i64(tmp1, tmp1);
585 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
586 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
587 tcg_temp_free(tmp1);
588 }
589 gen_zapnoti(rc, rc, byte_mask);
590 }
591 }
592
/* EXTBL, EXTWL, EXTLL, EXTQL */
594 static inline void gen_ext_l(int ra, int rb, int rc, int islit,
595 uint8_t lit, uint8_t byte_mask)
596 {
597 if (unlikely(rc == 31))
598 return;
599 else if (unlikely(ra == 31))
600 tcg_gen_movi_i64(cpu_ir[rc], 0);
601 else {
602 if (islit) {
603 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
604 } else {
605 TCGv tmp = tcg_temp_new();
606 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
607 tcg_gen_shli_i64(tmp, tmp, 3);
608 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
609 tcg_temp_free(tmp);
610 }
611 gen_zapnoti(rc, rc, byte_mask);
612 }
613 }
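/* Example: EXTBL with rb holding 5 shifts ra right by 5 * 8 = 40
   bits and then applies byte_mask 0x01, leaving the byte that
   started at byte offset 5 of ra in the low byte of rc. */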
614
615 /* Code to call arith3 helpers */
616 #define ARITH3(name) \
617 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
618 uint8_t lit) \
619 { \
620 if (unlikely(rc == 31)) \
621 return; \
622 \
623 if (ra != 31) { \
624 if (islit) { \
625 TCGv tmp = tcg_const_i64(lit); \
626 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
627 tcg_temp_free(tmp); \
628 } else \
629 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
630 } else { \
631 TCGv tmp1 = tcg_const_i64(0); \
632 if (islit) { \
633 TCGv tmp2 = tcg_const_i64(lit); \
634 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
635 tcg_temp_free(tmp2); \
636 } else \
637 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
638 tcg_temp_free(tmp1); \
639 } \
640 }
641 ARITH3(cmpbge)
642 ARITH3(addlv)
643 ARITH3(sublv)
644 ARITH3(addqv)
645 ARITH3(subqv)
646 ARITH3(mskbl)
647 ARITH3(insbl)
648 ARITH3(mskwl)
649 ARITH3(inswl)
650 ARITH3(mskll)
651 ARITH3(insll)
652 ARITH3(mskql)
653 ARITH3(insql)
654 ARITH3(mskwh)
655 ARITH3(inswh)
656 ARITH3(msklh)
657 ARITH3(inslh)
658 ARITH3(mskqh)
659 ARITH3(insqh)
660 ARITH3(umulh)
661 ARITH3(mullv)
662 ARITH3(mulqv)
663 ARITH3(minub8)
664 ARITH3(minsb8)
665 ARITH3(minuw4)
666 ARITH3(minsw4)
667 ARITH3(maxub8)
668 ARITH3(maxsb8)
669 ARITH3(maxuw4)
670 ARITH3(maxsw4)
671 ARITH3(perr)
672
673 #define MVIOP2(name) \
674 static inline void glue(gen_, name)(int rb, int rc) \
675 { \
676 if (unlikely(rc == 31)) \
677 return; \
678 if (unlikely(rb == 31)) \
679 tcg_gen_movi_i64(cpu_ir[rc], 0); \
680 else \
681 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
682 }
683 MVIOP2(pklb)
684 MVIOP2(pkwb)
685 MVIOP2(unpkbl)
686 MVIOP2(unpkbw)
687
688 static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
689 uint8_t lit)
690 {
691 int l1, l2;
692 TCGv tmp;
693
694 if (unlikely(rc == 31))
695 return;
696
697 l1 = gen_new_label();
698 l2 = gen_new_label();
699
700 if (ra != 31) {
701 tmp = tcg_temp_new();
702 tcg_gen_mov_i64(tmp, cpu_ir[ra]);
703 } else
704 tmp = tcg_const_i64(0);
705 if (islit)
706 tcg_gen_brcondi_i64(cond, tmp, lit, l1);
707 else
708 tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);
709
710 tcg_gen_movi_i64(cpu_ir[rc], 0);
711 tcg_gen_br(l2);
712 gen_set_label(l1);
713 tcg_gen_movi_i64(cpu_ir[rc], 1);
714 gen_set_label(l2);
715 }
716
717 static inline int translate_one(DisasContext *ctx, uint32_t insn)
718 {
719 uint32_t palcode;
720 int32_t disp21, disp16, disp12;
721 uint16_t fn11, fn16;
722 uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
723 uint8_t lit;
724 int ret;
725
726 /* Decode all instruction fields */
727 opc = insn >> 26;
728 ra = (insn >> 21) & 0x1F;
729 rb = (insn >> 16) & 0x1F;
730 rc = insn & 0x1F;
731 sbz = (insn >> 13) & 0x07;
732 real_islit = islit = (insn >> 12) & 1;
733 if (rb == 31 && !islit) {
734 islit = 1;
735 lit = 0;
736 } else
737 lit = (insn >> 13) & 0xFF;
738 palcode = insn & 0x03FFFFFF;
739 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
740 disp16 = (int16_t)(insn & 0x0000FFFF);
741 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
742 fn16 = insn & 0x0000FFFF;
743 fn11 = (insn >> 5) & 0x000007FF;
744 fpfn = fn11 & 0x3F;
745 fn7 = (insn >> 5) & 0x0000007F;
746 fn2 = (insn >> 5) & 0x00000003;
747 ret = 0;
748 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
749 opc, ra, rb, rc, disp16);
750
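/* Alpha instruction formats, as extracted above (all formats share
   opcode<31:26> and ra<25:21>):
     memory:    rb<20:16>  disp16<15:0>
     branch:    disp21<20:0>
     operate:   rb<20:16>  sbz<15:13>  lit<12>  fn7<11:5>  rc<4:0>,
                or an 8-bit literal in <20:13> when bit 12 is set
     FP op:     rb<20:16>  fn11<15:5>  rc<4:0>
     PALcode:   palcode<25:0> */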
751 switch (opc) {
752 case 0x00:
753 /* CALL_PAL */
754 if (palcode >= 0x80 && palcode < 0xC0) {
755 /* Unprivileged PAL call */
756 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
757 #if !defined (CONFIG_USER_ONLY)
758 } else if (palcode < 0x40) {
759 /* Privileged PAL code */
760 if (ctx->mem_idx & 1)
761 goto invalid_opc;
762 else
763 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
764 #endif
765 } else {
766 /* Invalid PAL call */
767 goto invalid_opc;
768 }
769 ret = 3;
770 break;
771 case 0x01:
772 /* OPC01 */
773 goto invalid_opc;
774 case 0x02:
775 /* OPC02 */
776 goto invalid_opc;
777 case 0x03:
778 /* OPC03 */
779 goto invalid_opc;
780 case 0x04:
781 /* OPC04 */
782 goto invalid_opc;
783 case 0x05:
784 /* OPC05 */
785 goto invalid_opc;
786 case 0x06:
787 /* OPC06 */
788 goto invalid_opc;
789 case 0x07:
790 /* OPC07 */
791 goto invalid_opc;
792 case 0x08:
793 /* LDA */
794 if (likely(ra != 31)) {
795 if (rb != 31)
796 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
797 else
798 tcg_gen_movi_i64(cpu_ir[ra], disp16);
799 }
800 break;
801 case 0x09:
802 /* LDAH */
803 if (likely(ra != 31)) {
804 if (rb != 31)
805 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
806 else
807 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
808 }
809 break;
810 case 0x0A:
811 /* LDBU */
812 if (!(ctx->amask & AMASK_BWX))
813 goto invalid_opc;
814 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
815 break;
816 case 0x0B:
817 /* LDQ_U */
818 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
819 break;
820 case 0x0C:
821 /* LDWU */
822 if (!(ctx->amask & AMASK_BWX))
823 goto invalid_opc;
824 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
825 break;
826 case 0x0D:
827 /* STW */
828 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
829 break;
830 case 0x0E:
831 /* STB */
832 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
833 break;
834 case 0x0F:
835 /* STQ_U */
836 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
837 break;
838 case 0x10:
839 switch (fn7) {
840 case 0x00:
841 /* ADDL */
842 if (likely(rc != 31)) {
843 if (ra != 31) {
844 if (islit) {
845 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
846 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
847 } else {
848 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
849 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
850 }
851 } else {
852 if (islit)
853 tcg_gen_movi_i64(cpu_ir[rc], lit);
854 else
855 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
856 }
857 }
858 break;
859 case 0x02:
860 /* S4ADDL */
861 if (likely(rc != 31)) {
862 if (ra != 31) {
863 TCGv tmp = tcg_temp_new();
864 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
865 if (islit)
866 tcg_gen_addi_i64(tmp, tmp, lit);
867 else
868 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
869 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
870 tcg_temp_free(tmp);
871 } else {
872 if (islit)
873 tcg_gen_movi_i64(cpu_ir[rc], lit);
874 else
875 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
876 }
877 }
878 break;
879 case 0x09:
880 /* SUBL */
881 if (likely(rc != 31)) {
882 if (ra != 31) {
883 if (islit)
884 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
885 else
886 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
887 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
888 } else {
889 if (islit)
890 tcg_gen_movi_i64(cpu_ir[rc], -lit);
891 else {
892 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
893 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
}
}
}
break;
897 case 0x0B:
898 /* S4SUBL */
899 if (likely(rc != 31)) {
900 if (ra != 31) {
901 TCGv tmp = tcg_temp_new();
902 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
903 if (islit)
904 tcg_gen_subi_i64(tmp, tmp, lit);
905 else
906 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
907 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
908 tcg_temp_free(tmp);
909 } else {
910 if (islit)
911 tcg_gen_movi_i64(cpu_ir[rc], -lit);
912 else {
913 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
914 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
915 }
916 }
917 }
918 break;
919 case 0x0F:
920 /* CMPBGE */
921 gen_cmpbge(ra, rb, rc, islit, lit);
922 break;
923 case 0x12:
924 /* S8ADDL */
925 if (likely(rc != 31)) {
926 if (ra != 31) {
927 TCGv tmp = tcg_temp_new();
928 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
929 if (islit)
930 tcg_gen_addi_i64(tmp, tmp, lit);
931 else
932 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
933 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
934 tcg_temp_free(tmp);
935 } else {
936 if (islit)
937 tcg_gen_movi_i64(cpu_ir[rc], lit);
938 else
939 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
940 }
941 }
942 break;
943 case 0x1B:
944 /* S8SUBL */
945 if (likely(rc != 31)) {
946 if (ra != 31) {
947 TCGv tmp = tcg_temp_new();
948 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
949 if (islit)
950 tcg_gen_subi_i64(tmp, tmp, lit);
951 else
952 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
953 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
954 tcg_temp_free(tmp);
955 } else {
956 if (islit)
957 tcg_gen_movi_i64(cpu_ir[rc], -lit);
else {
tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
}
}
}
break;
965 case 0x1D:
966 /* CMPULT */
967 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
968 break;
969 case 0x20:
970 /* ADDQ */
971 if (likely(rc != 31)) {
972 if (ra != 31) {
973 if (islit)
974 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
975 else
976 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
977 } else {
978 if (islit)
979 tcg_gen_movi_i64(cpu_ir[rc], lit);
980 else
981 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
982 }
983 }
984 break;
985 case 0x22:
986 /* S4ADDQ */
987 if (likely(rc != 31)) {
988 if (ra != 31) {
989 TCGv tmp = tcg_temp_new();
990 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
991 if (islit)
992 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
993 else
994 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
995 tcg_temp_free(tmp);
996 } else {
997 if (islit)
998 tcg_gen_movi_i64(cpu_ir[rc], lit);
999 else
1000 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1001 }
1002 }
1003 break;
1004 case 0x29:
1005 /* SUBQ */
1006 if (likely(rc != 31)) {
1007 if (ra != 31) {
1008 if (islit)
1009 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1010 else
1011 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1012 } else {
1013 if (islit)
1014 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1015 else
1016 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1017 }
1018 }
1019 break;
1020 case 0x2B:
1021 /* S4SUBQ */
1022 if (likely(rc != 31)) {
1023 if (ra != 31) {
1024 TCGv tmp = tcg_temp_new();
1025 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1026 if (islit)
1027 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1028 else
1029 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1030 tcg_temp_free(tmp);
1031 } else {
1032 if (islit)
1033 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1034 else
1035 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1036 }
1037 }
1038 break;
1039 case 0x2D:
1040 /* CMPEQ */
1041 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1042 break;
1043 case 0x32:
1044 /* S8ADDQ */
1045 if (likely(rc != 31)) {
1046 if (ra != 31) {
1047 TCGv tmp = tcg_temp_new();
1048 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1049 if (islit)
1050 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1051 else
1052 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1053 tcg_temp_free(tmp);
1054 } else {
1055 if (islit)
1056 tcg_gen_movi_i64(cpu_ir[rc], lit);
1057 else
1058 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1059 }
1060 }
1061 break;
1062 case 0x3B:
1063 /* S8SUBQ */
1064 if (likely(rc != 31)) {
1065 if (ra != 31) {
1066 TCGv tmp = tcg_temp_new();
1067 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1068 if (islit)
1069 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1070 else
1071 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1072 tcg_temp_free(tmp);
1073 } else {
1074 if (islit)
1075 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1076 else
1077 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1078 }
1079 }
1080 break;
1081 case 0x3D:
1082 /* CMPULE */
1083 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1084 break;
1085 case 0x40:
1086 /* ADDL/V */
1087 gen_addlv(ra, rb, rc, islit, lit);
1088 break;
1089 case 0x49:
1090 /* SUBL/V */
1091 gen_sublv(ra, rb, rc, islit, lit);
1092 break;
1093 case 0x4D:
1094 /* CMPLT */
1095 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1096 break;
1097 case 0x60:
1098 /* ADDQ/V */
1099 gen_addqv(ra, rb, rc, islit, lit);
1100 break;
1101 case 0x69:
1102 /* SUBQ/V */
1103 gen_subqv(ra, rb, rc, islit, lit);
1104 break;
1105 case 0x6D:
1106 /* CMPLE */
1107 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1108 break;
1109 default:
1110 goto invalid_opc;
1111 }
1112 break;
1113 case 0x11:
1114 switch (fn7) {
1115 case 0x00:
1116 /* AND */
1117 if (likely(rc != 31)) {
1118 if (ra == 31)
1119 tcg_gen_movi_i64(cpu_ir[rc], 0);
1120 else if (islit)
1121 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1122 else
1123 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1124 }
1125 break;
1126 case 0x08:
1127 /* BIC */
1128 if (likely(rc != 31)) {
1129 if (ra != 31) {
1130 if (islit)
1131 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1132 else
1133 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1134 } else
1135 tcg_gen_movi_i64(cpu_ir[rc], 0);
1136 }
1137 break;
1138 case 0x14:
1139 /* CMOVLBS */
1140 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1141 break;
1142 case 0x16:
1143 /* CMOVLBC */
1144 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1145 break;
1146 case 0x20:
1147 /* BIS */
1148 if (likely(rc != 31)) {
1149 if (ra != 31) {
1150 if (islit)
1151 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1152 else
1153 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1154 } else {
1155 if (islit)
1156 tcg_gen_movi_i64(cpu_ir[rc], lit);
1157 else
1158 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1159 }
1160 }
1161 break;
1162 case 0x24:
1163 /* CMOVEQ */
1164 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1165 break;
1166 case 0x26:
1167 /* CMOVNE */
1168 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1169 break;
1170 case 0x28:
1171 /* ORNOT */
1172 if (likely(rc != 31)) {
1173 if (ra != 31) {
1174 if (islit)
1175 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1176 else
1177 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1178 } else {
1179 if (islit)
1180 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1181 else
1182 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1183 }
1184 }
1185 break;
1186 case 0x40:
1187 /* XOR */
1188 if (likely(rc != 31)) {
1189 if (ra != 31) {
1190 if (islit)
1191 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1192 else
1193 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1194 } else {
1195 if (islit)
1196 tcg_gen_movi_i64(cpu_ir[rc], lit);
1197 else
1198 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1199 }
1200 }
1201 break;
1202 case 0x44:
1203 /* CMOVLT */
1204 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1205 break;
1206 case 0x46:
1207 /* CMOVGE */
1208 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1209 break;
1210 case 0x48:
1211 /* EQV */
1212 if (likely(rc != 31)) {
1213 if (ra != 31) {
1214 if (islit)
1215 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1216 else
1217 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1218 } else {
1219 if (islit)
1220 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1221 else
1222 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1223 }
1224 }
1225 break;
1226 case 0x61:
1227 /* AMASK */
1228 if (likely(rc != 31)) {
1229 if (islit)
1230 tcg_gen_movi_i64(cpu_ir[rc], lit);
1231 else
1232 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1233 switch (ctx->env->implver) {
1234 case IMPLVER_2106x:
1235 /* EV4, EV45, LCA, LCA45 & EV5 */
1236 break;
1237 case IMPLVER_21164:
1238 case IMPLVER_21264:
1239 case IMPLVER_21364:
1240 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1241 ~(uint64_t)ctx->amask);
1242 break;
1243 }
1244 }
1245 break;
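/* AMASK returns its operand with the bit for every implemented
   feature cleared, so software passes a mask of candidate feature
   bits and treats those that come back zero as present.  The
   IMPLVER_2106x case leaves the value untouched; those machines
   pass the mask through unchanged. */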
1246 case 0x64:
1247 /* CMOVLE */
1248 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
1249 break;
1250 case 0x66:
1251 /* CMOVGT */
1252 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
1253 break;
1254 case 0x6C:
1255 /* IMPLVER */
1256 if (rc != 31)
1257 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
1258 break;
1259 default:
1260 goto invalid_opc;
1261 }
1262 break;
1263 case 0x12:
1264 switch (fn7) {
1265 case 0x02:
1266 /* MSKBL */
1267 gen_mskbl(ra, rb, rc, islit, lit);
1268 break;
1269 case 0x06:
1270 /* EXTBL */
1271 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
1272 break;
1273 case 0x0B:
1274 /* INSBL */
1275 gen_insbl(ra, rb, rc, islit, lit);
1276 break;
1277 case 0x12:
1278 /* MSKWL */
1279 gen_mskwl(ra, rb, rc, islit, lit);
1280 break;
1281 case 0x16:
1282 /* EXTWL */
1283 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
1284 break;
1285 case 0x1B:
1286 /* INSWL */
1287 gen_inswl(ra, rb, rc, islit, lit);
1288 break;
1289 case 0x22:
1290 /* MSKLL */
1291 gen_mskll(ra, rb, rc, islit, lit);
1292 break;
1293 case 0x26:
1294 /* EXTLL */
1295 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
1296 break;
1297 case 0x2B:
1298 /* INSLL */
1299 gen_insll(ra, rb, rc, islit, lit);
1300 break;
1301 case 0x30:
1302 /* ZAP */
1303 gen_zap(ra, rb, rc, islit, lit);
1304 break;
1305 case 0x31:
1306 /* ZAPNOT */
1307 gen_zapnot(ra, rb, rc, islit, lit);
1308 break;
1309 case 0x32:
1310 /* MSKQL */
1311 gen_mskql(ra, rb, rc, islit, lit);
1312 break;
1313 case 0x34:
1314 /* SRL */
1315 if (likely(rc != 31)) {
1316 if (ra != 31) {
1317 if (islit)
1318 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1319 else {
1320 TCGv shift = tcg_temp_new();
1321 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1322 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1323 tcg_temp_free(shift);
1324 }
1325 } else
1326 tcg_gen_movi_i64(cpu_ir[rc], 0);
1327 }
1328 break;
1329 case 0x36:
1330 /* EXTQL */
1331 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
1332 break;
1333 case 0x39:
1334 /* SLL */
1335 if (likely(rc != 31)) {
1336 if (ra != 31) {
1337 if (islit)
1338 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1339 else {
1340 TCGv shift = tcg_temp_new();
1341 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1342 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1343 tcg_temp_free(shift);
1344 }
1345 } else
1346 tcg_gen_movi_i64(cpu_ir[rc], 0);
1347 }
1348 break;
1349 case 0x3B:
1350 /* INSQL */
1351 gen_insql(ra, rb, rc, islit, lit);
1352 break;
1353 case 0x3C:
1354 /* SRA */
1355 if (likely(rc != 31)) {
1356 if (ra != 31) {
1357 if (islit)
1358 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1359 else {
1360 TCGv shift = tcg_temp_new();
1361 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1362 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1363 tcg_temp_free(shift);
1364 }
1365 } else
1366 tcg_gen_movi_i64(cpu_ir[rc], 0);
1367 }
1368 break;
1369 case 0x52:
1370 /* MSKWH */
1371 gen_mskwh(ra, rb, rc, islit, lit);
1372 break;
1373 case 0x57:
1374 /* INSWH */
1375 gen_inswh(ra, rb, rc, islit, lit);
1376 break;
1377 case 0x5A:
1378 /* EXTWH */
1379 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
1380 break;
1381 case 0x62:
1382 /* MSKLH */
1383 gen_msklh(ra, rb, rc, islit, lit);
1384 break;
1385 case 0x67:
1386 /* INSLH */
1387 gen_inslh(ra, rb, rc, islit, lit);
1388 break;
1389 case 0x6A:
1390 /* EXTLH */
1391 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
1392 break;
1393 case 0x72:
1394 /* MSKQH */
1395 gen_mskqh(ra, rb, rc, islit, lit);
1396 break;
1397 case 0x77:
1398 /* INSQH */
1399 gen_insqh(ra, rb, rc, islit, lit);
1400 break;
1401 case 0x7A:
1402 /* EXTQH */
1403 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
1404 break;
1405 default:
1406 goto invalid_opc;
1407 }
1408 break;
1409 case 0x13:
1410 switch (fn7) {
1411 case 0x00:
1412 /* MULL */
1413 if (likely(rc != 31)) {
1414 if (ra == 31)
1415 tcg_gen_movi_i64(cpu_ir[rc], 0);
1416 else {
1417 if (islit)
1418 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1419 else
1420 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1421 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1422 }
1423 }
1424 break;
1425 case 0x20:
1426 /* MULQ */
1427 if (likely(rc != 31)) {
1428 if (ra == 31)
1429 tcg_gen_movi_i64(cpu_ir[rc], 0);
1430 else if (islit)
1431 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1432 else
1433 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1434 }
1435 break;
1436 case 0x30:
1437 /* UMULH */
1438 gen_umulh(ra, rb, rc, islit, lit);
1439 break;
1440 case 0x40:
1441 /* MULL/V */
1442 gen_mullv(ra, rb, rc, islit, lit);
1443 break;
1444 case 0x60:
1445 /* MULQ/V */
1446 gen_mulqv(ra, rb, rc, islit, lit);
1447 break;
1448 default:
1449 goto invalid_opc;
1450 }
1451 break;
1452 case 0x14:
switch (fpfn) { /* fn11 & 0x3F */
1454 case 0x04:
1455 /* ITOFS */
1456 if (!(ctx->amask & AMASK_FIX))
1457 goto invalid_opc;
1458 if (likely(rc != 31)) {
1459 if (ra != 31) {
1460 TCGv_i32 tmp = tcg_temp_new_i32();
1461 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1462 gen_helper_memory_to_s(cpu_fir[rc], tmp);
1463 tcg_temp_free_i32(tmp);
1464 } else
1465 tcg_gen_movi_i64(cpu_fir[rc], 0);
1466 }
1467 break;
1468 case 0x0A:
1469 /* SQRTF */
1470 if (!(ctx->amask & AMASK_FIX))
1471 goto invalid_opc;
1472 gen_fsqrtf(rb, rc);
1473 break;
1474 case 0x0B:
1475 /* SQRTS */
1476 if (!(ctx->amask & AMASK_FIX))
1477 goto invalid_opc;
1478 gen_fsqrts(rb, rc);
1479 break;
1480 case 0x14:
1481 /* ITOFF */
1482 if (!(ctx->amask & AMASK_FIX))
1483 goto invalid_opc;
1484 if (likely(rc != 31)) {
1485 if (ra != 31) {
1486 TCGv_i32 tmp = tcg_temp_new_i32();
1487 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1488 gen_helper_memory_to_f(cpu_fir[rc], tmp);
1489 tcg_temp_free_i32(tmp);
1490 } else
1491 tcg_gen_movi_i64(cpu_fir[rc], 0);
1492 }
1493 break;
1494 case 0x24:
1495 /* ITOFT */
1496 if (!(ctx->amask & AMASK_FIX))
1497 goto invalid_opc;
1498 if (likely(rc != 31)) {
1499 if (ra != 31)
1500 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
1501 else
1502 tcg_gen_movi_i64(cpu_fir[rc], 0);
1503 }
1504 break;
1505 case 0x2A:
1506 /* SQRTG */
1507 if (!(ctx->amask & AMASK_FIX))
1508 goto invalid_opc;
1509 gen_fsqrtg(rb, rc);
1510 break;
case 0x2B:
1512 /* SQRTT */
1513 if (!(ctx->amask & AMASK_FIX))
1514 goto invalid_opc;
1515 gen_fsqrtt(rb, rc);
1516 break;
1517 default:
1518 goto invalid_opc;
1519 }
1520 break;
1521 case 0x15:
1522 /* VAX floating point */
1523 /* XXX: rounding mode and trap are ignored (!) */
switch (fpfn) { /* fn11 & 0x3F */
1525 case 0x00:
1526 /* ADDF */
1527 gen_faddf(ra, rb, rc);
1528 break;
1529 case 0x01:
1530 /* SUBF */
1531 gen_fsubf(ra, rb, rc);
1532 break;
1533 case 0x02:
1534 /* MULF */
1535 gen_fmulf(ra, rb, rc);
1536 break;
1537 case 0x03:
1538 /* DIVF */
1539 gen_fdivf(ra, rb, rc);
1540 break;
1541 case 0x1E:
1542 /* CVTDG */
1543 #if 0 // TODO
1544 gen_fcvtdg(rb, rc);
1545 #else
1546 goto invalid_opc;
1547 #endif
1548 break;
1549 case 0x20:
1550 /* ADDG */
1551 gen_faddg(ra, rb, rc);
1552 break;
1553 case 0x21:
1554 /* SUBG */
1555 gen_fsubg(ra, rb, rc);
1556 break;
1557 case 0x22:
1558 /* MULG */
1559 gen_fmulg(ra, rb, rc);
1560 break;
1561 case 0x23:
1562 /* DIVG */
1563 gen_fdivg(ra, rb, rc);
1564 break;
1565 case 0x25:
1566 /* CMPGEQ */
1567 gen_fcmpgeq(ra, rb, rc);
1568 break;
1569 case 0x26:
1570 /* CMPGLT */
1571 gen_fcmpglt(ra, rb, rc);
1572 break;
1573 case 0x27:
1574 /* CMPGLE */
1575 gen_fcmpgle(ra, rb, rc);
1576 break;
1577 case 0x2C:
1578 /* CVTGF */
1579 gen_fcvtgf(rb, rc);
1580 break;
1581 case 0x2D:
1582 /* CVTGD */
1583 #if 0 // TODO
1584 gen_fcvtgd(rb, rc);
1585 #else
1586 goto invalid_opc;
1587 #endif
1588 break;
1589 case 0x2F:
1590 /* CVTGQ */
1591 gen_fcvtgq(rb, rc);
1592 break;
1593 case 0x3C:
1594 /* CVTQF */
1595 gen_fcvtqf(rb, rc);
1596 break;
1597 case 0x3E:
1598 /* CVTQG */
1599 gen_fcvtqg(rb, rc);
1600 break;
1601 default:
1602 goto invalid_opc;
1603 }
1604 break;
1605 case 0x16:
1606 /* IEEE floating-point */
1607 /* XXX: rounding mode and traps are ignored (!) */
switch (fpfn) { /* fn11 & 0x3F */
1609 case 0x00:
1610 /* ADDS */
1611 gen_fadds(ra, rb, rc);
1612 break;
1613 case 0x01:
1614 /* SUBS */
1615 gen_fsubs(ra, rb, rc);
1616 break;
1617 case 0x02:
1618 /* MULS */
1619 gen_fmuls(ra, rb, rc);
1620 break;
1621 case 0x03:
1622 /* DIVS */
1623 gen_fdivs(ra, rb, rc);
1624 break;
1625 case 0x20:
1626 /* ADDT */
1627 gen_faddt(ra, rb, rc);
1628 break;
1629 case 0x21:
1630 /* SUBT */
1631 gen_fsubt(ra, rb, rc);
1632 break;
1633 case 0x22:
1634 /* MULT */
1635 gen_fmult(ra, rb, rc);
1636 break;
1637 case 0x23:
1638 /* DIVT */
1639 gen_fdivt(ra, rb, rc);
1640 break;
1641 case 0x24:
1642 /* CMPTUN */
1643 gen_fcmptun(ra, rb, rc);
1644 break;
1645 case 0x25:
1646 /* CMPTEQ */
1647 gen_fcmpteq(ra, rb, rc);
1648 break;
1649 case 0x26:
1650 /* CMPTLT */
1651 gen_fcmptlt(ra, rb, rc);
1652 break;
1653 case 0x27:
1654 /* CMPTLE */
1655 gen_fcmptle(ra, rb, rc);
1656 break;
1657 case 0x2C:
1658 /* XXX: incorrect */
1659 if (fn11 == 0x2AC || fn11 == 0x6AC) {
1660 /* CVTST */
1661 gen_fcvtst(rb, rc);
1662 } else {
1663 /* CVTTS */
1664 gen_fcvtts(rb, rc);
1665 }
1666 break;
1667 case 0x2F:
1668 /* CVTTQ */
1669 gen_fcvttq(rb, rc);
1670 break;
1671 case 0x3C:
1672 /* CVTQS */
1673 gen_fcvtqs(rb, rc);
1674 break;
1675 case 0x3E:
1676 /* CVTQT */
1677 gen_fcvtqt(rb, rc);
1678 break;
1679 default:
1680 goto invalid_opc;
1681 }
1682 break;
1683 case 0x17:
1684 switch (fn11) {
1685 case 0x010:
1686 /* CVTLQ */
1687 gen_fcvtlq(rb, rc);
1688 break;
1689 case 0x020:
1690 if (likely(rc != 31)) {
1691 if (ra == rb)
1692 /* FMOV */
1693 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
1694 else
1695 /* CPYS */
1696 gen_fcpys(ra, rb, rc);
1697 }
1698 break;
1699 case 0x021:
1700 /* CPYSN */
1701 gen_fcpysn(ra, rb, rc);
1702 break;
1703 case 0x022:
1704 /* CPYSE */
1705 gen_fcpyse(ra, rb, rc);
1706 break;
1707 case 0x024:
1708 /* MT_FPCR */
1709 if (likely(ra != 31))
1710 gen_helper_store_fpcr(cpu_fir[ra]);
1711 else {
1712 TCGv tmp = tcg_const_i64(0);
1713 gen_helper_store_fpcr(tmp);
1714 tcg_temp_free(tmp);
1715 }
1716 break;
1717 case 0x025:
1718 /* MF_FPCR */
1719 if (likely(ra != 31))
1720 gen_helper_load_fpcr(cpu_fir[ra]);
1721 break;
1722 case 0x02A:
1723 /* FCMOVEQ */
1724 gen_fcmpfeq(ra, rb, rc);
1725 break;
1726 case 0x02B:
1727 /* FCMOVNE */
1728 gen_fcmpfne(ra, rb, rc);
1729 break;
1730 case 0x02C:
1731 /* FCMOVLT */
1732 gen_fcmpflt(ra, rb, rc);
1733 break;
1734 case 0x02D:
1735 /* FCMOVGE */
1736 gen_fcmpfge(ra, rb, rc);
1737 break;
1738 case 0x02E:
1739 /* FCMOVLE */
1740 gen_fcmpfle(ra, rb, rc);
1741 break;
1742 case 0x02F:
1743 /* FCMOVGT */
1744 gen_fcmpfgt(ra, rb, rc);
1745 break;
1746 case 0x030:
1747 /* CVTQL */
1748 gen_fcvtql(rb, rc);
1749 break;
1750 case 0x130:
1751 /* CVTQL/V */
1752 gen_fcvtqlv(rb, rc);
1753 break;
1754 case 0x530:
1755 /* CVTQL/SV */
1756 gen_fcvtqlsv(rb, rc);
1757 break;
1758 default:
1759 goto invalid_opc;
1760 }
1761 break;
1762 case 0x18:
1763 switch ((uint16_t)disp16) {
1764 case 0x0000:
1765 /* TRAPB */
1766 /* No-op. Just exit from the current tb */
1767 ret = 2;
1768 break;
1769 case 0x0400:
1770 /* EXCB */
1771 /* No-op. Just exit from the current tb */
1772 ret = 2;
1773 break;
1774 case 0x4000:
1775 /* MB */
1776 /* No-op */
1777 break;
1778 case 0x4400:
1779 /* WMB */
1780 /* No-op */
1781 break;
1782 case 0x8000:
1783 /* FETCH */
1784 /* No-op */
1785 break;
1786 case 0xA000:
1787 /* FETCH_M */
1788 /* No-op */
1789 break;
1790 case 0xC000:
1791 /* RPCC */
1792 if (ra != 31)
1793 gen_helper_load_pcc(cpu_ir[ra]);
1794 break;
1795 case 0xE000:
1796 /* RC */
1797 if (ra != 31)
1798 gen_helper_rc(cpu_ir[ra]);
1799 break;
1800 case 0xE800:
1801 /* ECB */
1802 break;
1803 case 0xF000:
1804 /* RS */
1805 if (ra != 31)
1806 gen_helper_rs(cpu_ir[ra]);
1807 break;
1808 case 0xF800:
1809 /* WH64 */
1810 /* No-op */
1811 break;
1812 default:
1813 goto invalid_opc;
1814 }
1815 break;
1816 case 0x19:
1817 /* HW_MFPR (PALcode) */
1818 #if defined (CONFIG_USER_ONLY)
1819 goto invalid_opc;
1820 #else
1821 if (!ctx->pal_mode)
1822 goto invalid_opc;
1823 if (ra != 31) {
TCGv_i32 tmp = tcg_const_i32(insn & 0xFF);
gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
tcg_temp_free_i32(tmp);
1827 }
1828 break;
1829 #endif
1830 case 0x1A:
1831 if (rb != 31)
1832 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
1833 else
1834 tcg_gen_movi_i64(cpu_pc, 0);
1835 if (ra != 31)
1836 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
/* These four jumps differ only in their branch-prediction hint. */
1838 switch (fn2) {
1839 case 0x0:
1840 /* JMP */
1841 break;
1842 case 0x1:
1843 /* JSR */
1844 break;
1845 case 0x2:
1846 /* RET */
1847 break;
1848 case 0x3:
1849 /* JSR_COROUTINE */
1850 break;
1851 }
1852 ret = 1;
1853 break;
1854 case 0x1B:
1855 /* HW_LD (PALcode) */
1856 #if defined (CONFIG_USER_ONLY)
1857 goto invalid_opc;
1858 #else
1859 if (!ctx->pal_mode)
1860 goto invalid_opc;
1861 if (ra != 31) {
1862 TCGv addr = tcg_temp_new();
1863 if (rb != 31)
1864 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
1865 else
1866 tcg_gen_movi_i64(addr, disp12);
1867 switch ((insn >> 12) & 0xF) {
1868 case 0x0:
1869 /* Longword physical access (hw_ldl/p) */
1870 gen_helper_ldl_raw(cpu_ir[ra], addr);
1871 break;
1872 case 0x1:
1873 /* Quadword physical access (hw_ldq/p) */
1874 gen_helper_ldq_raw(cpu_ir[ra], addr);
1875 break;
1876 case 0x2:
1877 /* Longword physical access with lock (hw_ldl_l/p) */
1878 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
1879 break;
1880 case 0x3:
1881 /* Quadword physical access with lock (hw_ldq_l/p) */
1882 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
1883 break;
1884 case 0x4:
1885 /* Longword virtual PTE fetch (hw_ldl/v) */
1886 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
1887 break;
1888 case 0x5:
1889 /* Quadword virtual PTE fetch (hw_ldq/v) */
1890 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
1891 break;
1892 case 0x6:
/* Invalid */
1894 goto invalid_opc;
1895 case 0x7:
/* Invalid */
1897 goto invalid_opc;
1898 case 0x8:
1899 /* Longword virtual access (hw_ldl) */
1900 gen_helper_st_virt_to_phys(addr, addr);
1901 gen_helper_ldl_raw(cpu_ir[ra], addr);
1902 break;
1903 case 0x9:
1904 /* Quadword virtual access (hw_ldq) */
1905 gen_helper_st_virt_to_phys(addr, addr);
1906 gen_helper_ldq_raw(cpu_ir[ra], addr);
1907 break;
1908 case 0xA:
1909 /* Longword virtual access with protection check (hw_ldl/w) */
1910 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
1911 break;
1912 case 0xB:
1913 /* Quadword virtual access with protection check (hw_ldq/w) */
1914 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
1915 break;
1916 case 0xC:
1917 /* Longword virtual access with alt access mode (hw_ldl/a)*/
1918 gen_helper_set_alt_mode();
1919 gen_helper_st_virt_to_phys(addr, addr);
1920 gen_helper_ldl_raw(cpu_ir[ra], addr);
1921 gen_helper_restore_mode();
1922 break;
1923 case 0xD:
1924 /* Quadword virtual access with alt access mode (hw_ldq/a) */
1925 gen_helper_set_alt_mode();
1926 gen_helper_st_virt_to_phys(addr, addr);
1927 gen_helper_ldq_raw(cpu_ir[ra], addr);
1928 gen_helper_restore_mode();
1929 break;
1930 case 0xE:
1931 /* Longword virtual access with alternate access mode and
1932 * protection checks (hw_ldl/wa)
1933 */
1934 gen_helper_set_alt_mode();
1935 gen_helper_ldl_data(cpu_ir[ra], addr);
1936 gen_helper_restore_mode();
1937 break;
1938 case 0xF:
1939 /* Quadword virtual access with alternate access mode and
1940 * protection checks (hw_ldq/wa)
1941 */
1942 gen_helper_set_alt_mode();
1943 gen_helper_ldq_data(cpu_ir[ra], addr);
1944 gen_helper_restore_mode();
1945 break;
1946 }
1947 tcg_temp_free(addr);
1948 }
1949 break;
1950 #endif
1951 case 0x1C:
1952 switch (fn7) {
1953 case 0x00:
1954 /* SEXTB */
1955 if (!(ctx->amask & AMASK_BWX))
1956 goto invalid_opc;
1957 if (likely(rc != 31)) {
1958 if (islit)
1959 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
1960 else
1961 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
1962 }
1963 break;
1964 case 0x01:
1965 /* SEXTW */
1966 if (!(ctx->amask & AMASK_BWX))
1967 goto invalid_opc;
1968 if (likely(rc != 31)) {
1969 if (islit)
1970 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
1971 else
1972 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
1973 }
1974 break;
1975 case 0x30:
1976 /* CTPOP */
1977 if (!(ctx->amask & AMASK_CIX))
1978 goto invalid_opc;
1979 if (likely(rc != 31)) {
1980 if (islit)
1981 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
1982 else
1983 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
1984 }
1985 break;
1986 case 0x31:
1987 /* PERR */
1988 if (!(ctx->amask & AMASK_MVI))
1989 goto invalid_opc;
1990 gen_perr(ra, rb, rc, islit, lit);
1991 break;
1992 case 0x32:
1993 /* CTLZ */
1994 if (!(ctx->amask & AMASK_CIX))
1995 goto invalid_opc;
1996 if (likely(rc != 31)) {
1997 if (islit)
1998 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
1999 else
2000 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2001 }
2002 break;
2003 case 0x33:
2004 /* CTTZ */
2005 if (!(ctx->amask & AMASK_CIX))
2006 goto invalid_opc;
2007 if (likely(rc != 31)) {
2008 if (islit)
2009 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2010 else
2011 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2012 }
2013 break;
2014 case 0x34:
2015 /* UNPKBW */
2016 if (!(ctx->amask & AMASK_MVI))
2017 goto invalid_opc;
2018 if (real_islit || ra != 31)
2019 goto invalid_opc;
2020 gen_unpkbw (rb, rc);
2021 break;
2022 case 0x35:
2023 /* UNPKBL */
2024 if (!(ctx->amask & AMASK_MVI))
2025 goto invalid_opc;
2026 if (real_islit || ra != 31)
2027 goto invalid_opc;
2028 gen_unpkbl (rb, rc);
2029 break;
2030 case 0x36:
2031 /* PKWB */
2032 if (!(ctx->amask & AMASK_MVI))
2033 goto invalid_opc;
2034 if (real_islit || ra != 31)
2035 goto invalid_opc;
2036 gen_pkwb (rb, rc);
2037 break;
2038 case 0x37:
2039 /* PKLB */
2040 if (!(ctx->amask & AMASK_MVI))
2041 goto invalid_opc;
2042 if (real_islit || ra != 31)
2043 goto invalid_opc;
2044 gen_pklb (rb, rc);
2045 break;
2046 case 0x38:
2047 /* MINSB8 */
2048 if (!(ctx->amask & AMASK_MVI))
2049 goto invalid_opc;
2050 gen_minsb8 (ra, rb, rc, islit, lit);
2051 break;
2052 case 0x39:
2053 /* MINSW4 */
2054 if (!(ctx->amask & AMASK_MVI))
2055 goto invalid_opc;
2056 gen_minsw4 (ra, rb, rc, islit, lit);
2057 break;
2058 case 0x3A:
2059 /* MINUB8 */
2060 if (!(ctx->amask & AMASK_MVI))
2061 goto invalid_opc;
2062 gen_minub8 (ra, rb, rc, islit, lit);
2063 break;
2064 case 0x3B:
2065 /* MINUW4 */
2066 if (!(ctx->amask & AMASK_MVI))
2067 goto invalid_opc;
2068 gen_minuw4 (ra, rb, rc, islit, lit);
2069 break;
2070 case 0x3C:
2071 /* MAXUB8 */
2072 if (!(ctx->amask & AMASK_MVI))
2073 goto invalid_opc;
2074 gen_maxub8 (ra, rb, rc, islit, lit);
2075 break;
2076 case 0x3D:
2077 /* MAXUW4 */
2078 if (!(ctx->amask & AMASK_MVI))
2079 goto invalid_opc;
2080 gen_maxuw4 (ra, rb, rc, islit, lit);
2081 break;
2082 case 0x3E:
2083 /* MAXSB8 */
2084 if (!(ctx->amask & AMASK_MVI))
2085 goto invalid_opc;
2086 gen_maxsb8 (ra, rb, rc, islit, lit);
2087 break;
2088 case 0x3F:
2089 /* MAXSW4 */
2090 if (!(ctx->amask & AMASK_MVI))
2091 goto invalid_opc;
2092 gen_maxsw4 (ra, rb, rc, islit, lit);
2093 break;
2094 case 0x70:
2095 /* FTOIT */
2096 if (!(ctx->amask & AMASK_FIX))
2097 goto invalid_opc;
2098 if (likely(rc != 31)) {
2099 if (ra != 31)
2100 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2101 else
2102 tcg_gen_movi_i64(cpu_ir[rc], 0);
2103 }
2104 break;
2105 case 0x78:
2106 /* FTOIS */
2107 if (!(ctx->amask & AMASK_FIX))
2108 goto invalid_opc;
2109 if (rc != 31) {
2110 TCGv_i32 tmp1 = tcg_temp_new_i32();
2111 if (ra != 31)
2112 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2113 else {
2114 TCGv tmp2 = tcg_const_i64(0);
2115 gen_helper_s_to_memory(tmp1, tmp2);
2116 tcg_temp_free(tmp2);
2117 }
2118 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2119 tcg_temp_free_i32(tmp1);
2120 }
2121 break;
2122 default:
2123 goto invalid_opc;
2124 }
2125 break;
2126 case 0x1D:
2127 /* HW_MTPR (PALcode) */
2128 #if defined (CONFIG_USER_ONLY)
2129 goto invalid_opc;
2130 #else
2131 if (!ctx->pal_mode)
2132 goto invalid_opc;
2133 else {
TCGv_i32 tmp1 = tcg_const_i32(insn & 0xFF);
2135 if (ra != 31)
2136 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2137 else {
2138 TCGv tmp2 = tcg_const_i64(0);
2139 gen_helper_mtpr(tmp1, tmp2);
2140 tcg_temp_free(tmp2);
2141 }
tcg_temp_free_i32(tmp1);
2143 ret = 2;
2144 }
2145 break;
2146 #endif
2147 case 0x1E:
2148 /* HW_REI (PALcode) */
2149 #if defined (CONFIG_USER_ONLY)
2150 goto invalid_opc;
2151 #else
2152 if (!ctx->pal_mode)
2153 goto invalid_opc;
2154 if (rb == 31) {
2155 /* "Old" alpha */
2156 gen_helper_hw_rei();
2157 } else {
2158 TCGv tmp;
2159
2160 if (ra != 31) {
2161 tmp = tcg_temp_new();
2162 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2163 } else
2164 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2165 gen_helper_hw_ret(tmp);
2166 tcg_temp_free(tmp);
2167 }
2168 ret = 2;
2169 break;
2170 #endif
2171 case 0x1F:
2172 /* HW_ST (PALcode) */
2173 #if defined (CONFIG_USER_ONLY)
2174 goto invalid_opc;
2175 #else
2176 if (!ctx->pal_mode)
2177 goto invalid_opc;
2178 else {
2179 TCGv addr, val;
2180 addr = tcg_temp_new();
2181 if (rb != 31)
2182 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2183 else
2184 tcg_gen_movi_i64(addr, disp12);
2185 if (ra != 31)
2186 val = cpu_ir[ra];
2187 else {
2188 val = tcg_temp_new();
2189 tcg_gen_movi_i64(val, 0);
2190 }
2191 switch ((insn >> 12) & 0xF) {
2192 case 0x0:
2193 /* Longword physical access */
2194 gen_helper_stl_raw(val, addr);
2195 break;
2196 case 0x1:
2197 /* Quadword physical access */
2198 gen_helper_stq_raw(val, addr);
2199 break;
2200 case 0x2:
2201 /* Longword physical access with lock */
2202 gen_helper_stl_c_raw(val, val, addr);
2203 break;
2204 case 0x3:
2205 /* Quadword physical access with lock */
2206 gen_helper_stq_c_raw(val, val, addr);
2207 break;
2208 case 0x4:
2209 /* Longword virtual access */
2210 gen_helper_st_virt_to_phys(addr, addr);
2211 gen_helper_stl_raw(val, addr);
2212 break;
2213 case 0x5:
2214 /* Quadword virtual access */
2215 gen_helper_st_virt_to_phys(addr, addr);
2216 gen_helper_stq_raw(val, addr);
2217 break;
2218 case 0x6:
2219 /* Invalid */
2220 goto invalid_opc;
2221 case 0x7:
2222 /* Invalid */
2223 goto invalid_opc;
2224 case 0x8:
2225 /* Invalid */
2226 goto invalid_opc;
2227 case 0x9:
2228 /* Invalid */
2229 goto invalid_opc;
2230 case 0xA:
2231 /* Invalid */
2232 goto invalid_opc;
2233 case 0xB:
2234 /* Invalid */
2235 goto invalid_opc;
2236 case 0xC:
2237 /* Longword virtual access with alternate access mode */
2238 gen_helper_set_alt_mode();
2239 gen_helper_st_virt_to_phys(addr, addr);
2240 gen_helper_stl_raw(val, addr);
2241 gen_helper_restore_mode();
2242 break;
2243 case 0xD:
2244 /* Quadword virtual access with alternate access mode */
2245 gen_helper_set_alt_mode();
2246 gen_helper_st_virt_to_phys(addr, addr);
gen_helper_stq_raw(val, addr);
2248 gen_helper_restore_mode();
2249 break;
2250 case 0xE:
2251 /* Invalid */
2252 goto invalid_opc;
2253 case 0xF:
2254 /* Invalid */
2255 goto invalid_opc;
2256 }
2257 if (ra == 31)
2258 tcg_temp_free(val);
2259 tcg_temp_free(addr);
2260 }
2261 break;
2262 #endif
2263 case 0x20:
2264 /* LDF */
2265 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2266 break;
2267 case 0x21:
2268 /* LDG */
2269 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2270 break;
2271 case 0x22:
2272 /* LDS */
2273 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2274 break;
2275 case 0x23:
2276 /* LDT */
2277 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2278 break;
2279 case 0x24:
2280 /* STF */
2281 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2282 break;
2283 case 0x25:
2284 /* STG */
2285 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2286 break;
2287 case 0x26:
2288 /* STS */
2289 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2290 break;
2291 case 0x27:
2292 /* STT */
2293 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2294 break;
2295 case 0x28:
2296 /* LDL */
2297 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2298 break;
2299 case 0x29:
2300 /* LDQ */
2301 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2302 break;
2303 case 0x2A:
2304 /* LDL_L */
2305 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2306 break;
2307 case 0x2B:
2308 /* LDQ_L */
2309 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2310 break;
2311 case 0x2C:
2312 /* STL */
2313 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
2314 break;
2315 case 0x2D:
2316 /* STQ */
2317 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
2318 break;
2319 case 0x2E:
2320 /* STL_C */
2321 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
2322 break;
2323 case 0x2F:
2324 /* STQ_C */
2325 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
2326 break;
2327 case 0x30:
2328 /* BR */
2329 if (ra != 31)
2330 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2331 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2332 ret = 1;
2333 break;
2334 case 0x31: /* FBEQ */
2335 case 0x32: /* FBLT */
2336 case 0x33: /* FBLE */
2337 gen_fbcond(ctx, opc, ra, disp21);
2338 ret = 1;
2339 break;
2340 case 0x34:
2341 /* BSR */
2342 if (ra != 31)
2343 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2344 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2345 ret = 1;
2346 break;
2347 case 0x35: /* FBNE */
2348 case 0x36: /* FBGE */
2349 case 0x37: /* FBGT */
2350 gen_fbcond(ctx, opc, ra, disp21);
2351 ret = 1;
2352 break;
2353 case 0x38:
2354 /* BLBC */
2355 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2356 ret = 1;
2357 break;
2358 case 0x39:
2359 /* BEQ */
2360 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2361 ret = 1;
2362 break;
2363 case 0x3A:
2364 /* BLT */
2365 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2366 ret = 1;
2367 break;
2368 case 0x3B:
2369 /* BLE */
2370 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2371 ret = 1;
2372 break;
2373 case 0x3C:
2374 /* BLBS */
2375 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2376 ret = 1;
2377 break;
2378 case 0x3D:
2379 /* BNE */
2380 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2381 ret = 1;
2382 break;
2383 case 0x3E:
2384 /* BGE */
2385 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2386 ret = 1;
2387 break;
2388 case 0x3F:
2389 /* BGT */
2390 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2391 ret = 1;
2392 break;
2393 invalid_opc:
2394 gen_invalid(ctx);
2395 ret = 3;
2396 break;
2397 }
2398
2399 return ret;
2400 }
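/* Return values: 0 continues the current TB, 1 means the instruction
   set cpu_pc itself (branches and jumps), 2 forces a TB exit with
   cpu_pc still to be reloaded, and 3 means an exception was raised.
   The translation loop below stores ctx.pc back to cpu_pc only for
   returns other than 1 and 3. */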
2401
2402 static inline void gen_intermediate_code_internal(CPUState *env,
2403 TranslationBlock *tb,
2404 int search_pc)
2405 {
2406 DisasContext ctx, *ctxp = &ctx;
2407 target_ulong pc_start;
2408 uint32_t insn;
2409 uint16_t *gen_opc_end;
2410 CPUBreakpoint *bp;
2411 int j, lj = -1;
2412 int ret;
2413 int num_insns;
2414 int max_insns;
2415
2416 pc_start = tb->pc;
2417 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2418 ctx.pc = pc_start;
2419 ctx.amask = env->amask;
2420 ctx.env = env;
2421 #if defined (CONFIG_USER_ONLY)
2422 ctx.mem_idx = 0;
2423 #else
2424 ctx.mem_idx = ((env->ps >> 3) & 3);
2425 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
2426 #endif
2427 num_insns = 0;
2428 max_insns = tb->cflags & CF_COUNT_MASK;
2429 if (max_insns == 0)
2430 max_insns = CF_COUNT_MASK;
2431
2432 gen_icount_start();
2433 for (ret = 0; ret == 0;) {
2434 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2435 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2436 if (bp->pc == ctx.pc) {
2437 gen_excp(&ctx, EXCP_DEBUG, 0);
2438 break;
2439 }
2440 }
2441 }
2442 if (search_pc) {
2443 j = gen_opc_ptr - gen_opc_buf;
2444 if (lj < j) {
2445 lj++;
2446 while (lj < j)
2447 gen_opc_instr_start[lj++] = 0;
2448 }
2449 gen_opc_pc[lj] = ctx.pc;
2450 gen_opc_instr_start[lj] = 1;
2451 gen_opc_icount[lj] = num_insns;
2452 }
2453 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
2454 gen_io_start();
2455 insn = ldl_code(ctx.pc);
2456 num_insns++;
2457 ctx.pc += 4;
2458 ret = translate_one(ctxp, insn);
2459 if (ret != 0)
2460 break;
2461 /* if we reach a page boundary or are single stepping, stop
2462 * generation
2463 */
2464 if (env->singlestep_enabled) {
2465 gen_excp(&ctx, EXCP_DEBUG, 0);
2466 break;
2467 }
2468
2469 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
2470 break;
2471
2472 if (gen_opc_ptr >= gen_opc_end)
2473 break;
2474
2475 if (num_insns >= max_insns)
2476 break;
2477
2478 if (singlestep) {
2479 break;
2480 }
2481 }
2482 if (ret != 1 && ret != 3) {
2483 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2484 }
2485 if (tb->cflags & CF_LAST_IO)
2486 gen_io_end();
2487 /* Generate the return instruction */
2488 tcg_gen_exit_tb(0);
2489 gen_icount_end(tb, num_insns);
2490 *gen_opc_ptr = INDEX_op_end;
2491 if (search_pc) {
2492 j = gen_opc_ptr - gen_opc_buf;
2493 lj++;
2494 while (lj <= j)
2495 gen_opc_instr_start[lj++] = 0;
2496 } else {
2497 tb->size = ctx.pc - pc_start;
2498 tb->icount = num_insns;
2499 }
2500 #ifdef DEBUG_DISAS
2501 log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
2502 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2503 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2504 log_target_disas(pc_start, ctx.pc - pc_start, 1);
2505 qemu_log("\n");
2506 }
2507 #endif
2508 }
2509
2510 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
2511 {
2512 gen_intermediate_code_internal(env, tb, 0);
2513 }
2514
2515 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
2516 {
2517 gen_intermediate_code_internal(env, tb, 1);
2518 }
2519
2520 struct cpu_def_t {
2521 const char *name;
2522 int implver, amask;
2523 };
2524
2525 static const struct cpu_def_t cpu_defs[] = {
2526 { "ev4", IMPLVER_2106x, 0 },
2527 { "ev5", IMPLVER_21164, 0 },
2528 { "ev56", IMPLVER_21164, AMASK_BWX },
2529 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
2530 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
2531 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
2532 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
2533 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
2534 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
2535 { "21064", IMPLVER_2106x, 0 },
2536 { "21164", IMPLVER_21164, 0 },
2537 { "21164a", IMPLVER_21164, AMASK_BWX },
2538 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
2539 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
2540 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
2541 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
2542 };
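/* cpu_alpha_init("ev67"), for example, selects IMPLVER_21264 with
   the full BWX/FIX/CIX/MVI/TRAP/PREFETCH mask; a model name not
   found in this table falls through to the same ev67-class default
   set up below. */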
2543
2544 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
2545 {
2546 CPUAlphaState *env;
2547 uint64_t hwpcb;
2548 int implver, amask, i, max;
2549
2550 env = qemu_mallocz(sizeof(CPUAlphaState));
2551 cpu_exec_init(env);
2552 alpha_translate_init();
2553 tlb_flush(env, 1);
2554
2555 /* Default to ev67; no reason not to emulate insns by default. */
2556 implver = IMPLVER_21264;
2557 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
2558 | AMASK_TRAP | AMASK_PREFETCH);
2559
2560 max = ARRAY_SIZE(cpu_defs);
2561 for (i = 0; i < max; i++) {
2562 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
2563 implver = cpu_defs[i].implver;
2564 amask = cpu_defs[i].amask;
2565 break;
2566 }
2567 }
2568 env->implver = implver;
2569 env->amask = amask;
2570
2571 env->ps = 0x1F00;
2572 #if defined (CONFIG_USER_ONLY)
2573 env->ps |= 1 << 3;
2574 #endif
2575 pal_init(env);
2576 /* Initialize IPR */
2577 hwpcb = env->ipr[IPR_PCBB];
2578 env->ipr[IPR_ASN] = 0;
2579 env->ipr[IPR_ASTEN] = 0;
2580 env->ipr[IPR_ASTSR] = 0;
2581 env->ipr[IPR_DATFX] = 0;
2582 /* XXX: fix this */
2583 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
2584 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
2585 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
2586 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
2587 env->ipr[IPR_FEN] = 0;
2588 env->ipr[IPR_IPL] = 31;
2589 env->ipr[IPR_MCES] = 0;
2590 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
2591 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
2592 env->ipr[IPR_SISR] = 0;
2593 env->ipr[IPR_VIRBND] = -1ULL;
2594
2595 qemu_init_vcpu(env);
2596 return env;
2597 }
2598
2599 void gen_pc_load(CPUState *env, TranslationBlock *tb,
2600 unsigned long searched_pc, int pc_pos, void *puc)
2601 {
2602 env->pc = gen_opc_pc[pc_pos];
2603 }