]> git.proxmox.com Git - qemu.git/blob - target-alpha/translate.c
target-alpha: Rewrite gen_ext_[hl] in terms of zapnot.
[qemu.git] / target-alpha / translate.c
1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 #else
40 # define LOG_DISAS(...) do { } while (0)
41 #endif
42
/* Per-instruction decode/translation state threaded through the translator. */
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;          /* virtual PC of the insn being translated */
    int mem_idx;          /* MMU index passed to the qemu_ld/st ops */
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;         /* presumably set while executing PALcode — not used in this chunk */
#endif
    CPUAlphaState *env;
    uint32_t amask;       /* CPU feature bits (e.g. AMASK_BWX) gating optional insns */
};
53
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];    /* integer registers; $31 (always zero) has no backing TCGv */
static TCGv cpu_fir[31];   /* FP registers; f31 likewise has no backing TCGv */
static TCGv cpu_pc;
static TCGv cpu_lock;      /* address held by LDx_L; set to -1 by STx_C to drop the lock */

/* register names; sized for "ir0".."ir30" and "fir0".."fir30" incl. NULs */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
65
/* Create the TCG globals backing the CPU state and register the helper
   functions.  Idempotent: repeated calls return early.  */
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        /* "ir0".."ir9" take 4 bytes incl. NUL, "ir10".."ir30" take 5.  */
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
102
103 static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
104 {
105 TCGv_i32 tmp1, tmp2;
106
107 tcg_gen_movi_i64(cpu_pc, ctx->pc);
108 tmp1 = tcg_const_i32(exception);
109 tmp2 = tcg_const_i32(error_code);
110 gen_helper_excp(tmp1, tmp2);
111 tcg_temp_free_i32(tmp2);
112 tcg_temp_free_i32(tmp1);
113 }
114
/* Raise an illegal-instruction (OPCDEC) exception at the current PC. */
static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
119
120 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
121 {
122 TCGv tmp = tcg_temp_new();
123 TCGv_i32 tmp32 = tcg_temp_new_i32();
124 tcg_gen_qemu_ld32u(tmp, t1, flags);
125 tcg_gen_trunc_i64_i32(tmp32, tmp);
126 gen_helper_memory_to_f(t0, tmp32);
127 tcg_temp_free_i32(tmp32);
128 tcg_temp_free(tmp);
129 }
130
131 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
132 {
133 TCGv tmp = tcg_temp_new();
134 tcg_gen_qemu_ld64(tmp, t1, flags);
135 gen_helper_memory_to_g(t0, tmp);
136 tcg_temp_free(tmp);
137 }
138
139 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
140 {
141 TCGv tmp = tcg_temp_new();
142 TCGv_i32 tmp32 = tcg_temp_new_i32();
143 tcg_gen_qemu_ld32u(tmp, t1, flags);
144 tcg_gen_trunc_i64_i32(tmp32, tmp);
145 gen_helper_memory_to_s(t0, tmp32);
146 tcg_temp_free_i32(tmp32);
147 tcg_temp_free(tmp);
148 }
149
/* LDL_L: load-locked longword; record the locked address in cpu_lock
   for a later STL_C/STQ_C to compare against.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}
155
/* LDQ_L: load-locked quadword; record the locked address in cpu_lock. */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
161
162 static inline void gen_load_mem(DisasContext *ctx,
163 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
164 int flags),
165 int ra, int rb, int32_t disp16, int fp,
166 int clear)
167 {
168 TCGv addr;
169
170 if (unlikely(ra == 31))
171 return;
172
173 addr = tcg_temp_new();
174 if (rb != 31) {
175 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
176 if (clear)
177 tcg_gen_andi_i64(addr, addr, ~0x7);
178 } else {
179 if (clear)
180 disp16 &= ~0x7;
181 tcg_gen_movi_i64(addr, disp16);
182 }
183 if (fp)
184 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
185 else
186 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
187 tcg_temp_free(addr);
188 }
189
190 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
191 {
192 TCGv_i32 tmp32 = tcg_temp_new_i32();
193 TCGv tmp = tcg_temp_new();
194 gen_helper_f_to_memory(tmp32, t0);
195 tcg_gen_extu_i32_i64(tmp, tmp32);
196 tcg_gen_qemu_st32(tmp, t1, flags);
197 tcg_temp_free(tmp);
198 tcg_temp_free_i32(tmp32);
199 }
200
201 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
202 {
203 TCGv tmp = tcg_temp_new();
204 gen_helper_g_to_memory(tmp, t0);
205 tcg_gen_qemu_st64(tmp, t1, flags);
206 tcg_temp_free(tmp);
207 }
208
209 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
210 {
211 TCGv_i32 tmp32 = tcg_temp_new_i32();
212 TCGv tmp = tcg_temp_new();
213 gen_helper_s_to_memory(tmp32, t0);
214 tcg_gen_extu_i32_i64(tmp, tmp32);
215 tcg_gen_qemu_st32(tmp, t1, flags);
216 tcg_temp_free(tmp);
217 tcg_temp_free_i32(tmp32);
218 }
219
/* STL_C: store-conditional longword.  The store happens only when the
   address matches the one saved by the last load-locked; t0 is
   overwritten with 1 on success, 0 on failure.  The lock is dropped
   (set to -1) either way.  */
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Lock lost: report failure without storing.  */
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
235
/* STQ_C: store-conditional quadword; same protocol as gen_qemu_stl_c —
   t0 becomes the success flag (1/0) and the lock is always cleared.  */
static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Lock lost: report failure without storing.  */
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
251
/* Emit a store mem[Rb + disp16] <- Rx using the given qemu_st callback.
   FP selects the float register file; CLEAR masks the low 3 address
   bits (STQ_U-style alignment); LOCAL allocates branch-safe local
   temporaries (needed when the callback itself emits branches, as the
   store-conditional callbacks do).  A $31/f31 source stores zero.  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        /* Source is the zero register: store an explicit zero.  */
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
288
/* Conditional branch: PC <- PC + 4*disp when (RA cond 0), else fall
   through.  MASK tests only bit 0 of RA (BLBC/BLBS).  Both outcomes
   write cpu_pc, so the caller must end the translation block.  */
static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp, int mask)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    /* Not taken: ctx->pc is the fall-through PC.  */
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Taken: disp is in instruction (4-byte) units.  */
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}
316
317 static inline void gen_fbcond(DisasContext *ctx, int opc, int ra,
318 int32_t disp16)
319 {
320 int l1, l2;
321 TCGv tmp;
322 TCGv src;
323
324 l1 = gen_new_label();
325 l2 = gen_new_label();
326 if (ra != 31) {
327 tmp = tcg_temp_new();
328 src = cpu_fir[ra];
329 } else {
330 tmp = tcg_const_i64(0);
331 src = tmp;
332 }
333 switch (opc) {
334 case 0x31: /* FBEQ */
335 gen_helper_cmpfeq(tmp, src);
336 break;
337 case 0x32: /* FBLT */
338 gen_helper_cmpflt(tmp, src);
339 break;
340 case 0x33: /* FBLE */
341 gen_helper_cmpfle(tmp, src);
342 break;
343 case 0x35: /* FBNE */
344 gen_helper_cmpfne(tmp, src);
345 break;
346 case 0x36: /* FBGE */
347 gen_helper_cmpfge(tmp, src);
348 break;
349 case 0x37: /* FBGT */
350 gen_helper_cmpfgt(tmp, src);
351 break;
352 default:
353 abort();
354 }
355 tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
356 tcg_gen_movi_i64(cpu_pc, ctx->pc);
357 tcg_gen_br(l2);
358 gen_set_label(l1);
359 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2));
360 gen_set_label(l2);
361 }
362
/* Integer conditional move: RC <- RB (or LIT) unless (RA inv_cond 0);
   i.e. INV_COND is the condition under which the move is SKIPPED.
   MASK tests only bit 0 of RA (CMOVLBS/CMOVLBC).  */
static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
394
/* Emit a unary FP operation through its helper: fRC <- helper(fRB).
   f31 as the source reads as zero; a write to f31 is dropped.  */
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
                                                            \
    if (rb != 31)                                           \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    else {                                                  \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
/* Square roots.  */
FARITH2(sqrts)
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(sqrtt)
/* Format conversions.  */
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
FARITH2(cvtst)
FARITH2(cvtts)
FARITH2(cvttq)
FARITH2(cvtqs)
FARITH2(cvtqt)
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtqlv)
FARITH2(cvtqlsv)
426
/* Emit a binary FP operation through its helper:
   fRC <- helper(fRA, fRB).  Either source being f31 reads as zero;
   a write to f31 is dropped.  */
#define FARITH3(name)                                                 \
static inline void glue(gen_f, name)(int ra, int rb, int rc)          \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (rb != 31)                                                 \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
        else {                                                        \
            TCGv tmp = tcg_const_i64(0);                              \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);      \
            tcg_temp_free(tmp);                                       \
        }                                                             \
    } else {                                                          \
        TCGv tmp = tcg_const_i64(0);                                  \
        if (rb != 31)                                                 \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);      \
        else                                                          \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);              \
        tcg_temp_free(tmp);                                           \
    }                                                                 \
}

/* VAX F/G arithmetic and compares.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
/* IEEE S/T arithmetic and compares.  */
FARITH3(adds)
FARITH3(subs)
FARITH3(muls)
FARITH3(divs)
FARITH3(addt)
FARITH3(subt)
FARITH3(mult)
FARITH3(divt)
FARITH3(cmptun)
FARITH3(cmpteq)
FARITH3(cmptlt)
FARITH3(cmptle)
/* Sign-copy operations.  */
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)
477
478 #define FCMOV(name) \
479 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
480 { \
481 int l1; \
482 TCGv tmp; \
483 \
484 if (unlikely(rc == 31)) \
485 return; \
486 \
487 l1 = gen_new_label(); \
488 tmp = tcg_temp_new(); \
489 if (ra != 31) { \
490 tmp = tcg_temp_new(); \
491 gen_helper_ ## name (tmp, cpu_fir[ra]); \
492 } else { \
493 tmp = tcg_const_i64(0); \
494 gen_helper_ ## name (tmp, tmp); \
495 } \
496 tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1); \
497 if (rb != 31) \
498 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]); \
499 else \
500 tcg_gen_movi_i64(cpu_fir[rc], 0); \
501 gen_set_label(l1); \
502 }
503 FCMOV(cmpfeq)
504 FCMOV(cmpfne)
505 FCMOV(cmpflt)
506 FCMOV(cmpfge)
507 FCMOV(cmpfle)
508 FCMOV(cmpfgt)
509
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.
   Each set bit i of LIT preserves byte i of IR[RA]; clear bits zero
   the corresponding byte.  Common masks map to single ext/mov ops.  */
static inline void gen_zapnoti(int ra, int rc, uint8_t lit)
{
    uint64_t mask;
    int i;

    switch (lit) {
    case 0x00:
        /* All bytes zapped.  */
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        break;
    case 0x01:
        /* Keep byte 0 only.  */
        tcg_gen_ext8u_i64(cpu_ir[rc], cpu_ir[ra]);
        break;
    case 0x03:
        /* Keep bytes 0-1 (low word).  */
        tcg_gen_ext16u_i64(cpu_ir[rc], cpu_ir[ra]);
        break;
    case 0x0f:
        /* Keep bytes 0-3 (low longword).  */
        tcg_gen_ext32u_i64(cpu_ir[rc], cpu_ir[ra]);
        break;
    case 0xff:
        /* Keep everything.  */
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
        break;
    default:
        /* Build the byte mask from the bit mask.  */
        for (mask = i = 0; i < 8; ++i) {
            if ((lit >> i) & 1)
                mask |= 0xffull << (i * 8);
        }
        tcg_gen_andi_i64 (cpu_ir[rc], cpu_ir[ra], mask);
        break;
    }
}
543
544 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
545 {
546 if (unlikely(rc == 31))
547 return;
548 else if (unlikely(ra == 31))
549 tcg_gen_movi_i64(cpu_ir[rc], 0);
550 else if (islit)
551 gen_zapnoti(ra, rc, lit);
552 else
553 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
554 }
555
556 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
557 {
558 if (unlikely(rc == 31))
559 return;
560 else if (unlikely(ra == 31))
561 tcg_gen_movi_i64(cpu_ir[rc], 0);
562 else if (islit)
563 gen_zapnoti(ra, rc, ~lit);
564 else
565 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
566 }
567
568
/* EXTWH, EXTLH, EXTQH: shift RA left so the field of interest lands in
   the high-justified position, then mask with zapnot.  The shift count
   is (64 - 8 * (rb & 7)) mod 64.  */
static inline void gen_ext_h(int ra, int rb, int rc, int islit,
                             uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            /* Compute the shift count at translate time.  */
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            /* tmp1 = (64 - 8*(rb & 7)) & 0x3f, via negation mod 64.  */
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        /* Keep only the bytes selected by BYTE_MASK.  */
        gen_zapnoti(rc, rc, byte_mask);
    }
}
593
/* EXTBL, EXTWL, EXTLL, EXTQL: shift RA right by the byte offset in RB
   (or LIT), then mask the result with zapnot.  */
static inline void gen_ext_l(int ra, int rb, int rc, int islit,
                             uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            /* Shift right by 8 * (byte offset within the quadword).  */
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        /* Keep only the bytes selected by BYTE_MASK.  */
        gen_zapnoti(rc, rc, byte_mask);
    }
}
615
/* Code to call arith3 helpers: RC <- helper(RA, RB or LIT).
   $31 as the first source reads as zero; a write to $31 is dropped.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
/* Overflow-trapping arithmetic.  */
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
/* Byte mask/insert operations.  */
ARITH3(mskbl)
ARITH3(insbl)
ARITH3(mskwl)
ARITH3(inswl)
ARITH3(mskll)
ARITH3(insll)
ARITH3(mskql)
ARITH3(insql)
ARITH3(mskwh)
ARITH3(inswh)
ARITH3(msklh)
ARITH3(inslh)
ARITH3(mskqh)
ARITH3(insqh)
/* Multiplies.  */
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
/* MVI min/max and pixel-error ops.  */
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
673
/* MVI pack/unpack: RC <- helper(RB); $31 as source yields zero and a
   write to $31 is dropped.  */
#define MVIOP2(name)                                        \
static inline void glue(gen_, name)(int rb, int rc)         \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
    if (unlikely(rb == 31))                                 \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                    \
    else                                                    \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);       \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
688
689 static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
690 uint8_t lit)
691 {
692 int l1, l2;
693 TCGv tmp;
694
695 if (unlikely(rc == 31))
696 return;
697
698 l1 = gen_new_label();
699 l2 = gen_new_label();
700
701 if (ra != 31) {
702 tmp = tcg_temp_new();
703 tcg_gen_mov_i64(tmp, cpu_ir[ra]);
704 } else
705 tmp = tcg_const_i64(0);
706 if (islit)
707 tcg_gen_brcondi_i64(cond, tmp, lit, l1);
708 else
709 tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);
710
711 tcg_gen_movi_i64(cpu_ir[rc], 0);
712 tcg_gen_br(l2);
713 gen_set_label(l1);
714 tcg_gen_movi_i64(cpu_ir[rc], 1);
715 gen_set_label(l2);
716 }
717
718 static inline int translate_one(DisasContext *ctx, uint32_t insn)
719 {
720 uint32_t palcode;
721 int32_t disp21, disp16, disp12;
722 uint16_t fn11, fn16;
723 uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
724 uint8_t lit;
725 int ret;
726
727 /* Decode all instruction fields */
728 opc = insn >> 26;
729 ra = (insn >> 21) & 0x1F;
730 rb = (insn >> 16) & 0x1F;
731 rc = insn & 0x1F;
732 sbz = (insn >> 13) & 0x07;
733 real_islit = islit = (insn >> 12) & 1;
734 if (rb == 31 && !islit) {
735 islit = 1;
736 lit = 0;
737 } else
738 lit = (insn >> 13) & 0xFF;
739 palcode = insn & 0x03FFFFFF;
740 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
741 disp16 = (int16_t)(insn & 0x0000FFFF);
742 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
743 fn16 = insn & 0x0000FFFF;
744 fn11 = (insn >> 5) & 0x000007FF;
745 fpfn = fn11 & 0x3F;
746 fn7 = (insn >> 5) & 0x0000007F;
747 fn2 = (insn >> 5) & 0x00000003;
748 ret = 0;
749 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
750 opc, ra, rb, rc, disp16);
751
752 switch (opc) {
753 case 0x00:
754 /* CALL_PAL */
755 if (palcode >= 0x80 && palcode < 0xC0) {
756 /* Unprivileged PAL call */
757 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
758 #if !defined (CONFIG_USER_ONLY)
759 } else if (palcode < 0x40) {
760 /* Privileged PAL code */
761 if (ctx->mem_idx & 1)
762 goto invalid_opc;
763 else
764 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
765 #endif
766 } else {
767 /* Invalid PAL call */
768 goto invalid_opc;
769 }
770 ret = 3;
771 break;
772 case 0x01:
773 /* OPC01 */
774 goto invalid_opc;
775 case 0x02:
776 /* OPC02 */
777 goto invalid_opc;
778 case 0x03:
779 /* OPC03 */
780 goto invalid_opc;
781 case 0x04:
782 /* OPC04 */
783 goto invalid_opc;
784 case 0x05:
785 /* OPC05 */
786 goto invalid_opc;
787 case 0x06:
788 /* OPC06 */
789 goto invalid_opc;
790 case 0x07:
791 /* OPC07 */
792 goto invalid_opc;
793 case 0x08:
794 /* LDA */
795 if (likely(ra != 31)) {
796 if (rb != 31)
797 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
798 else
799 tcg_gen_movi_i64(cpu_ir[ra], disp16);
800 }
801 break;
802 case 0x09:
803 /* LDAH */
804 if (likely(ra != 31)) {
805 if (rb != 31)
806 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
807 else
808 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
809 }
810 break;
811 case 0x0A:
812 /* LDBU */
813 if (!(ctx->amask & AMASK_BWX))
814 goto invalid_opc;
815 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
816 break;
817 case 0x0B:
818 /* LDQ_U */
819 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
820 break;
821 case 0x0C:
822 /* LDWU */
823 if (!(ctx->amask & AMASK_BWX))
824 goto invalid_opc;
825 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
826 break;
827 case 0x0D:
828 /* STW */
829 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
830 break;
831 case 0x0E:
832 /* STB */
833 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
834 break;
835 case 0x0F:
836 /* STQ_U */
837 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
838 break;
839 case 0x10:
840 switch (fn7) {
841 case 0x00:
842 /* ADDL */
843 if (likely(rc != 31)) {
844 if (ra != 31) {
845 if (islit) {
846 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
847 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
848 } else {
849 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
850 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
851 }
852 } else {
853 if (islit)
854 tcg_gen_movi_i64(cpu_ir[rc], lit);
855 else
856 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
857 }
858 }
859 break;
860 case 0x02:
861 /* S4ADDL */
862 if (likely(rc != 31)) {
863 if (ra != 31) {
864 TCGv tmp = tcg_temp_new();
865 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
866 if (islit)
867 tcg_gen_addi_i64(tmp, tmp, lit);
868 else
869 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
870 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
871 tcg_temp_free(tmp);
872 } else {
873 if (islit)
874 tcg_gen_movi_i64(cpu_ir[rc], lit);
875 else
876 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
877 }
878 }
879 break;
880 case 0x09:
881 /* SUBL */
882 if (likely(rc != 31)) {
883 if (ra != 31) {
884 if (islit)
885 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
886 else
887 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
888 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
889 } else {
890 if (islit)
891 tcg_gen_movi_i64(cpu_ir[rc], -lit);
892 else {
893 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
894 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
895 }
896 }
897 break;
898 case 0x0B:
899 /* S4SUBL */
900 if (likely(rc != 31)) {
901 if (ra != 31) {
902 TCGv tmp = tcg_temp_new();
903 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
904 if (islit)
905 tcg_gen_subi_i64(tmp, tmp, lit);
906 else
907 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
908 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
909 tcg_temp_free(tmp);
910 } else {
911 if (islit)
912 tcg_gen_movi_i64(cpu_ir[rc], -lit);
913 else {
914 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
915 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
916 }
917 }
918 }
919 break;
920 case 0x0F:
921 /* CMPBGE */
922 gen_cmpbge(ra, rb, rc, islit, lit);
923 break;
924 case 0x12:
925 /* S8ADDL */
926 if (likely(rc != 31)) {
927 if (ra != 31) {
928 TCGv tmp = tcg_temp_new();
929 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
930 if (islit)
931 tcg_gen_addi_i64(tmp, tmp, lit);
932 else
933 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
934 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
935 tcg_temp_free(tmp);
936 } else {
937 if (islit)
938 tcg_gen_movi_i64(cpu_ir[rc], lit);
939 else
940 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
941 }
942 }
943 break;
944 case 0x1B:
945 /* S8SUBL */
946 if (likely(rc != 31)) {
947 if (ra != 31) {
948 TCGv tmp = tcg_temp_new();
949 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
950 if (islit)
951 tcg_gen_subi_i64(tmp, tmp, lit);
952 else
953 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
954 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
955 tcg_temp_free(tmp);
956 } else {
957 if (islit)
958 tcg_gen_movi_i64(cpu_ir[rc], -lit);
959 else
960 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
961 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
962 }
963 }
964 }
965 break;
966 case 0x1D:
967 /* CMPULT */
968 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
969 break;
970 case 0x20:
971 /* ADDQ */
972 if (likely(rc != 31)) {
973 if (ra != 31) {
974 if (islit)
975 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
976 else
977 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
978 } else {
979 if (islit)
980 tcg_gen_movi_i64(cpu_ir[rc], lit);
981 else
982 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
983 }
984 }
985 break;
986 case 0x22:
987 /* S4ADDQ */
988 if (likely(rc != 31)) {
989 if (ra != 31) {
990 TCGv tmp = tcg_temp_new();
991 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
992 if (islit)
993 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
994 else
995 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
996 tcg_temp_free(tmp);
997 } else {
998 if (islit)
999 tcg_gen_movi_i64(cpu_ir[rc], lit);
1000 else
1001 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1002 }
1003 }
1004 break;
1005 case 0x29:
1006 /* SUBQ */
1007 if (likely(rc != 31)) {
1008 if (ra != 31) {
1009 if (islit)
1010 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1011 else
1012 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1013 } else {
1014 if (islit)
1015 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1016 else
1017 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1018 }
1019 }
1020 break;
1021 case 0x2B:
1022 /* S4SUBQ */
1023 if (likely(rc != 31)) {
1024 if (ra != 31) {
1025 TCGv tmp = tcg_temp_new();
1026 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1027 if (islit)
1028 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1029 else
1030 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1031 tcg_temp_free(tmp);
1032 } else {
1033 if (islit)
1034 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1035 else
1036 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1037 }
1038 }
1039 break;
1040 case 0x2D:
1041 /* CMPEQ */
1042 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1043 break;
1044 case 0x32:
1045 /* S8ADDQ */
1046 if (likely(rc != 31)) {
1047 if (ra != 31) {
1048 TCGv tmp = tcg_temp_new();
1049 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1050 if (islit)
1051 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1052 else
1053 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1054 tcg_temp_free(tmp);
1055 } else {
1056 if (islit)
1057 tcg_gen_movi_i64(cpu_ir[rc], lit);
1058 else
1059 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1060 }
1061 }
1062 break;
1063 case 0x3B:
1064 /* S8SUBQ */
1065 if (likely(rc != 31)) {
1066 if (ra != 31) {
1067 TCGv tmp = tcg_temp_new();
1068 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1069 if (islit)
1070 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1071 else
1072 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1073 tcg_temp_free(tmp);
1074 } else {
1075 if (islit)
1076 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1077 else
1078 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1079 }
1080 }
1081 break;
1082 case 0x3D:
1083 /* CMPULE */
1084 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1085 break;
1086 case 0x40:
1087 /* ADDL/V */
1088 gen_addlv(ra, rb, rc, islit, lit);
1089 break;
1090 case 0x49:
1091 /* SUBL/V */
1092 gen_sublv(ra, rb, rc, islit, lit);
1093 break;
1094 case 0x4D:
1095 /* CMPLT */
1096 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1097 break;
1098 case 0x60:
1099 /* ADDQ/V */
1100 gen_addqv(ra, rb, rc, islit, lit);
1101 break;
1102 case 0x69:
1103 /* SUBQ/V */
1104 gen_subqv(ra, rb, rc, islit, lit);
1105 break;
1106 case 0x6D:
1107 /* CMPLE */
1108 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1109 break;
1110 default:
1111 goto invalid_opc;
1112 }
1113 break;
1114 case 0x11:
1115 switch (fn7) {
1116 case 0x00:
1117 /* AND */
1118 if (likely(rc != 31)) {
1119 if (ra == 31)
1120 tcg_gen_movi_i64(cpu_ir[rc], 0);
1121 else if (islit)
1122 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1123 else
1124 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1125 }
1126 break;
1127 case 0x08:
1128 /* BIC */
1129 if (likely(rc != 31)) {
1130 if (ra != 31) {
1131 if (islit)
1132 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1133 else
1134 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1135 } else
1136 tcg_gen_movi_i64(cpu_ir[rc], 0);
1137 }
1138 break;
1139 case 0x14:
1140 /* CMOVLBS */
1141 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1142 break;
1143 case 0x16:
1144 /* CMOVLBC */
1145 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1146 break;
1147 case 0x20:
1148 /* BIS */
1149 if (likely(rc != 31)) {
1150 if (ra != 31) {
1151 if (islit)
1152 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1153 else
1154 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1155 } else {
1156 if (islit)
1157 tcg_gen_movi_i64(cpu_ir[rc], lit);
1158 else
1159 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1160 }
1161 }
1162 break;
1163 case 0x24:
1164 /* CMOVEQ */
1165 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1166 break;
1167 case 0x26:
1168 /* CMOVNE */
1169 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1170 break;
1171 case 0x28:
1172 /* ORNOT */
1173 if (likely(rc != 31)) {
1174 if (ra != 31) {
1175 if (islit)
1176 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1177 else
1178 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1179 } else {
1180 if (islit)
1181 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1182 else
1183 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1184 }
1185 }
1186 break;
1187 case 0x40:
1188 /* XOR */
1189 if (likely(rc != 31)) {
1190 if (ra != 31) {
1191 if (islit)
1192 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1193 else
1194 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1195 } else {
1196 if (islit)
1197 tcg_gen_movi_i64(cpu_ir[rc], lit);
1198 else
1199 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1200 }
1201 }
1202 break;
1203 case 0x44:
1204 /* CMOVLT */
1205 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1206 break;
1207 case 0x46:
1208 /* CMOVGE */
1209 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1210 break;
1211 case 0x48:
1212 /* EQV */
1213 if (likely(rc != 31)) {
1214 if (ra != 31) {
1215 if (islit)
1216 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1217 else
1218 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1219 } else {
1220 if (islit)
1221 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1222 else
1223 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1224 }
1225 }
1226 break;
1227 case 0x61:
1228 /* AMASK */
1229 if (likely(rc != 31)) {
1230 if (islit)
1231 tcg_gen_movi_i64(cpu_ir[rc], lit);
1232 else
1233 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1234 switch (ctx->env->implver) {
1235 case IMPLVER_2106x:
1236 /* EV4, EV45, LCA, LCA45 & EV5 */
1237 break;
1238 case IMPLVER_21164:
1239 case IMPLVER_21264:
1240 case IMPLVER_21364:
1241 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1242 ~(uint64_t)ctx->amask);
1243 break;
1244 }
1245 }
1246 break;
1247 case 0x64:
1248 /* CMOVLE */
1249 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
1250 break;
1251 case 0x66:
1252 /* CMOVGT */
1253 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
1254 break;
1255 case 0x6C:
1256 /* IMPLVER */
1257 if (rc != 31)
1258 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
1259 break;
1260 default:
1261 goto invalid_opc;
1262 }
1263 break;
1264 case 0x12:
1265 switch (fn7) {
1266 case 0x02:
1267 /* MSKBL */
1268 gen_mskbl(ra, rb, rc, islit, lit);
1269 break;
1270 case 0x06:
1271 /* EXTBL */
1272 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
1273 break;
1274 case 0x0B:
1275 /* INSBL */
1276 gen_insbl(ra, rb, rc, islit, lit);
1277 break;
1278 case 0x12:
1279 /* MSKWL */
1280 gen_mskwl(ra, rb, rc, islit, lit);
1281 break;
1282 case 0x16:
1283 /* EXTWL */
1284 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
1285 break;
1286 case 0x1B:
1287 /* INSWL */
1288 gen_inswl(ra, rb, rc, islit, lit);
1289 break;
1290 case 0x22:
1291 /* MSKLL */
1292 gen_mskll(ra, rb, rc, islit, lit);
1293 break;
1294 case 0x26:
1295 /* EXTLL */
1296 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
1297 break;
1298 case 0x2B:
1299 /* INSLL */
1300 gen_insll(ra, rb, rc, islit, lit);
1301 break;
1302 case 0x30:
1303 /* ZAP */
1304 gen_zap(ra, rb, rc, islit, lit);
1305 break;
1306 case 0x31:
1307 /* ZAPNOT */
1308 gen_zapnot(ra, rb, rc, islit, lit);
1309 break;
1310 case 0x32:
1311 /* MSKQL */
1312 gen_mskql(ra, rb, rc, islit, lit);
1313 break;
1314 case 0x34:
1315 /* SRL */
1316 if (likely(rc != 31)) {
1317 if (ra != 31) {
1318 if (islit)
1319 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1320 else {
1321 TCGv shift = tcg_temp_new();
1322 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1323 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1324 tcg_temp_free(shift);
1325 }
1326 } else
1327 tcg_gen_movi_i64(cpu_ir[rc], 0);
1328 }
1329 break;
1330 case 0x36:
1331 /* EXTQL */
1332 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
1333 break;
1334 case 0x39:
1335 /* SLL */
1336 if (likely(rc != 31)) {
1337 if (ra != 31) {
1338 if (islit)
1339 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1340 else {
1341 TCGv shift = tcg_temp_new();
1342 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1343 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1344 tcg_temp_free(shift);
1345 }
1346 } else
1347 tcg_gen_movi_i64(cpu_ir[rc], 0);
1348 }
1349 break;
1350 case 0x3B:
1351 /* INSQL */
1352 gen_insql(ra, rb, rc, islit, lit);
1353 break;
1354 case 0x3C:
1355 /* SRA */
1356 if (likely(rc != 31)) {
1357 if (ra != 31) {
1358 if (islit)
1359 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1360 else {
1361 TCGv shift = tcg_temp_new();
1362 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1363 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1364 tcg_temp_free(shift);
1365 }
1366 } else
1367 tcg_gen_movi_i64(cpu_ir[rc], 0);
1368 }
1369 break;
1370 case 0x52:
1371 /* MSKWH */
1372 gen_mskwh(ra, rb, rc, islit, lit);
1373 break;
1374 case 0x57:
1375 /* INSWH */
1376 gen_inswh(ra, rb, rc, islit, lit);
1377 break;
1378 case 0x5A:
1379 /* EXTWH */
1380 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
1381 break;
1382 case 0x62:
1383 /* MSKLH */
1384 gen_msklh(ra, rb, rc, islit, lit);
1385 break;
1386 case 0x67:
1387 /* INSLH */
1388 gen_inslh(ra, rb, rc, islit, lit);
1389 break;
1390 case 0x6A:
1391 /* EXTLH */
1392 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
1393 break;
1394 case 0x72:
1395 /* MSKQH */
1396 gen_mskqh(ra, rb, rc, islit, lit);
1397 break;
1398 case 0x77:
1399 /* INSQH */
1400 gen_insqh(ra, rb, rc, islit, lit);
1401 break;
1402 case 0x7A:
1403 /* EXTQH */
1404 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
1405 break;
1406 default:
1407 goto invalid_opc;
1408 }
1409 break;
1410 case 0x13:
1411 switch (fn7) {
1412 case 0x00:
1413 /* MULL */
1414 if (likely(rc != 31)) {
1415 if (ra == 31)
1416 tcg_gen_movi_i64(cpu_ir[rc], 0);
1417 else {
1418 if (islit)
1419 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1420 else
1421 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1422 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1423 }
1424 }
1425 break;
1426 case 0x20:
1427 /* MULQ */
1428 if (likely(rc != 31)) {
1429 if (ra == 31)
1430 tcg_gen_movi_i64(cpu_ir[rc], 0);
1431 else if (islit)
1432 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1433 else
1434 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1435 }
1436 break;
1437 case 0x30:
1438 /* UMULH */
1439 gen_umulh(ra, rb, rc, islit, lit);
1440 break;
1441 case 0x40:
1442 /* MULL/V */
1443 gen_mullv(ra, rb, rc, islit, lit);
1444 break;
1445 case 0x60:
1446 /* MULQ/V */
1447 gen_mulqv(ra, rb, rc, islit, lit);
1448 break;
1449 default:
1450 goto invalid_opc;
1451 }
1452 break;
1453 case 0x14:
1454 switch (fpfn) { /* f11 & 0x3F */
1455 case 0x04:
1456 /* ITOFS */
1457 if (!(ctx->amask & AMASK_FIX))
1458 goto invalid_opc;
1459 if (likely(rc != 31)) {
1460 if (ra != 31) {
1461 TCGv_i32 tmp = tcg_temp_new_i32();
1462 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1463 gen_helper_memory_to_s(cpu_fir[rc], tmp);
1464 tcg_temp_free_i32(tmp);
1465 } else
1466 tcg_gen_movi_i64(cpu_fir[rc], 0);
1467 }
1468 break;
1469 case 0x0A:
1470 /* SQRTF */
1471 if (!(ctx->amask & AMASK_FIX))
1472 goto invalid_opc;
1473 gen_fsqrtf(rb, rc);
1474 break;
1475 case 0x0B:
1476 /* SQRTS */
1477 if (!(ctx->amask & AMASK_FIX))
1478 goto invalid_opc;
1479 gen_fsqrts(rb, rc);
1480 break;
1481 case 0x14:
1482 /* ITOFF */
1483 if (!(ctx->amask & AMASK_FIX))
1484 goto invalid_opc;
1485 if (likely(rc != 31)) {
1486 if (ra != 31) {
1487 TCGv_i32 tmp = tcg_temp_new_i32();
1488 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1489 gen_helper_memory_to_f(cpu_fir[rc], tmp);
1490 tcg_temp_free_i32(tmp);
1491 } else
1492 tcg_gen_movi_i64(cpu_fir[rc], 0);
1493 }
1494 break;
1495 case 0x24:
1496 /* ITOFT */
1497 if (!(ctx->amask & AMASK_FIX))
1498 goto invalid_opc;
1499 if (likely(rc != 31)) {
1500 if (ra != 31)
1501 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
1502 else
1503 tcg_gen_movi_i64(cpu_fir[rc], 0);
1504 }
1505 break;
1506 case 0x2A:
1507 /* SQRTG */
1508 if (!(ctx->amask & AMASK_FIX))
1509 goto invalid_opc;
1510 gen_fsqrtg(rb, rc);
1511 break;
1512 case 0x02B:
1513 /* SQRTT */
1514 if (!(ctx->amask & AMASK_FIX))
1515 goto invalid_opc;
1516 gen_fsqrtt(rb, rc);
1517 break;
1518 default:
1519 goto invalid_opc;
1520 }
1521 break;
1522 case 0x15:
1523 /* VAX floating point */
1524 /* XXX: rounding mode and trap are ignored (!) */
1525 switch (fpfn) { /* f11 & 0x3F */
1526 case 0x00:
1527 /* ADDF */
1528 gen_faddf(ra, rb, rc);
1529 break;
1530 case 0x01:
1531 /* SUBF */
1532 gen_fsubf(ra, rb, rc);
1533 break;
1534 case 0x02:
1535 /* MULF */
1536 gen_fmulf(ra, rb, rc);
1537 break;
1538 case 0x03:
1539 /* DIVF */
1540 gen_fdivf(ra, rb, rc);
1541 break;
1542 case 0x1E:
1543 /* CVTDG */
1544 #if 0 // TODO
1545 gen_fcvtdg(rb, rc);
1546 #else
1547 goto invalid_opc;
1548 #endif
1549 break;
1550 case 0x20:
1551 /* ADDG */
1552 gen_faddg(ra, rb, rc);
1553 break;
1554 case 0x21:
1555 /* SUBG */
1556 gen_fsubg(ra, rb, rc);
1557 break;
1558 case 0x22:
1559 /* MULG */
1560 gen_fmulg(ra, rb, rc);
1561 break;
1562 case 0x23:
1563 /* DIVG */
1564 gen_fdivg(ra, rb, rc);
1565 break;
1566 case 0x25:
1567 /* CMPGEQ */
1568 gen_fcmpgeq(ra, rb, rc);
1569 break;
1570 case 0x26:
1571 /* CMPGLT */
1572 gen_fcmpglt(ra, rb, rc);
1573 break;
1574 case 0x27:
1575 /* CMPGLE */
1576 gen_fcmpgle(ra, rb, rc);
1577 break;
1578 case 0x2C:
1579 /* CVTGF */
1580 gen_fcvtgf(rb, rc);
1581 break;
1582 case 0x2D:
1583 /* CVTGD */
1584 #if 0 // TODO
1585 gen_fcvtgd(rb, rc);
1586 #else
1587 goto invalid_opc;
1588 #endif
1589 break;
1590 case 0x2F:
1591 /* CVTGQ */
1592 gen_fcvtgq(rb, rc);
1593 break;
1594 case 0x3C:
1595 /* CVTQF */
1596 gen_fcvtqf(rb, rc);
1597 break;
1598 case 0x3E:
1599 /* CVTQG */
1600 gen_fcvtqg(rb, rc);
1601 break;
1602 default:
1603 goto invalid_opc;
1604 }
1605 break;
1606 case 0x16:
1607 /* IEEE floating-point */
1608 /* XXX: rounding mode and traps are ignored (!) */
1609 switch (fpfn) { /* f11 & 0x3F */
1610 case 0x00:
1611 /* ADDS */
1612 gen_fadds(ra, rb, rc);
1613 break;
1614 case 0x01:
1615 /* SUBS */
1616 gen_fsubs(ra, rb, rc);
1617 break;
1618 case 0x02:
1619 /* MULS */
1620 gen_fmuls(ra, rb, rc);
1621 break;
1622 case 0x03:
1623 /* DIVS */
1624 gen_fdivs(ra, rb, rc);
1625 break;
1626 case 0x20:
1627 /* ADDT */
1628 gen_faddt(ra, rb, rc);
1629 break;
1630 case 0x21:
1631 /* SUBT */
1632 gen_fsubt(ra, rb, rc);
1633 break;
1634 case 0x22:
1635 /* MULT */
1636 gen_fmult(ra, rb, rc);
1637 break;
1638 case 0x23:
1639 /* DIVT */
1640 gen_fdivt(ra, rb, rc);
1641 break;
1642 case 0x24:
1643 /* CMPTUN */
1644 gen_fcmptun(ra, rb, rc);
1645 break;
1646 case 0x25:
1647 /* CMPTEQ */
1648 gen_fcmpteq(ra, rb, rc);
1649 break;
1650 case 0x26:
1651 /* CMPTLT */
1652 gen_fcmptlt(ra, rb, rc);
1653 break;
1654 case 0x27:
1655 /* CMPTLE */
1656 gen_fcmptle(ra, rb, rc);
1657 break;
1658 case 0x2C:
1659 /* XXX: incorrect */
1660 if (fn11 == 0x2AC || fn11 == 0x6AC) {
1661 /* CVTST */
1662 gen_fcvtst(rb, rc);
1663 } else {
1664 /* CVTTS */
1665 gen_fcvtts(rb, rc);
1666 }
1667 break;
1668 case 0x2F:
1669 /* CVTTQ */
1670 gen_fcvttq(rb, rc);
1671 break;
1672 case 0x3C:
1673 /* CVTQS */
1674 gen_fcvtqs(rb, rc);
1675 break;
1676 case 0x3E:
1677 /* CVTQT */
1678 gen_fcvtqt(rb, rc);
1679 break;
1680 default:
1681 goto invalid_opc;
1682 }
1683 break;
1684 case 0x17:
1685 switch (fn11) {
1686 case 0x010:
1687 /* CVTLQ */
1688 gen_fcvtlq(rb, rc);
1689 break;
1690 case 0x020:
1691 if (likely(rc != 31)) {
1692 if (ra == rb)
1693 /* FMOV */
1694 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
1695 else
1696 /* CPYS */
1697 gen_fcpys(ra, rb, rc);
1698 }
1699 break;
1700 case 0x021:
1701 /* CPYSN */
1702 gen_fcpysn(ra, rb, rc);
1703 break;
1704 case 0x022:
1705 /* CPYSE */
1706 gen_fcpyse(ra, rb, rc);
1707 break;
1708 case 0x024:
1709 /* MT_FPCR */
1710 if (likely(ra != 31))
1711 gen_helper_store_fpcr(cpu_fir[ra]);
1712 else {
1713 TCGv tmp = tcg_const_i64(0);
1714 gen_helper_store_fpcr(tmp);
1715 tcg_temp_free(tmp);
1716 }
1717 break;
1718 case 0x025:
1719 /* MF_FPCR */
1720 if (likely(ra != 31))
1721 gen_helper_load_fpcr(cpu_fir[ra]);
1722 break;
1723 case 0x02A:
1724 /* FCMOVEQ */
1725 gen_fcmpfeq(ra, rb, rc);
1726 break;
1727 case 0x02B:
1728 /* FCMOVNE */
1729 gen_fcmpfne(ra, rb, rc);
1730 break;
1731 case 0x02C:
1732 /* FCMOVLT */
1733 gen_fcmpflt(ra, rb, rc);
1734 break;
1735 case 0x02D:
1736 /* FCMOVGE */
1737 gen_fcmpfge(ra, rb, rc);
1738 break;
1739 case 0x02E:
1740 /* FCMOVLE */
1741 gen_fcmpfle(ra, rb, rc);
1742 break;
1743 case 0x02F:
1744 /* FCMOVGT */
1745 gen_fcmpfgt(ra, rb, rc);
1746 break;
1747 case 0x030:
1748 /* CVTQL */
1749 gen_fcvtql(rb, rc);
1750 break;
1751 case 0x130:
1752 /* CVTQL/V */
1753 gen_fcvtqlv(rb, rc);
1754 break;
1755 case 0x530:
1756 /* CVTQL/SV */
1757 gen_fcvtqlsv(rb, rc);
1758 break;
1759 default:
1760 goto invalid_opc;
1761 }
1762 break;
1763 case 0x18:
1764 switch ((uint16_t)disp16) {
1765 case 0x0000:
1766 /* TRAPB */
1767 /* No-op. Just exit from the current tb */
1768 ret = 2;
1769 break;
1770 case 0x0400:
1771 /* EXCB */
1772 /* No-op. Just exit from the current tb */
1773 ret = 2;
1774 break;
1775 case 0x4000:
1776 /* MB */
1777 /* No-op */
1778 break;
1779 case 0x4400:
1780 /* WMB */
1781 /* No-op */
1782 break;
1783 case 0x8000:
1784 /* FETCH */
1785 /* No-op */
1786 break;
1787 case 0xA000:
1788 /* FETCH_M */
1789 /* No-op */
1790 break;
1791 case 0xC000:
1792 /* RPCC */
1793 if (ra != 31)
1794 gen_helper_load_pcc(cpu_ir[ra]);
1795 break;
1796 case 0xE000:
1797 /* RC */
1798 if (ra != 31)
1799 gen_helper_rc(cpu_ir[ra]);
1800 break;
1801 case 0xE800:
1802 /* ECB */
1803 break;
1804 case 0xF000:
1805 /* RS */
1806 if (ra != 31)
1807 gen_helper_rs(cpu_ir[ra]);
1808 break;
1809 case 0xF800:
1810 /* WH64 */
1811 /* No-op */
1812 break;
1813 default:
1814 goto invalid_opc;
1815 }
1816 break;
1817 case 0x19:
1818 /* HW_MFPR (PALcode) */
1819 #if defined (CONFIG_USER_ONLY)
1820 goto invalid_opc;
1821 #else
1822 if (!ctx->pal_mode)
1823 goto invalid_opc;
1824 if (ra != 31) {
1825 TCGv tmp = tcg_const_i32(insn & 0xFF);
1826 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
1827 tcg_temp_free(tmp);
1828 }
1829 break;
1830 #endif
1831 case 0x1A:
1832 if (rb != 31)
1833 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
1834 else
1835 tcg_gen_movi_i64(cpu_pc, 0);
1836 if (ra != 31)
1837 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
1838 /* Those four jumps only differ by the branch prediction hint */
1839 switch (fn2) {
1840 case 0x0:
1841 /* JMP */
1842 break;
1843 case 0x1:
1844 /* JSR */
1845 break;
1846 case 0x2:
1847 /* RET */
1848 break;
1849 case 0x3:
1850 /* JSR_COROUTINE */
1851 break;
1852 }
1853 ret = 1;
1854 break;
1855 case 0x1B:
1856 /* HW_LD (PALcode) */
1857 #if defined (CONFIG_USER_ONLY)
1858 goto invalid_opc;
1859 #else
1860 if (!ctx->pal_mode)
1861 goto invalid_opc;
1862 if (ra != 31) {
1863 TCGv addr = tcg_temp_new();
1864 if (rb != 31)
1865 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
1866 else
1867 tcg_gen_movi_i64(addr, disp12);
1868 switch ((insn >> 12) & 0xF) {
1869 case 0x0:
1870 /* Longword physical access (hw_ldl/p) */
1871 gen_helper_ldl_raw(cpu_ir[ra], addr);
1872 break;
1873 case 0x1:
1874 /* Quadword physical access (hw_ldq/p) */
1875 gen_helper_ldq_raw(cpu_ir[ra], addr);
1876 break;
1877 case 0x2:
1878 /* Longword physical access with lock (hw_ldl_l/p) */
1879 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
1880 break;
1881 case 0x3:
1882 /* Quadword physical access with lock (hw_ldq_l/p) */
1883 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
1884 break;
1885 case 0x4:
1886 /* Longword virtual PTE fetch (hw_ldl/v) */
1887 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
1888 break;
1889 case 0x5:
1890 /* Quadword virtual PTE fetch (hw_ldq/v) */
1891 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
1892 break;
1893 case 0x6:
1894 /* Incpu_ir[ra]id */
1895 goto invalid_opc;
1896 case 0x7:
1897 /* Incpu_ir[ra]id */
1898 goto invalid_opc;
1899 case 0x8:
1900 /* Longword virtual access (hw_ldl) */
1901 gen_helper_st_virt_to_phys(addr, addr);
1902 gen_helper_ldl_raw(cpu_ir[ra], addr);
1903 break;
1904 case 0x9:
1905 /* Quadword virtual access (hw_ldq) */
1906 gen_helper_st_virt_to_phys(addr, addr);
1907 gen_helper_ldq_raw(cpu_ir[ra], addr);
1908 break;
1909 case 0xA:
1910 /* Longword virtual access with protection check (hw_ldl/w) */
1911 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
1912 break;
1913 case 0xB:
1914 /* Quadword virtual access with protection check (hw_ldq/w) */
1915 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
1916 break;
1917 case 0xC:
1918 /* Longword virtual access with alt access mode (hw_ldl/a)*/
1919 gen_helper_set_alt_mode();
1920 gen_helper_st_virt_to_phys(addr, addr);
1921 gen_helper_ldl_raw(cpu_ir[ra], addr);
1922 gen_helper_restore_mode();
1923 break;
1924 case 0xD:
1925 /* Quadword virtual access with alt access mode (hw_ldq/a) */
1926 gen_helper_set_alt_mode();
1927 gen_helper_st_virt_to_phys(addr, addr);
1928 gen_helper_ldq_raw(cpu_ir[ra], addr);
1929 gen_helper_restore_mode();
1930 break;
1931 case 0xE:
1932 /* Longword virtual access with alternate access mode and
1933 * protection checks (hw_ldl/wa)
1934 */
1935 gen_helper_set_alt_mode();
1936 gen_helper_ldl_data(cpu_ir[ra], addr);
1937 gen_helper_restore_mode();
1938 break;
1939 case 0xF:
1940 /* Quadword virtual access with alternate access mode and
1941 * protection checks (hw_ldq/wa)
1942 */
1943 gen_helper_set_alt_mode();
1944 gen_helper_ldq_data(cpu_ir[ra], addr);
1945 gen_helper_restore_mode();
1946 break;
1947 }
1948 tcg_temp_free(addr);
1949 }
1950 break;
1951 #endif
1952 case 0x1C:
1953 switch (fn7) {
1954 case 0x00:
1955 /* SEXTB */
1956 if (!(ctx->amask & AMASK_BWX))
1957 goto invalid_opc;
1958 if (likely(rc != 31)) {
1959 if (islit)
1960 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
1961 else
1962 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
1963 }
1964 break;
1965 case 0x01:
1966 /* SEXTW */
1967 if (!(ctx->amask & AMASK_BWX))
1968 goto invalid_opc;
1969 if (likely(rc != 31)) {
1970 if (islit)
1971 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
1972 else
1973 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
1974 }
1975 break;
1976 case 0x30:
1977 /* CTPOP */
1978 if (!(ctx->amask & AMASK_CIX))
1979 goto invalid_opc;
1980 if (likely(rc != 31)) {
1981 if (islit)
1982 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
1983 else
1984 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
1985 }
1986 break;
1987 case 0x31:
1988 /* PERR */
1989 if (!(ctx->amask & AMASK_MVI))
1990 goto invalid_opc;
1991 gen_perr(ra, rb, rc, islit, lit);
1992 break;
1993 case 0x32:
1994 /* CTLZ */
1995 if (!(ctx->amask & AMASK_CIX))
1996 goto invalid_opc;
1997 if (likely(rc != 31)) {
1998 if (islit)
1999 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2000 else
2001 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2002 }
2003 break;
2004 case 0x33:
2005 /* CTTZ */
2006 if (!(ctx->amask & AMASK_CIX))
2007 goto invalid_opc;
2008 if (likely(rc != 31)) {
2009 if (islit)
2010 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2011 else
2012 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2013 }
2014 break;
2015 case 0x34:
2016 /* UNPKBW */
2017 if (!(ctx->amask & AMASK_MVI))
2018 goto invalid_opc;
2019 if (real_islit || ra != 31)
2020 goto invalid_opc;
2021 gen_unpkbw (rb, rc);
2022 break;
2023 case 0x35:
2024 /* UNPKBL */
2025 if (!(ctx->amask & AMASK_MVI))
2026 goto invalid_opc;
2027 if (real_islit || ra != 31)
2028 goto invalid_opc;
2029 gen_unpkbl (rb, rc);
2030 break;
2031 case 0x36:
2032 /* PKWB */
2033 if (!(ctx->amask & AMASK_MVI))
2034 goto invalid_opc;
2035 if (real_islit || ra != 31)
2036 goto invalid_opc;
2037 gen_pkwb (rb, rc);
2038 break;
2039 case 0x37:
2040 /* PKLB */
2041 if (!(ctx->amask & AMASK_MVI))
2042 goto invalid_opc;
2043 if (real_islit || ra != 31)
2044 goto invalid_opc;
2045 gen_pklb (rb, rc);
2046 break;
2047 case 0x38:
2048 /* MINSB8 */
2049 if (!(ctx->amask & AMASK_MVI))
2050 goto invalid_opc;
2051 gen_minsb8 (ra, rb, rc, islit, lit);
2052 break;
2053 case 0x39:
2054 /* MINSW4 */
2055 if (!(ctx->amask & AMASK_MVI))
2056 goto invalid_opc;
2057 gen_minsw4 (ra, rb, rc, islit, lit);
2058 break;
2059 case 0x3A:
2060 /* MINUB8 */
2061 if (!(ctx->amask & AMASK_MVI))
2062 goto invalid_opc;
2063 gen_minub8 (ra, rb, rc, islit, lit);
2064 break;
2065 case 0x3B:
2066 /* MINUW4 */
2067 if (!(ctx->amask & AMASK_MVI))
2068 goto invalid_opc;
2069 gen_minuw4 (ra, rb, rc, islit, lit);
2070 break;
2071 case 0x3C:
2072 /* MAXUB8 */
2073 if (!(ctx->amask & AMASK_MVI))
2074 goto invalid_opc;
2075 gen_maxub8 (ra, rb, rc, islit, lit);
2076 break;
2077 case 0x3D:
2078 /* MAXUW4 */
2079 if (!(ctx->amask & AMASK_MVI))
2080 goto invalid_opc;
2081 gen_maxuw4 (ra, rb, rc, islit, lit);
2082 break;
2083 case 0x3E:
2084 /* MAXSB8 */
2085 if (!(ctx->amask & AMASK_MVI))
2086 goto invalid_opc;
2087 gen_maxsb8 (ra, rb, rc, islit, lit);
2088 break;
2089 case 0x3F:
2090 /* MAXSW4 */
2091 if (!(ctx->amask & AMASK_MVI))
2092 goto invalid_opc;
2093 gen_maxsw4 (ra, rb, rc, islit, lit);
2094 break;
2095 case 0x70:
2096 /* FTOIT */
2097 if (!(ctx->amask & AMASK_FIX))
2098 goto invalid_opc;
2099 if (likely(rc != 31)) {
2100 if (ra != 31)
2101 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2102 else
2103 tcg_gen_movi_i64(cpu_ir[rc], 0);
2104 }
2105 break;
2106 case 0x78:
2107 /* FTOIS */
2108 if (!(ctx->amask & AMASK_FIX))
2109 goto invalid_opc;
2110 if (rc != 31) {
2111 TCGv_i32 tmp1 = tcg_temp_new_i32();
2112 if (ra != 31)
2113 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2114 else {
2115 TCGv tmp2 = tcg_const_i64(0);
2116 gen_helper_s_to_memory(tmp1, tmp2);
2117 tcg_temp_free(tmp2);
2118 }
2119 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2120 tcg_temp_free_i32(tmp1);
2121 }
2122 break;
2123 default:
2124 goto invalid_opc;
2125 }
2126 break;
2127 case 0x1D:
2128 /* HW_MTPR (PALcode) */
2129 #if defined (CONFIG_USER_ONLY)
2130 goto invalid_opc;
2131 #else
2132 if (!ctx->pal_mode)
2133 goto invalid_opc;
2134 else {
2135 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2136 if (ra != 31)
2137 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2138 else {
2139 TCGv tmp2 = tcg_const_i64(0);
2140 gen_helper_mtpr(tmp1, tmp2);
2141 tcg_temp_free(tmp2);
2142 }
2143 tcg_temp_free(tmp1);
2144 ret = 2;
2145 }
2146 break;
2147 #endif
2148 case 0x1E:
2149 /* HW_REI (PALcode) */
2150 #if defined (CONFIG_USER_ONLY)
2151 goto invalid_opc;
2152 #else
2153 if (!ctx->pal_mode)
2154 goto invalid_opc;
2155 if (rb == 31) {
2156 /* "Old" alpha */
2157 gen_helper_hw_rei();
2158 } else {
2159 TCGv tmp;
2160
2161 if (ra != 31) {
2162 tmp = tcg_temp_new();
2163 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2164 } else
2165 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2166 gen_helper_hw_ret(tmp);
2167 tcg_temp_free(tmp);
2168 }
2169 ret = 2;
2170 break;
2171 #endif
2172 case 0x1F:
2173 /* HW_ST (PALcode) */
2174 #if defined (CONFIG_USER_ONLY)
2175 goto invalid_opc;
2176 #else
2177 if (!ctx->pal_mode)
2178 goto invalid_opc;
2179 else {
2180 TCGv addr, val;
2181 addr = tcg_temp_new();
2182 if (rb != 31)
2183 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2184 else
2185 tcg_gen_movi_i64(addr, disp12);
2186 if (ra != 31)
2187 val = cpu_ir[ra];
2188 else {
2189 val = tcg_temp_new();
2190 tcg_gen_movi_i64(val, 0);
2191 }
2192 switch ((insn >> 12) & 0xF) {
2193 case 0x0:
2194 /* Longword physical access */
2195 gen_helper_stl_raw(val, addr);
2196 break;
2197 case 0x1:
2198 /* Quadword physical access */
2199 gen_helper_stq_raw(val, addr);
2200 break;
2201 case 0x2:
2202 /* Longword physical access with lock */
2203 gen_helper_stl_c_raw(val, val, addr);
2204 break;
2205 case 0x3:
2206 /* Quadword physical access with lock */
2207 gen_helper_stq_c_raw(val, val, addr);
2208 break;
2209 case 0x4:
2210 /* Longword virtual access */
2211 gen_helper_st_virt_to_phys(addr, addr);
2212 gen_helper_stl_raw(val, addr);
2213 break;
2214 case 0x5:
2215 /* Quadword virtual access */
2216 gen_helper_st_virt_to_phys(addr, addr);
2217 gen_helper_stq_raw(val, addr);
2218 break;
2219 case 0x6:
2220 /* Invalid */
2221 goto invalid_opc;
2222 case 0x7:
2223 /* Invalid */
2224 goto invalid_opc;
2225 case 0x8:
2226 /* Invalid */
2227 goto invalid_opc;
2228 case 0x9:
2229 /* Invalid */
2230 goto invalid_opc;
2231 case 0xA:
2232 /* Invalid */
2233 goto invalid_opc;
2234 case 0xB:
2235 /* Invalid */
2236 goto invalid_opc;
2237 case 0xC:
2238 /* Longword virtual access with alternate access mode */
2239 gen_helper_set_alt_mode();
2240 gen_helper_st_virt_to_phys(addr, addr);
2241 gen_helper_stl_raw(val, addr);
2242 gen_helper_restore_mode();
2243 break;
2244 case 0xD:
2245 /* Quadword virtual access with alternate access mode */
2246 gen_helper_set_alt_mode();
2247 gen_helper_st_virt_to_phys(addr, addr);
2248 gen_helper_stl_raw(val, addr);
2249 gen_helper_restore_mode();
2250 break;
2251 case 0xE:
2252 /* Invalid */
2253 goto invalid_opc;
2254 case 0xF:
2255 /* Invalid */
2256 goto invalid_opc;
2257 }
2258 if (ra == 31)
2259 tcg_temp_free(val);
2260 tcg_temp_free(addr);
2261 }
2262 break;
2263 #endif
2264 case 0x20:
2265 /* LDF */
2266 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2267 break;
2268 case 0x21:
2269 /* LDG */
2270 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2271 break;
2272 case 0x22:
2273 /* LDS */
2274 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2275 break;
2276 case 0x23:
2277 /* LDT */
2278 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2279 break;
2280 case 0x24:
2281 /* STF */
2282 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2283 break;
2284 case 0x25:
2285 /* STG */
2286 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2287 break;
2288 case 0x26:
2289 /* STS */
2290 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2291 break;
2292 case 0x27:
2293 /* STT */
2294 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2295 break;
2296 case 0x28:
2297 /* LDL */
2298 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2299 break;
2300 case 0x29:
2301 /* LDQ */
2302 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2303 break;
2304 case 0x2A:
2305 /* LDL_L */
2306 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2307 break;
2308 case 0x2B:
2309 /* LDQ_L */
2310 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2311 break;
2312 case 0x2C:
2313 /* STL */
2314 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
2315 break;
2316 case 0x2D:
2317 /* STQ */
2318 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
2319 break;
2320 case 0x2E:
2321 /* STL_C */
2322 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
2323 break;
2324 case 0x2F:
2325 /* STQ_C */
2326 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
2327 break;
2328 case 0x30:
2329 /* BR */
2330 if (ra != 31)
2331 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2332 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2333 ret = 1;
2334 break;
2335 case 0x31: /* FBEQ */
2336 case 0x32: /* FBLT */
2337 case 0x33: /* FBLE */
2338 gen_fbcond(ctx, opc, ra, disp16);
2339 ret = 1;
2340 break;
2341 case 0x34:
2342 /* BSR */
2343 if (ra != 31)
2344 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2345 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2346 ret = 1;
2347 break;
2348 case 0x35: /* FBNE */
2349 case 0x36: /* FBGE */
2350 case 0x37: /* FBGT */
2351 gen_fbcond(ctx, opc, ra, disp16);
2352 ret = 1;
2353 break;
2354 case 0x38:
2355 /* BLBC */
2356 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2357 ret = 1;
2358 break;
2359 case 0x39:
2360 /* BEQ */
2361 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2362 ret = 1;
2363 break;
2364 case 0x3A:
2365 /* BLT */
2366 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2367 ret = 1;
2368 break;
2369 case 0x3B:
2370 /* BLE */
2371 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2372 ret = 1;
2373 break;
2374 case 0x3C:
2375 /* BLBS */
2376 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2377 ret = 1;
2378 break;
2379 case 0x3D:
2380 /* BNE */
2381 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2382 ret = 1;
2383 break;
2384 case 0x3E:
2385 /* BGE */
2386 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2387 ret = 1;
2388 break;
2389 case 0x3F:
2390 /* BGT */
2391 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2392 ret = 1;
2393 break;
2394 invalid_opc:
2395 gen_invalid(ctx);
2396 ret = 3;
2397 break;
2398 }
2399
2400 return ret;
2401 }
2402
/* Translate a block of guest Alpha code starting at tb->pc into TCG ops.
 *
 * env       - CPU state providing amask/implver, PS and breakpoint list.
 * tb        - translation block being filled in (size/icount on exit).
 * search_pc - when non-zero, also record per-op guest pc / icount data in
 *             the gen_opc_* arrays so a host pc can later be mapped back
 *             to a guest pc (see gen_pc_load).
 *
 * translate_one's return value drives the loop: 0 = keep translating,
 * 1 = branch taken (cpu_pc already written by the branch code),
 * 2 = stop and exit the TB, 3 = invalid opcode (exception generated).
 */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    /* NOTE(review): (ps >> 3) & 3 is taken as the current privilege/mode
       index for memory accesses — confirm against the PS layout used by
       the rest of the port.  */
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
    num_insns = 0;
    /* 0 in cflags means "no per-TB instruction limit requested".  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        /* Emit a debug exception when a breakpoint sits on this insn.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            /* Back-fill gaps (ops emitted without an insn start) with 0,
               then mark the first op of this instruction.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;

        if (singlestep) {
            break;
        }
    }
    /* For ret == 1 (branch) cpu_pc was already written; for ret == 3 an
       exception was raised.  Otherwise resume at the next insn.  */
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the metadata arrays up to the final op count.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
2510
/* Public entry point: translate a TB without recording pc-search data.  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
2515
/* Public entry point: re-translate a TB while recording per-op guest pc
   metadata, used when mapping a host pc back to a guest pc.  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
2520
/* One selectable CPU model: a -cpu name mapped to its IMPLVER code and
   the architecture-extension bits reported/honoured via AMASK.  */
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

/* Table of recognized -cpu model names.  Both the marketing names
   ("ev56") and the chip numbers ("21164a") are accepted; unknown names
   fall back to the ev67 defaults chosen in cpu_alpha_init.  */
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
2544
2545 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
2546 {
2547 CPUAlphaState *env;
2548 uint64_t hwpcb;
2549 int implver, amask, i, max;
2550
2551 env = qemu_mallocz(sizeof(CPUAlphaState));
2552 cpu_exec_init(env);
2553 alpha_translate_init();
2554 tlb_flush(env, 1);
2555
2556 /* Default to ev67; no reason not to emulate insns by default. */
2557 implver = IMPLVER_21264;
2558 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
2559 | AMASK_TRAP | AMASK_PREFETCH);
2560
2561 max = ARRAY_SIZE(cpu_defs);
2562 for (i = 0; i < max; i++) {
2563 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
2564 implver = cpu_defs[i].implver;
2565 amask = cpu_defs[i].amask;
2566 break;
2567 }
2568 }
2569 env->implver = implver;
2570 env->amask = amask;
2571
2572 env->ps = 0x1F00;
2573 #if defined (CONFIG_USER_ONLY)
2574 env->ps |= 1 << 3;
2575 #endif
2576 pal_init(env);
2577 /* Initialize IPR */
2578 hwpcb = env->ipr[IPR_PCBB];
2579 env->ipr[IPR_ASN] = 0;
2580 env->ipr[IPR_ASTEN] = 0;
2581 env->ipr[IPR_ASTSR] = 0;
2582 env->ipr[IPR_DATFX] = 0;
2583 /* XXX: fix this */
2584 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
2585 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
2586 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
2587 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
2588 env->ipr[IPR_FEN] = 0;
2589 env->ipr[IPR_IPL] = 31;
2590 env->ipr[IPR_MCES] = 0;
2591 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
2592 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
2593 env->ipr[IPR_SISR] = 0;
2594 env->ipr[IPR_VIRBND] = -1ULL;
2595
2596 qemu_init_vcpu(env);
2597 return env;
2598 }
2599
/* Restore env->pc from the per-op guest pc recorded at opcode index
   pc_pos by the search_pc translation pass (gen_opc_pc).  tb,
   searched_pc and puc are unused on this target.  */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}