]> git.proxmox.com Git - qemu.git/blob - target-alpha/translate.c
target-alpha: Implement missing MVI instructions.
[qemu.git] / target-alpha / translate.c
1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
39 #else
40 # define LOG_DISAS(...) do { } while (0)
41 #endif
42
/* Per-instruction translation state threaded through the generators.  */
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;        /* virtual PC of the insn being translated */
    int mem_idx;        /* MMU index passed to the qemu_ld/st ops */
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;       /* NOTE(review): presumably set while in PALcode —
                           not referenced in this chunk; confirm at callers */
#endif
    CPUAlphaState *env;
    uint32_t amask;     /* architecture feature mask (AMASK bits) */
};
53
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];     /* integer regs; R31 (always zero) has no global */
static TCGv cpu_fir[31];    /* FP regs; F31 likewise */
static TCGv cpu_pc;
static TCGv cpu_lock;       /* address held by LDx_L; set to -1 by STx_C */

/* register names */
/* Sized exactly for "ir0".."ir30" (10*4 + 21*5 bytes incl. NUL) plus
   "fir0".."fir30" (10*5 + 21*6) — see alpha_translate_init.  */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
65
66 static void alpha_translate_init(void)
67 {
68 int i;
69 char *p;
70 static int done_init = 0;
71
72 if (done_init)
73 return;
74
75 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
76
77 p = cpu_reg_names;
78 for (i = 0; i < 31; i++) {
79 sprintf(p, "ir%d", i);
80 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
81 offsetof(CPUState, ir[i]), p);
82 p += (i < 10) ? 4 : 5;
83
84 sprintf(p, "fir%d", i);
85 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
86 offsetof(CPUState, fir[i]), p);
87 p += (i < 10) ? 5 : 6;
88 }
89
90 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
91 offsetof(CPUState, pc), "pc");
92
93 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
94 offsetof(CPUState, lock), "lock");
95
96 /* register helpers */
97 #define GEN_HELPER 2
98 #include "helper.h"
99
100 done_init = 1;
101 }
102
103 static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
104 {
105 TCGv_i32 tmp1, tmp2;
106
107 tcg_gen_movi_i64(cpu_pc, ctx->pc);
108 tmp1 = tcg_const_i32(exception);
109 tmp2 = tcg_const_i32(error_code);
110 gen_helper_excp(tmp1, tmp2);
111 tcg_temp_free_i32(tmp2);
112 tcg_temp_free_i32(tmp1);
113 }
114
115 static inline void gen_invalid(DisasContext *ctx)
116 {
117 gen_excp(ctx, EXCP_OPCDEC, 0);
118 }
119
120 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
121 {
122 TCGv tmp = tcg_temp_new();
123 TCGv_i32 tmp32 = tcg_temp_new_i32();
124 tcg_gen_qemu_ld32u(tmp, t1, flags);
125 tcg_gen_trunc_i64_i32(tmp32, tmp);
126 gen_helper_memory_to_f(t0, tmp32);
127 tcg_temp_free_i32(tmp32);
128 tcg_temp_free(tmp);
129 }
130
131 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
132 {
133 TCGv tmp = tcg_temp_new();
134 tcg_gen_qemu_ld64(tmp, t1, flags);
135 gen_helper_memory_to_g(t0, tmp);
136 tcg_temp_free(tmp);
137 }
138
139 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
140 {
141 TCGv tmp = tcg_temp_new();
142 TCGv_i32 tmp32 = tcg_temp_new_i32();
143 tcg_gen_qemu_ld32u(tmp, t1, flags);
144 tcg_gen_trunc_i64_i32(tmp32, tmp);
145 gen_helper_memory_to_s(t0, tmp32);
146 tcg_temp_free_i32(tmp32);
147 tcg_temp_free(tmp);
148 }
149
/* LDL_L: load-locked longword.  Record the locked address in cpu_lock so a
   later STx_C can compare against it, then sign-extend the 32-bit load.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}
155
/* LDQ_L: load-locked quadword.  Record the locked address in cpu_lock so a
   later STx_C can compare against it, then do the 64-bit load.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
161
162 static inline void gen_load_mem(DisasContext *ctx,
163 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
164 int flags),
165 int ra, int rb, int32_t disp16, int fp,
166 int clear)
167 {
168 TCGv addr;
169
170 if (unlikely(ra == 31))
171 return;
172
173 addr = tcg_temp_new();
174 if (rb != 31) {
175 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
176 if (clear)
177 tcg_gen_andi_i64(addr, addr, ~0x7);
178 } else {
179 if (clear)
180 disp16 &= ~0x7;
181 tcg_gen_movi_i64(addr, disp16);
182 }
183 if (fp)
184 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
185 else
186 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
187 tcg_temp_free(addr);
188 }
189
190 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
191 {
192 TCGv_i32 tmp32 = tcg_temp_new_i32();
193 TCGv tmp = tcg_temp_new();
194 gen_helper_f_to_memory(tmp32, t0);
195 tcg_gen_extu_i32_i64(tmp, tmp32);
196 tcg_gen_qemu_st32(tmp, t1, flags);
197 tcg_temp_free(tmp);
198 tcg_temp_free_i32(tmp32);
199 }
200
201 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
202 {
203 TCGv tmp = tcg_temp_new();
204 gen_helper_g_to_memory(tmp, t0);
205 tcg_gen_qemu_st64(tmp, t1, flags);
206 tcg_temp_free(tmp);
207 }
208
209 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
210 {
211 TCGv_i32 tmp32 = tcg_temp_new_i32();
212 TCGv tmp = tcg_temp_new();
213 gen_helper_s_to_memory(tmp32, t0);
214 tcg_gen_extu_i32_i64(tmp, tmp32);
215 tcg_gen_qemu_st32(tmp, t1, flags);
216 tcg_temp_free(tmp);
217 tcg_temp_free_i32(tmp32);
218 }
219
/* STL_C: 32-bit store-conditional.  The store happens (and t0 := 1) only
   when the address still equals the one recorded by the last LDx_L;
   otherwise nothing is stored and t0 := 0.  The lock is cleared either way.
   NOTE(review): only the address is compared, so intervening stores from
   other CPUs to the same address are not detected.  */
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);            /* success */
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);            /* lock mismatch: fail */
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);     /* the lock is always consumed */
}
235
/* STQ_C: 64-bit store-conditional; same protocol as gen_qemu_stl_c with a
   quadword store.  t0 reports success (1) or failure (0); the lock is
   cleared in both cases.  */
static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);            /* success */
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);            /* lock mismatch: fail */
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);     /* the lock is always consumed */
}
251
252 static inline void gen_store_mem(DisasContext *ctx,
253 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
254 int flags),
255 int ra, int rb, int32_t disp16, int fp,
256 int clear, int local)
257 {
258 TCGv addr;
259 if (local)
260 addr = tcg_temp_local_new();
261 else
262 addr = tcg_temp_new();
263 if (rb != 31) {
264 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
265 if (clear)
266 tcg_gen_andi_i64(addr, addr, ~0x7);
267 } else {
268 if (clear)
269 disp16 &= ~0x7;
270 tcg_gen_movi_i64(addr, disp16);
271 }
272 if (ra != 31) {
273 if (fp)
274 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
275 else
276 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
277 } else {
278 TCGv zero;
279 if (local)
280 zero = tcg_const_local_i64(0);
281 else
282 zero = tcg_const_i64(0);
283 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
284 tcg_temp_free(zero);
285 }
286 tcg_temp_free(addr);
287 }
288
289 static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
290 int32_t disp, int mask)
291 {
292 int l1, l2;
293
294 l1 = gen_new_label();
295 l2 = gen_new_label();
296 if (likely(ra != 31)) {
297 if (mask) {
298 TCGv tmp = tcg_temp_new();
299 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
300 tcg_gen_brcondi_i64(cond, tmp, 0, l1);
301 tcg_temp_free(tmp);
302 } else
303 tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
304 } else {
305 /* Very uncommon case - Do not bother to optimize. */
306 TCGv tmp = tcg_const_i64(0);
307 tcg_gen_brcondi_i64(cond, tmp, 0, l1);
308 tcg_temp_free(tmp);
309 }
310 tcg_gen_movi_i64(cpu_pc, ctx->pc);
311 tcg_gen_br(l2);
312 gen_set_label(l1);
313 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
314 gen_set_label(l2);
315 }
316
317 static inline void gen_fbcond(DisasContext *ctx, int opc, int ra,
318 int32_t disp16)
319 {
320 int l1, l2;
321 TCGv tmp;
322 TCGv src;
323
324 l1 = gen_new_label();
325 l2 = gen_new_label();
326 if (ra != 31) {
327 tmp = tcg_temp_new();
328 src = cpu_fir[ra];
329 } else {
330 tmp = tcg_const_i64(0);
331 src = tmp;
332 }
333 switch (opc) {
334 case 0x31: /* FBEQ */
335 gen_helper_cmpfeq(tmp, src);
336 break;
337 case 0x32: /* FBLT */
338 gen_helper_cmpflt(tmp, src);
339 break;
340 case 0x33: /* FBLE */
341 gen_helper_cmpfle(tmp, src);
342 break;
343 case 0x35: /* FBNE */
344 gen_helper_cmpfne(tmp, src);
345 break;
346 case 0x36: /* FBGE */
347 gen_helper_cmpfge(tmp, src);
348 break;
349 case 0x37: /* FBGT */
350 gen_helper_cmpfgt(tmp, src);
351 break;
352 default:
353 abort();
354 }
355 tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
356 tcg_gen_movi_i64(cpu_pc, ctx->pc);
357 tcg_gen_br(l2);
358 gen_set_label(l1);
359 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2));
360 gen_set_label(l2);
361 }
362
363 static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
364 int islit, uint8_t lit, int mask)
365 {
366 int l1;
367
368 if (unlikely(rc == 31))
369 return;
370
371 l1 = gen_new_label();
372
373 if (ra != 31) {
374 if (mask) {
375 TCGv tmp = tcg_temp_new();
376 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
377 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
378 tcg_temp_free(tmp);
379 } else
380 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
381 } else {
382 /* Very uncommon case - Do not bother to optimize. */
383 TCGv tmp = tcg_const_i64(0);
384 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
385 tcg_temp_free(tmp);
386 }
387
388 if (islit)
389 tcg_gen_movi_i64(cpu_ir[rc], lit);
390 else
391 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
392 gen_set_label(l1);
393 }
394
/* Emit a one-source FP helper call: Fc := helper_<name>(Fb).  Used for the
   square roots and the FP format conversions.  Fb == F31 reads as zero;
   the whole op is a no-op when Fc is F31.  */
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
                                                            \
    if (rb != 31)                                           \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    else {                                                  \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(sqrts)
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(sqrtt)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
FARITH2(cvtst)
FARITH2(cvtts)
FARITH2(cvttq)
FARITH2(cvtqs)
FARITH2(cvtqt)
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtqlv)
FARITH2(cvtqlsv)
426
/* Emit a two-source FP helper call: Fc := helper_<name>(Fa, Fb).  Covers
   VAX and IEEE arithmetic, the FP compares and the copy-sign family.
   F31 sources read as zero; the whole op is a no-op when Fc is F31.  */
#define FARITH3(name)                                                 \
static inline void glue(gen_f, name)(int ra, int rb, int rc)          \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (rb != 31)                                                 \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
        else {                                                        \
            TCGv tmp = tcg_const_i64(0);                              \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);      \
            tcg_temp_free(tmp);                                       \
        }                                                             \
    } else {                                                          \
        TCGv tmp = tcg_const_i64(0);                                  \
        if (rb != 31)                                                 \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);      \
        else                                                          \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);              \
        tcg_temp_free(tmp);                                           \
    }                                                                 \
}

FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
FARITH3(adds)
FARITH3(subs)
FARITH3(muls)
FARITH3(divs)
FARITH3(addt)
FARITH3(subt)
FARITH3(mult)
FARITH3(divt)
FARITH3(cmptun)
FARITH3(cmpteq)
FARITH3(cmptlt)
FARITH3(cmptle)
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)
477
478 #define FCMOV(name) \
479 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
480 { \
481 int l1; \
482 TCGv tmp; \
483 \
484 if (unlikely(rc == 31)) \
485 return; \
486 \
487 l1 = gen_new_label(); \
488 tmp = tcg_temp_new(); \
489 if (ra != 31) { \
490 tmp = tcg_temp_new(); \
491 gen_helper_ ## name (tmp, cpu_fir[ra]); \
492 } else { \
493 tmp = tcg_const_i64(0); \
494 gen_helper_ ## name (tmp, tmp); \
495 } \
496 tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1); \
497 if (rb != 31) \
498 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]); \
499 else \
500 tcg_gen_movi_i64(cpu_fir[rc], 0); \
501 gen_set_label(l1); \
502 }
503 FCMOV(cmpfeq)
504 FCMOV(cmpfne)
505 FCMOV(cmpflt)
506 FCMOV(cmpfge)
507 FCMOV(cmpfle)
508 FCMOV(cmpfgt)
509
510 /* EXTWH, EXTWH, EXTLH, EXTQH */
511 static inline void gen_ext_h(void(*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
512 int ra, int rb, int rc, int islit, uint8_t lit)
513 {
514 if (unlikely(rc == 31))
515 return;
516
517 if (ra != 31) {
518 if (islit) {
519 if (lit != 0)
520 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], 64 - ((lit & 7) * 8));
521 else
522 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
523 } else {
524 TCGv tmp1;
525 tmp1 = tcg_temp_new();
526
527 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
528 tcg_gen_shli_i64(tmp1, tmp1, 3);
529 tcg_gen_neg_i64(tmp1, tmp1);
530 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
531 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
532
533 tcg_temp_free(tmp1);
534 }
535 if (tcg_gen_ext_i64)
536 tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
537 } else
538 tcg_gen_movi_i64(cpu_ir[rc], 0);
539 }
540
541 /* EXTBL, EXTWL, EXTWL, EXTLL, EXTQL */
542 static inline void gen_ext_l(void(*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
543 int ra, int rb, int rc, int islit, uint8_t lit)
544 {
545 if (unlikely(rc == 31))
546 return;
547
548 if (ra != 31) {
549 if (islit) {
550 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
551 } else {
552 TCGv tmp = tcg_temp_new();
553 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
554 tcg_gen_shli_i64(tmp, tmp, 3);
555 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
556 tcg_temp_free(tmp);
557 }
558 if (tcg_gen_ext_i64)
559 tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
560 } else
561 tcg_gen_movi_i64(cpu_ir[rc], 0);
562 }
563
/* Code to call arith3 helpers */
/* Emit a two-source integer helper call: Rc := helper_<name>(Ra, Rb|lit).
   R31 sources read as zero; the whole op is a no-op when Rc is R31.
   Covers the overflow-checking arithmetic, byte mask/insert, zap, umulh
   and the MVI min/max/perr family.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(mskbl)
ARITH3(insbl)
ARITH3(mskwl)
ARITH3(inswl)
ARITH3(mskll)
ARITH3(insll)
ARITH3(zap)
ARITH3(zapnot)
ARITH3(mskql)
ARITH3(insql)
ARITH3(mskwh)
ARITH3(inswh)
ARITH3(msklh)
ARITH3(inslh)
ARITH3(mskqh)
ARITH3(insqh)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
623
/* Emit a one-source MVI helper call: Rc := helper_<name>(Rb), for the
   pack/unpack byte operations.  When Rb is R31 (always reads as zero) a
   constant zero is stored instead of calling the helper; the whole op is
   a no-op when Rc is R31.  */
#define MVIOP2(name)                                        \
static inline void glue(gen_, name)(int rb, int rc)         \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
    if (unlikely(rb == 31))                                 \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                    \
    else                                                    \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);       \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
638
639 static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
640 uint8_t lit)
641 {
642 int l1, l2;
643 TCGv tmp;
644
645 if (unlikely(rc == 31))
646 return;
647
648 l1 = gen_new_label();
649 l2 = gen_new_label();
650
651 if (ra != 31) {
652 tmp = tcg_temp_new();
653 tcg_gen_mov_i64(tmp, cpu_ir[ra]);
654 } else
655 tmp = tcg_const_i64(0);
656 if (islit)
657 tcg_gen_brcondi_i64(cond, tmp, lit, l1);
658 else
659 tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);
660
661 tcg_gen_movi_i64(cpu_ir[rc], 0);
662 tcg_gen_br(l2);
663 gen_set_label(l1);
664 tcg_gen_movi_i64(cpu_ir[rc], 1);
665 gen_set_label(l2);
666 }
667
668 static inline int translate_one(DisasContext *ctx, uint32_t insn)
669 {
670 uint32_t palcode;
671 int32_t disp21, disp16, disp12;
672 uint16_t fn11, fn16;
673 uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
674 uint8_t lit;
675 int ret;
676
677 /* Decode all instruction fields */
678 opc = insn >> 26;
679 ra = (insn >> 21) & 0x1F;
680 rb = (insn >> 16) & 0x1F;
681 rc = insn & 0x1F;
682 sbz = (insn >> 13) & 0x07;
683 real_islit = islit = (insn >> 12) & 1;
684 if (rb == 31 && !islit) {
685 islit = 1;
686 lit = 0;
687 } else
688 lit = (insn >> 13) & 0xFF;
689 palcode = insn & 0x03FFFFFF;
690 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
691 disp16 = (int16_t)(insn & 0x0000FFFF);
692 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
693 fn16 = insn & 0x0000FFFF;
694 fn11 = (insn >> 5) & 0x000007FF;
695 fpfn = fn11 & 0x3F;
696 fn7 = (insn >> 5) & 0x0000007F;
697 fn2 = (insn >> 5) & 0x00000003;
698 ret = 0;
699 LOG_DISAS("opc %02x ra %d rb %d rc %d disp16 %04x\n",
700 opc, ra, rb, rc, disp16);
701 switch (opc) {
702 case 0x00:
703 /* CALL_PAL */
704 if (palcode >= 0x80 && palcode < 0xC0) {
705 /* Unprivileged PAL call */
706 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
707 #if !defined (CONFIG_USER_ONLY)
708 } else if (palcode < 0x40) {
709 /* Privileged PAL code */
710 if (ctx->mem_idx & 1)
711 goto invalid_opc;
712 else
713 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
714 #endif
715 } else {
716 /* Invalid PAL call */
717 goto invalid_opc;
718 }
719 ret = 3;
720 break;
721 case 0x01:
722 /* OPC01 */
723 goto invalid_opc;
724 case 0x02:
725 /* OPC02 */
726 goto invalid_opc;
727 case 0x03:
728 /* OPC03 */
729 goto invalid_opc;
730 case 0x04:
731 /* OPC04 */
732 goto invalid_opc;
733 case 0x05:
734 /* OPC05 */
735 goto invalid_opc;
736 case 0x06:
737 /* OPC06 */
738 goto invalid_opc;
739 case 0x07:
740 /* OPC07 */
741 goto invalid_opc;
742 case 0x08:
743 /* LDA */
744 if (likely(ra != 31)) {
745 if (rb != 31)
746 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
747 else
748 tcg_gen_movi_i64(cpu_ir[ra], disp16);
749 }
750 break;
751 case 0x09:
752 /* LDAH */
753 if (likely(ra != 31)) {
754 if (rb != 31)
755 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
756 else
757 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
758 }
759 break;
760 case 0x0A:
761 /* LDBU */
762 if (!(ctx->amask & AMASK_BWX))
763 goto invalid_opc;
764 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
765 break;
766 case 0x0B:
767 /* LDQ_U */
768 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
769 break;
770 case 0x0C:
771 /* LDWU */
772 if (!(ctx->amask & AMASK_BWX))
773 goto invalid_opc;
774 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
775 break;
776 case 0x0D:
777 /* STW */
778 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
779 break;
780 case 0x0E:
781 /* STB */
782 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
783 break;
784 case 0x0F:
785 /* STQ_U */
786 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
787 break;
788 case 0x10:
789 switch (fn7) {
790 case 0x00:
791 /* ADDL */
792 if (likely(rc != 31)) {
793 if (ra != 31) {
794 if (islit) {
795 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
796 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
797 } else {
798 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
799 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
800 }
801 } else {
802 if (islit)
803 tcg_gen_movi_i64(cpu_ir[rc], lit);
804 else
805 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
806 }
807 }
808 break;
809 case 0x02:
810 /* S4ADDL */
811 if (likely(rc != 31)) {
812 if (ra != 31) {
813 TCGv tmp = tcg_temp_new();
814 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
815 if (islit)
816 tcg_gen_addi_i64(tmp, tmp, lit);
817 else
818 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
819 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
820 tcg_temp_free(tmp);
821 } else {
822 if (islit)
823 tcg_gen_movi_i64(cpu_ir[rc], lit);
824 else
825 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
826 }
827 }
828 break;
829 case 0x09:
830 /* SUBL */
831 if (likely(rc != 31)) {
832 if (ra != 31) {
833 if (islit)
834 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
835 else
836 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
837 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
838 } else {
839 if (islit)
840 tcg_gen_movi_i64(cpu_ir[rc], -lit);
841 else {
842 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
843 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
844 }
845 }
846 break;
847 case 0x0B:
848 /* S4SUBL */
849 if (likely(rc != 31)) {
850 if (ra != 31) {
851 TCGv tmp = tcg_temp_new();
852 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
853 if (islit)
854 tcg_gen_subi_i64(tmp, tmp, lit);
855 else
856 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
857 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
858 tcg_temp_free(tmp);
859 } else {
860 if (islit)
861 tcg_gen_movi_i64(cpu_ir[rc], -lit);
862 else {
863 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
864 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
865 }
866 }
867 }
868 break;
869 case 0x0F:
870 /* CMPBGE */
871 gen_cmpbge(ra, rb, rc, islit, lit);
872 break;
873 case 0x12:
874 /* S8ADDL */
875 if (likely(rc != 31)) {
876 if (ra != 31) {
877 TCGv tmp = tcg_temp_new();
878 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
879 if (islit)
880 tcg_gen_addi_i64(tmp, tmp, lit);
881 else
882 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
883 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
884 tcg_temp_free(tmp);
885 } else {
886 if (islit)
887 tcg_gen_movi_i64(cpu_ir[rc], lit);
888 else
889 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
890 }
891 }
892 break;
893 case 0x1B:
894 /* S8SUBL */
895 if (likely(rc != 31)) {
896 if (ra != 31) {
897 TCGv tmp = tcg_temp_new();
898 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
899 if (islit)
900 tcg_gen_subi_i64(tmp, tmp, lit);
901 else
902 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
903 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
904 tcg_temp_free(tmp);
905 } else {
906 if (islit)
907 tcg_gen_movi_i64(cpu_ir[rc], -lit);
908 else
909 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
910 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
911 }
912 }
913 }
914 break;
915 case 0x1D:
916 /* CMPULT */
917 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
918 break;
919 case 0x20:
920 /* ADDQ */
921 if (likely(rc != 31)) {
922 if (ra != 31) {
923 if (islit)
924 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
925 else
926 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
927 } else {
928 if (islit)
929 tcg_gen_movi_i64(cpu_ir[rc], lit);
930 else
931 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
932 }
933 }
934 break;
935 case 0x22:
936 /* S4ADDQ */
937 if (likely(rc != 31)) {
938 if (ra != 31) {
939 TCGv tmp = tcg_temp_new();
940 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
941 if (islit)
942 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
943 else
944 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
945 tcg_temp_free(tmp);
946 } else {
947 if (islit)
948 tcg_gen_movi_i64(cpu_ir[rc], lit);
949 else
950 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
951 }
952 }
953 break;
954 case 0x29:
955 /* SUBQ */
956 if (likely(rc != 31)) {
957 if (ra != 31) {
958 if (islit)
959 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
960 else
961 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
962 } else {
963 if (islit)
964 tcg_gen_movi_i64(cpu_ir[rc], -lit);
965 else
966 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
967 }
968 }
969 break;
970 case 0x2B:
971 /* S4SUBQ */
972 if (likely(rc != 31)) {
973 if (ra != 31) {
974 TCGv tmp = tcg_temp_new();
975 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
976 if (islit)
977 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
978 else
979 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
980 tcg_temp_free(tmp);
981 } else {
982 if (islit)
983 tcg_gen_movi_i64(cpu_ir[rc], -lit);
984 else
985 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
986 }
987 }
988 break;
989 case 0x2D:
990 /* CMPEQ */
991 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
992 break;
993 case 0x32:
994 /* S8ADDQ */
995 if (likely(rc != 31)) {
996 if (ra != 31) {
997 TCGv tmp = tcg_temp_new();
998 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
999 if (islit)
1000 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1001 else
1002 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1003 tcg_temp_free(tmp);
1004 } else {
1005 if (islit)
1006 tcg_gen_movi_i64(cpu_ir[rc], lit);
1007 else
1008 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1009 }
1010 }
1011 break;
1012 case 0x3B:
1013 /* S8SUBQ */
1014 if (likely(rc != 31)) {
1015 if (ra != 31) {
1016 TCGv tmp = tcg_temp_new();
1017 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1018 if (islit)
1019 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1020 else
1021 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1022 tcg_temp_free(tmp);
1023 } else {
1024 if (islit)
1025 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1026 else
1027 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1028 }
1029 }
1030 break;
1031 case 0x3D:
1032 /* CMPULE */
1033 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1034 break;
1035 case 0x40:
1036 /* ADDL/V */
1037 gen_addlv(ra, rb, rc, islit, lit);
1038 break;
1039 case 0x49:
1040 /* SUBL/V */
1041 gen_sublv(ra, rb, rc, islit, lit);
1042 break;
1043 case 0x4D:
1044 /* CMPLT */
1045 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1046 break;
1047 case 0x60:
1048 /* ADDQ/V */
1049 gen_addqv(ra, rb, rc, islit, lit);
1050 break;
1051 case 0x69:
1052 /* SUBQ/V */
1053 gen_subqv(ra, rb, rc, islit, lit);
1054 break;
1055 case 0x6D:
1056 /* CMPLE */
1057 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1058 break;
1059 default:
1060 goto invalid_opc;
1061 }
1062 break;
1063 case 0x11:
1064 switch (fn7) {
1065 case 0x00:
1066 /* AND */
1067 if (likely(rc != 31)) {
1068 if (ra == 31)
1069 tcg_gen_movi_i64(cpu_ir[rc], 0);
1070 else if (islit)
1071 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1072 else
1073 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1074 }
1075 break;
1076 case 0x08:
1077 /* BIC */
1078 if (likely(rc != 31)) {
1079 if (ra != 31) {
1080 if (islit)
1081 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1082 else
1083 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1084 } else
1085 tcg_gen_movi_i64(cpu_ir[rc], 0);
1086 }
1087 break;
1088 case 0x14:
1089 /* CMOVLBS */
1090 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1091 break;
1092 case 0x16:
1093 /* CMOVLBC */
1094 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1095 break;
1096 case 0x20:
1097 /* BIS */
1098 if (likely(rc != 31)) {
1099 if (ra != 31) {
1100 if (islit)
1101 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1102 else
1103 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1104 } else {
1105 if (islit)
1106 tcg_gen_movi_i64(cpu_ir[rc], lit);
1107 else
1108 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1109 }
1110 }
1111 break;
1112 case 0x24:
1113 /* CMOVEQ */
1114 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1115 break;
1116 case 0x26:
1117 /* CMOVNE */
1118 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1119 break;
1120 case 0x28:
1121 /* ORNOT */
1122 if (likely(rc != 31)) {
1123 if (ra != 31) {
1124 if (islit)
1125 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1126 else
1127 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1128 } else {
1129 if (islit)
1130 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1131 else
1132 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1133 }
1134 }
1135 break;
1136 case 0x40:
1137 /* XOR */
1138 if (likely(rc != 31)) {
1139 if (ra != 31) {
1140 if (islit)
1141 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1142 else
1143 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1144 } else {
1145 if (islit)
1146 tcg_gen_movi_i64(cpu_ir[rc], lit);
1147 else
1148 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1149 }
1150 }
1151 break;
1152 case 0x44:
1153 /* CMOVLT */
1154 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1155 break;
1156 case 0x46:
1157 /* CMOVGE */
1158 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1159 break;
1160 case 0x48:
1161 /* EQV */
1162 if (likely(rc != 31)) {
1163 if (ra != 31) {
1164 if (islit)
1165 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1166 else
1167 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1168 } else {
1169 if (islit)
1170 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1171 else
1172 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1173 }
1174 }
1175 break;
1176 case 0x61:
1177 /* AMASK */
1178 if (likely(rc != 31)) {
1179 if (islit)
1180 tcg_gen_movi_i64(cpu_ir[rc], lit);
1181 else
1182 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1183 switch (ctx->env->implver) {
1184 case IMPLVER_2106x:
1185 /* EV4, EV45, LCA, LCA45 & EV5 */
1186 break;
1187 case IMPLVER_21164:
1188 case IMPLVER_21264:
1189 case IMPLVER_21364:
1190 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1191 ~(uint64_t)ctx->amask);
1192 break;
1193 }
1194 }
1195 break;
1196 case 0x64:
1197 /* CMOVLE */
1198 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
1199 break;
1200 case 0x66:
1201 /* CMOVGT */
1202 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
1203 break;
1204 case 0x6C:
1205 /* IMPLVER */
1206 if (rc != 31)
1207 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
1208 break;
1209 default:
1210 goto invalid_opc;
1211 }
1212 break;
1213 case 0x12:
1214 switch (fn7) {
1215 case 0x02:
1216 /* MSKBL */
1217 gen_mskbl(ra, rb, rc, islit, lit);
1218 break;
1219 case 0x06:
1220 /* EXTBL */
1221 gen_ext_l(&tcg_gen_ext8u_i64, ra, rb, rc, islit, lit);
1222 break;
1223 case 0x0B:
1224 /* INSBL */
1225 gen_insbl(ra, rb, rc, islit, lit);
1226 break;
1227 case 0x12:
1228 /* MSKWL */
1229 gen_mskwl(ra, rb, rc, islit, lit);
1230 break;
1231 case 0x16:
1232 /* EXTWL */
1233 gen_ext_l(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
1234 break;
1235 case 0x1B:
1236 /* INSWL */
1237 gen_inswl(ra, rb, rc, islit, lit);
1238 break;
1239 case 0x22:
1240 /* MSKLL */
1241 gen_mskll(ra, rb, rc, islit, lit);
1242 break;
1243 case 0x26:
1244 /* EXTLL */
1245 gen_ext_l(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
1246 break;
1247 case 0x2B:
1248 /* INSLL */
1249 gen_insll(ra, rb, rc, islit, lit);
1250 break;
1251 case 0x30:
1252 /* ZAP */
1253 gen_zap(ra, rb, rc, islit, lit);
1254 break;
1255 case 0x31:
1256 /* ZAPNOT */
1257 gen_zapnot(ra, rb, rc, islit, lit);
1258 break;
1259 case 0x32:
1260 /* MSKQL */
1261 gen_mskql(ra, rb, rc, islit, lit);
1262 break;
1263 case 0x34:
1264 /* SRL */
1265 if (likely(rc != 31)) {
1266 if (ra != 31) {
1267 if (islit)
1268 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1269 else {
1270 TCGv shift = tcg_temp_new();
1271 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1272 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1273 tcg_temp_free(shift);
1274 }
1275 } else
1276 tcg_gen_movi_i64(cpu_ir[rc], 0);
1277 }
1278 break;
1279 case 0x36:
1280 /* EXTQL */
1281 gen_ext_l(NULL, ra, rb, rc, islit, lit);
1282 break;
1283 case 0x39:
1284 /* SLL */
1285 if (likely(rc != 31)) {
1286 if (ra != 31) {
1287 if (islit)
1288 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1289 else {
1290 TCGv shift = tcg_temp_new();
1291 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1292 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1293 tcg_temp_free(shift);
1294 }
1295 } else
1296 tcg_gen_movi_i64(cpu_ir[rc], 0);
1297 }
1298 break;
1299 case 0x3B:
1300 /* INSQL */
1301 gen_insql(ra, rb, rc, islit, lit);
1302 break;
1303 case 0x3C:
1304 /* SRA */
1305 if (likely(rc != 31)) {
1306 if (ra != 31) {
1307 if (islit)
1308 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1309 else {
1310 TCGv shift = tcg_temp_new();
1311 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1312 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1313 tcg_temp_free(shift);
1314 }
1315 } else
1316 tcg_gen_movi_i64(cpu_ir[rc], 0);
1317 }
1318 break;
1319 case 0x52:
1320 /* MSKWH */
1321 gen_mskwh(ra, rb, rc, islit, lit);
1322 break;
1323 case 0x57:
1324 /* INSWH */
1325 gen_inswh(ra, rb, rc, islit, lit);
1326 break;
1327 case 0x5A:
1328 /* EXTWH */
1329 gen_ext_h(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
1330 break;
1331 case 0x62:
1332 /* MSKLH */
1333 gen_msklh(ra, rb, rc, islit, lit);
1334 break;
1335 case 0x67:
1336 /* INSLH */
1337 gen_inslh(ra, rb, rc, islit, lit);
1338 break;
1339 case 0x6A:
1340 /* EXTLH */
1341 gen_ext_h(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
1342 break;
1343 case 0x72:
1344 /* MSKQH */
1345 gen_mskqh(ra, rb, rc, islit, lit);
1346 break;
1347 case 0x77:
1348 /* INSQH */
1349 gen_insqh(ra, rb, rc, islit, lit);
1350 break;
1351 case 0x7A:
1352 /* EXTQH */
1353 gen_ext_h(NULL, ra, rb, rc, islit, lit);
1354 break;
1355 default:
1356 goto invalid_opc;
1357 }
1358 break;
1359 case 0x13:
1360 switch (fn7) {
1361 case 0x00:
1362 /* MULL */
1363 if (likely(rc != 31)) {
1364 if (ra == 31)
1365 tcg_gen_movi_i64(cpu_ir[rc], 0);
1366 else {
1367 if (islit)
1368 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1369 else
1370 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1371 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1372 }
1373 }
1374 break;
1375 case 0x20:
1376 /* MULQ */
1377 if (likely(rc != 31)) {
1378 if (ra == 31)
1379 tcg_gen_movi_i64(cpu_ir[rc], 0);
1380 else if (islit)
1381 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1382 else
1383 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1384 }
1385 break;
1386 case 0x30:
1387 /* UMULH */
1388 gen_umulh(ra, rb, rc, islit, lit);
1389 break;
1390 case 0x40:
1391 /* MULL/V */
1392 gen_mullv(ra, rb, rc, islit, lit);
1393 break;
1394 case 0x60:
1395 /* MULQ/V */
1396 gen_mulqv(ra, rb, rc, islit, lit);
1397 break;
1398 default:
1399 goto invalid_opc;
1400 }
1401 break;
1402 case 0x14:
1403 switch (fpfn) { /* f11 & 0x3F */
1404 case 0x04:
1405 /* ITOFS */
1406 if (!(ctx->amask & AMASK_FIX))
1407 goto invalid_opc;
1408 if (likely(rc != 31)) {
1409 if (ra != 31) {
1410 TCGv_i32 tmp = tcg_temp_new_i32();
1411 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1412 gen_helper_memory_to_s(cpu_fir[rc], tmp);
1413 tcg_temp_free_i32(tmp);
1414 } else
1415 tcg_gen_movi_i64(cpu_fir[rc], 0);
1416 }
1417 break;
1418 case 0x0A:
1419 /* SQRTF */
1420 if (!(ctx->amask & AMASK_FIX))
1421 goto invalid_opc;
1422 gen_fsqrtf(rb, rc);
1423 break;
1424 case 0x0B:
1425 /* SQRTS */
1426 if (!(ctx->amask & AMASK_FIX))
1427 goto invalid_opc;
1428 gen_fsqrts(rb, rc);
1429 break;
1430 case 0x14:
1431 /* ITOFF */
1432 if (!(ctx->amask & AMASK_FIX))
1433 goto invalid_opc;
1434 if (likely(rc != 31)) {
1435 if (ra != 31) {
1436 TCGv_i32 tmp = tcg_temp_new_i32();
1437 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1438 gen_helper_memory_to_f(cpu_fir[rc], tmp);
1439 tcg_temp_free_i32(tmp);
1440 } else
1441 tcg_gen_movi_i64(cpu_fir[rc], 0);
1442 }
1443 break;
1444 case 0x24:
1445 /* ITOFT */
1446 if (!(ctx->amask & AMASK_FIX))
1447 goto invalid_opc;
1448 if (likely(rc != 31)) {
1449 if (ra != 31)
1450 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
1451 else
1452 tcg_gen_movi_i64(cpu_fir[rc], 0);
1453 }
1454 break;
1455 case 0x2A:
1456 /* SQRTG */
1457 if (!(ctx->amask & AMASK_FIX))
1458 goto invalid_opc;
1459 gen_fsqrtg(rb, rc);
1460 break;
1461 case 0x02B:
1462 /* SQRTT */
1463 if (!(ctx->amask & AMASK_FIX))
1464 goto invalid_opc;
1465 gen_fsqrtt(rb, rc);
1466 break;
1467 default:
1468 goto invalid_opc;
1469 }
1470 break;
1471 case 0x15:
1472 /* VAX floating point */
1473 /* XXX: rounding mode and trap are ignored (!) */
1474 switch (fpfn) { /* f11 & 0x3F */
1475 case 0x00:
1476 /* ADDF */
1477 gen_faddf(ra, rb, rc);
1478 break;
1479 case 0x01:
1480 /* SUBF */
1481 gen_fsubf(ra, rb, rc);
1482 break;
1483 case 0x02:
1484 /* MULF */
1485 gen_fmulf(ra, rb, rc);
1486 break;
1487 case 0x03:
1488 /* DIVF */
1489 gen_fdivf(ra, rb, rc);
1490 break;
1491 case 0x1E:
1492 /* CVTDG */
1493 #if 0 // TODO
1494 gen_fcvtdg(rb, rc);
1495 #else
1496 goto invalid_opc;
1497 #endif
1498 break;
1499 case 0x20:
1500 /* ADDG */
1501 gen_faddg(ra, rb, rc);
1502 break;
1503 case 0x21:
1504 /* SUBG */
1505 gen_fsubg(ra, rb, rc);
1506 break;
1507 case 0x22:
1508 /* MULG */
1509 gen_fmulg(ra, rb, rc);
1510 break;
1511 case 0x23:
1512 /* DIVG */
1513 gen_fdivg(ra, rb, rc);
1514 break;
1515 case 0x25:
1516 /* CMPGEQ */
1517 gen_fcmpgeq(ra, rb, rc);
1518 break;
1519 case 0x26:
1520 /* CMPGLT */
1521 gen_fcmpglt(ra, rb, rc);
1522 break;
1523 case 0x27:
1524 /* CMPGLE */
1525 gen_fcmpgle(ra, rb, rc);
1526 break;
1527 case 0x2C:
1528 /* CVTGF */
1529 gen_fcvtgf(rb, rc);
1530 break;
1531 case 0x2D:
1532 /* CVTGD */
1533 #if 0 // TODO
1534 gen_fcvtgd(rb, rc);
1535 #else
1536 goto invalid_opc;
1537 #endif
1538 break;
1539 case 0x2F:
1540 /* CVTGQ */
1541 gen_fcvtgq(rb, rc);
1542 break;
1543 case 0x3C:
1544 /* CVTQF */
1545 gen_fcvtqf(rb, rc);
1546 break;
1547 case 0x3E:
1548 /* CVTQG */
1549 gen_fcvtqg(rb, rc);
1550 break;
1551 default:
1552 goto invalid_opc;
1553 }
1554 break;
1555 case 0x16:
1556 /* IEEE floating-point */
1557 /* XXX: rounding mode and traps are ignored (!) */
1558 switch (fpfn) { /* f11 & 0x3F */
1559 case 0x00:
1560 /* ADDS */
1561 gen_fadds(ra, rb, rc);
1562 break;
1563 case 0x01:
1564 /* SUBS */
1565 gen_fsubs(ra, rb, rc);
1566 break;
1567 case 0x02:
1568 /* MULS */
1569 gen_fmuls(ra, rb, rc);
1570 break;
1571 case 0x03:
1572 /* DIVS */
1573 gen_fdivs(ra, rb, rc);
1574 break;
1575 case 0x20:
1576 /* ADDT */
1577 gen_faddt(ra, rb, rc);
1578 break;
1579 case 0x21:
1580 /* SUBT */
1581 gen_fsubt(ra, rb, rc);
1582 break;
1583 case 0x22:
1584 /* MULT */
1585 gen_fmult(ra, rb, rc);
1586 break;
1587 case 0x23:
1588 /* DIVT */
1589 gen_fdivt(ra, rb, rc);
1590 break;
1591 case 0x24:
1592 /* CMPTUN */
1593 gen_fcmptun(ra, rb, rc);
1594 break;
1595 case 0x25:
1596 /* CMPTEQ */
1597 gen_fcmpteq(ra, rb, rc);
1598 break;
1599 case 0x26:
1600 /* CMPTLT */
1601 gen_fcmptlt(ra, rb, rc);
1602 break;
1603 case 0x27:
1604 /* CMPTLE */
1605 gen_fcmptle(ra, rb, rc);
1606 break;
1607 case 0x2C:
1608 /* XXX: incorrect */
1609 if (fn11 == 0x2AC || fn11 == 0x6AC) {
1610 /* CVTST */
1611 gen_fcvtst(rb, rc);
1612 } else {
1613 /* CVTTS */
1614 gen_fcvtts(rb, rc);
1615 }
1616 break;
1617 case 0x2F:
1618 /* CVTTQ */
1619 gen_fcvttq(rb, rc);
1620 break;
1621 case 0x3C:
1622 /* CVTQS */
1623 gen_fcvtqs(rb, rc);
1624 break;
1625 case 0x3E:
1626 /* CVTQT */
1627 gen_fcvtqt(rb, rc);
1628 break;
1629 default:
1630 goto invalid_opc;
1631 }
1632 break;
1633 case 0x17:
1634 switch (fn11) {
1635 case 0x010:
1636 /* CVTLQ */
1637 gen_fcvtlq(rb, rc);
1638 break;
1639 case 0x020:
1640 if (likely(rc != 31)) {
1641 if (ra == rb)
1642 /* FMOV */
1643 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
1644 else
1645 /* CPYS */
1646 gen_fcpys(ra, rb, rc);
1647 }
1648 break;
1649 case 0x021:
1650 /* CPYSN */
1651 gen_fcpysn(ra, rb, rc);
1652 break;
1653 case 0x022:
1654 /* CPYSE */
1655 gen_fcpyse(ra, rb, rc);
1656 break;
1657 case 0x024:
1658 /* MT_FPCR */
1659 if (likely(ra != 31))
1660 gen_helper_store_fpcr(cpu_fir[ra]);
1661 else {
1662 TCGv tmp = tcg_const_i64(0);
1663 gen_helper_store_fpcr(tmp);
1664 tcg_temp_free(tmp);
1665 }
1666 break;
1667 case 0x025:
1668 /* MF_FPCR */
1669 if (likely(ra != 31))
1670 gen_helper_load_fpcr(cpu_fir[ra]);
1671 break;
1672 case 0x02A:
1673 /* FCMOVEQ */
1674 gen_fcmpfeq(ra, rb, rc);
1675 break;
1676 case 0x02B:
1677 /* FCMOVNE */
1678 gen_fcmpfne(ra, rb, rc);
1679 break;
1680 case 0x02C:
1681 /* FCMOVLT */
1682 gen_fcmpflt(ra, rb, rc);
1683 break;
1684 case 0x02D:
1685 /* FCMOVGE */
1686 gen_fcmpfge(ra, rb, rc);
1687 break;
1688 case 0x02E:
1689 /* FCMOVLE */
1690 gen_fcmpfle(ra, rb, rc);
1691 break;
1692 case 0x02F:
1693 /* FCMOVGT */
1694 gen_fcmpfgt(ra, rb, rc);
1695 break;
1696 case 0x030:
1697 /* CVTQL */
1698 gen_fcvtql(rb, rc);
1699 break;
1700 case 0x130:
1701 /* CVTQL/V */
1702 gen_fcvtqlv(rb, rc);
1703 break;
1704 case 0x530:
1705 /* CVTQL/SV */
1706 gen_fcvtqlsv(rb, rc);
1707 break;
1708 default:
1709 goto invalid_opc;
1710 }
1711 break;
1712 case 0x18:
1713 switch ((uint16_t)disp16) {
1714 case 0x0000:
1715 /* TRAPB */
1716 /* No-op. Just exit from the current tb */
1717 ret = 2;
1718 break;
1719 case 0x0400:
1720 /* EXCB */
1721 /* No-op. Just exit from the current tb */
1722 ret = 2;
1723 break;
1724 case 0x4000:
1725 /* MB */
1726 /* No-op */
1727 break;
1728 case 0x4400:
1729 /* WMB */
1730 /* No-op */
1731 break;
1732 case 0x8000:
1733 /* FETCH */
1734 /* No-op */
1735 break;
1736 case 0xA000:
1737 /* FETCH_M */
1738 /* No-op */
1739 break;
1740 case 0xC000:
1741 /* RPCC */
1742 if (ra != 31)
1743 gen_helper_load_pcc(cpu_ir[ra]);
1744 break;
1745 case 0xE000:
1746 /* RC */
1747 if (ra != 31)
1748 gen_helper_rc(cpu_ir[ra]);
1749 break;
1750 case 0xE800:
1751 /* ECB */
1752 break;
1753 case 0xF000:
1754 /* RS */
1755 if (ra != 31)
1756 gen_helper_rs(cpu_ir[ra]);
1757 break;
1758 case 0xF800:
1759 /* WH64 */
1760 /* No-op */
1761 break;
1762 default:
1763 goto invalid_opc;
1764 }
1765 break;
1766 case 0x19:
1767 /* HW_MFPR (PALcode) */
1768 #if defined (CONFIG_USER_ONLY)
1769 goto invalid_opc;
1770 #else
1771 if (!ctx->pal_mode)
1772 goto invalid_opc;
1773 if (ra != 31) {
1774 TCGv tmp = tcg_const_i32(insn & 0xFF);
1775 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
1776 tcg_temp_free(tmp);
1777 }
1778 break;
1779 #endif
1780 case 0x1A:
1781 if (rb != 31)
1782 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
1783 else
1784 tcg_gen_movi_i64(cpu_pc, 0);
1785 if (ra != 31)
1786 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
1787 /* Those four jumps only differ by the branch prediction hint */
1788 switch (fn2) {
1789 case 0x0:
1790 /* JMP */
1791 break;
1792 case 0x1:
1793 /* JSR */
1794 break;
1795 case 0x2:
1796 /* RET */
1797 break;
1798 case 0x3:
1799 /* JSR_COROUTINE */
1800 break;
1801 }
1802 ret = 1;
1803 break;
1804 case 0x1B:
1805 /* HW_LD (PALcode) */
1806 #if defined (CONFIG_USER_ONLY)
1807 goto invalid_opc;
1808 #else
1809 if (!ctx->pal_mode)
1810 goto invalid_opc;
1811 if (ra != 31) {
1812 TCGv addr = tcg_temp_new();
1813 if (rb != 31)
1814 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
1815 else
1816 tcg_gen_movi_i64(addr, disp12);
1817 switch ((insn >> 12) & 0xF) {
1818 case 0x0:
1819 /* Longword physical access (hw_ldl/p) */
1820 gen_helper_ldl_raw(cpu_ir[ra], addr);
1821 break;
1822 case 0x1:
1823 /* Quadword physical access (hw_ldq/p) */
1824 gen_helper_ldq_raw(cpu_ir[ra], addr);
1825 break;
1826 case 0x2:
1827 /* Longword physical access with lock (hw_ldl_l/p) */
1828 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
1829 break;
1830 case 0x3:
1831 /* Quadword physical access with lock (hw_ldq_l/p) */
1832 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
1833 break;
1834 case 0x4:
1835 /* Longword virtual PTE fetch (hw_ldl/v) */
1836 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
1837 break;
1838 case 0x5:
1839 /* Quadword virtual PTE fetch (hw_ldq/v) */
1840 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
1841 break;
1842 case 0x6:
1843 /* Incpu_ir[ra]id */
1844 goto invalid_opc;
1845 case 0x7:
1846 /* Incpu_ir[ra]id */
1847 goto invalid_opc;
1848 case 0x8:
1849 /* Longword virtual access (hw_ldl) */
1850 gen_helper_st_virt_to_phys(addr, addr);
1851 gen_helper_ldl_raw(cpu_ir[ra], addr);
1852 break;
1853 case 0x9:
1854 /* Quadword virtual access (hw_ldq) */
1855 gen_helper_st_virt_to_phys(addr, addr);
1856 gen_helper_ldq_raw(cpu_ir[ra], addr);
1857 break;
1858 case 0xA:
1859 /* Longword virtual access with protection check (hw_ldl/w) */
1860 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
1861 break;
1862 case 0xB:
1863 /* Quadword virtual access with protection check (hw_ldq/w) */
1864 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
1865 break;
1866 case 0xC:
1867 /* Longword virtual access with alt access mode (hw_ldl/a)*/
1868 gen_helper_set_alt_mode();
1869 gen_helper_st_virt_to_phys(addr, addr);
1870 gen_helper_ldl_raw(cpu_ir[ra], addr);
1871 gen_helper_restore_mode();
1872 break;
1873 case 0xD:
1874 /* Quadword virtual access with alt access mode (hw_ldq/a) */
1875 gen_helper_set_alt_mode();
1876 gen_helper_st_virt_to_phys(addr, addr);
1877 gen_helper_ldq_raw(cpu_ir[ra], addr);
1878 gen_helper_restore_mode();
1879 break;
1880 case 0xE:
1881 /* Longword virtual access with alternate access mode and
1882 * protection checks (hw_ldl/wa)
1883 */
1884 gen_helper_set_alt_mode();
1885 gen_helper_ldl_data(cpu_ir[ra], addr);
1886 gen_helper_restore_mode();
1887 break;
1888 case 0xF:
1889 /* Quadword virtual access with alternate access mode and
1890 * protection checks (hw_ldq/wa)
1891 */
1892 gen_helper_set_alt_mode();
1893 gen_helper_ldq_data(cpu_ir[ra], addr);
1894 gen_helper_restore_mode();
1895 break;
1896 }
1897 tcg_temp_free(addr);
1898 }
1899 break;
1900 #endif
1901 case 0x1C:
1902 switch (fn7) {
1903 case 0x00:
1904 /* SEXTB */
1905 if (!(ctx->amask & AMASK_BWX))
1906 goto invalid_opc;
1907 if (likely(rc != 31)) {
1908 if (islit)
1909 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
1910 else
1911 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
1912 }
1913 break;
1914 case 0x01:
1915 /* SEXTW */
1916 if (!(ctx->amask & AMASK_BWX))
1917 goto invalid_opc;
1918 if (likely(rc != 31)) {
1919 if (islit)
1920 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
1921 else
1922 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
1923 }
1924 break;
1925 case 0x30:
1926 /* CTPOP */
1927 if (!(ctx->amask & AMASK_CIX))
1928 goto invalid_opc;
1929 if (likely(rc != 31)) {
1930 if (islit)
1931 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
1932 else
1933 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
1934 }
1935 break;
1936 case 0x31:
1937 /* PERR */
1938 if (!(ctx->amask & AMASK_MVI))
1939 goto invalid_opc;
1940 gen_perr(ra, rb, rc, islit, lit);
1941 break;
1942 case 0x32:
1943 /* CTLZ */
1944 if (!(ctx->amask & AMASK_CIX))
1945 goto invalid_opc;
1946 if (likely(rc != 31)) {
1947 if (islit)
1948 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
1949 else
1950 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
1951 }
1952 break;
1953 case 0x33:
1954 /* CTTZ */
1955 if (!(ctx->amask & AMASK_CIX))
1956 goto invalid_opc;
1957 if (likely(rc != 31)) {
1958 if (islit)
1959 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
1960 else
1961 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
1962 }
1963 break;
1964 case 0x34:
1965 /* UNPKBW */
1966 if (!(ctx->amask & AMASK_MVI))
1967 goto invalid_opc;
1968 if (real_islit || ra != 31)
1969 goto invalid_opc;
1970 gen_unpkbw (rb, rc);
1971 break;
1972 case 0x35:
1973 /* UNPKBL */
1974 if (!(ctx->amask & AMASK_MVI))
1975 goto invalid_opc;
1976 if (real_islit || ra != 31)
1977 goto invalid_opc;
1978 gen_unpkbl (rb, rc);
1979 break;
1980 case 0x36:
1981 /* PKWB */
1982 if (!(ctx->amask & AMASK_MVI))
1983 goto invalid_opc;
1984 if (real_islit || ra != 31)
1985 goto invalid_opc;
1986 gen_pkwb (rb, rc);
1987 break;
1988 case 0x37:
1989 /* PKLB */
1990 if (!(ctx->amask & AMASK_MVI))
1991 goto invalid_opc;
1992 if (real_islit || ra != 31)
1993 goto invalid_opc;
1994 gen_pklb (rb, rc);
1995 break;
1996 case 0x38:
1997 /* MINSB8 */
1998 if (!(ctx->amask & AMASK_MVI))
1999 goto invalid_opc;
2000 gen_minsb8 (ra, rb, rc, islit, lit);
2001 break;
2002 case 0x39:
2003 /* MINSW4 */
2004 if (!(ctx->amask & AMASK_MVI))
2005 goto invalid_opc;
2006 gen_minsw4 (ra, rb, rc, islit, lit);
2007 break;
2008 case 0x3A:
2009 /* MINUB8 */
2010 if (!(ctx->amask & AMASK_MVI))
2011 goto invalid_opc;
2012 gen_minub8 (ra, rb, rc, islit, lit);
2013 break;
2014 case 0x3B:
2015 /* MINUW4 */
2016 if (!(ctx->amask & AMASK_MVI))
2017 goto invalid_opc;
2018 gen_minuw4 (ra, rb, rc, islit, lit);
2019 break;
2020 case 0x3C:
2021 /* MAXUB8 */
2022 if (!(ctx->amask & AMASK_MVI))
2023 goto invalid_opc;
2024 gen_maxub8 (ra, rb, rc, islit, lit);
2025 break;
2026 case 0x3D:
2027 /* MAXUW4 */
2028 if (!(ctx->amask & AMASK_MVI))
2029 goto invalid_opc;
2030 gen_maxuw4 (ra, rb, rc, islit, lit);
2031 break;
2032 case 0x3E:
2033 /* MAXSB8 */
2034 if (!(ctx->amask & AMASK_MVI))
2035 goto invalid_opc;
2036 gen_maxsb8 (ra, rb, rc, islit, lit);
2037 break;
2038 case 0x3F:
2039 /* MAXSW4 */
2040 if (!(ctx->amask & AMASK_MVI))
2041 goto invalid_opc;
2042 gen_maxsw4 (ra, rb, rc, islit, lit);
2043 break;
2044 case 0x70:
2045 /* FTOIT */
2046 if (!(ctx->amask & AMASK_FIX))
2047 goto invalid_opc;
2048 if (likely(rc != 31)) {
2049 if (ra != 31)
2050 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2051 else
2052 tcg_gen_movi_i64(cpu_ir[rc], 0);
2053 }
2054 break;
2055 case 0x78:
2056 /* FTOIS */
2057 if (!(ctx->amask & AMASK_FIX))
2058 goto invalid_opc;
2059 if (rc != 31) {
2060 TCGv_i32 tmp1 = tcg_temp_new_i32();
2061 if (ra != 31)
2062 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2063 else {
2064 TCGv tmp2 = tcg_const_i64(0);
2065 gen_helper_s_to_memory(tmp1, tmp2);
2066 tcg_temp_free(tmp2);
2067 }
2068 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2069 tcg_temp_free_i32(tmp1);
2070 }
2071 break;
2072 default:
2073 goto invalid_opc;
2074 }
2075 break;
2076 case 0x1D:
2077 /* HW_MTPR (PALcode) */
2078 #if defined (CONFIG_USER_ONLY)
2079 goto invalid_opc;
2080 #else
2081 if (!ctx->pal_mode)
2082 goto invalid_opc;
2083 else {
2084 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2085 if (ra != 31)
2086 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2087 else {
2088 TCGv tmp2 = tcg_const_i64(0);
2089 gen_helper_mtpr(tmp1, tmp2);
2090 tcg_temp_free(tmp2);
2091 }
2092 tcg_temp_free(tmp1);
2093 ret = 2;
2094 }
2095 break;
2096 #endif
2097 case 0x1E:
2098 /* HW_REI (PALcode) */
2099 #if defined (CONFIG_USER_ONLY)
2100 goto invalid_opc;
2101 #else
2102 if (!ctx->pal_mode)
2103 goto invalid_opc;
2104 if (rb == 31) {
2105 /* "Old" alpha */
2106 gen_helper_hw_rei();
2107 } else {
2108 TCGv tmp;
2109
2110 if (ra != 31) {
2111 tmp = tcg_temp_new();
2112 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2113 } else
2114 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2115 gen_helper_hw_ret(tmp);
2116 tcg_temp_free(tmp);
2117 }
2118 ret = 2;
2119 break;
2120 #endif
2121 case 0x1F:
2122 /* HW_ST (PALcode) */
2123 #if defined (CONFIG_USER_ONLY)
2124 goto invalid_opc;
2125 #else
2126 if (!ctx->pal_mode)
2127 goto invalid_opc;
2128 else {
2129 TCGv addr, val;
2130 addr = tcg_temp_new();
2131 if (rb != 31)
2132 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2133 else
2134 tcg_gen_movi_i64(addr, disp12);
2135 if (ra != 31)
2136 val = cpu_ir[ra];
2137 else {
2138 val = tcg_temp_new();
2139 tcg_gen_movi_i64(val, 0);
2140 }
2141 switch ((insn >> 12) & 0xF) {
2142 case 0x0:
2143 /* Longword physical access */
2144 gen_helper_stl_raw(val, addr);
2145 break;
2146 case 0x1:
2147 /* Quadword physical access */
2148 gen_helper_stq_raw(val, addr);
2149 break;
2150 case 0x2:
2151 /* Longword physical access with lock */
2152 gen_helper_stl_c_raw(val, val, addr);
2153 break;
2154 case 0x3:
2155 /* Quadword physical access with lock */
2156 gen_helper_stq_c_raw(val, val, addr);
2157 break;
2158 case 0x4:
2159 /* Longword virtual access */
2160 gen_helper_st_virt_to_phys(addr, addr);
2161 gen_helper_stl_raw(val, addr);
2162 break;
2163 case 0x5:
2164 /* Quadword virtual access */
2165 gen_helper_st_virt_to_phys(addr, addr);
2166 gen_helper_stq_raw(val, addr);
2167 break;
2168 case 0x6:
2169 /* Invalid */
2170 goto invalid_opc;
2171 case 0x7:
2172 /* Invalid */
2173 goto invalid_opc;
2174 case 0x8:
2175 /* Invalid */
2176 goto invalid_opc;
2177 case 0x9:
2178 /* Invalid */
2179 goto invalid_opc;
2180 case 0xA:
2181 /* Invalid */
2182 goto invalid_opc;
2183 case 0xB:
2184 /* Invalid */
2185 goto invalid_opc;
2186 case 0xC:
2187 /* Longword virtual access with alternate access mode */
2188 gen_helper_set_alt_mode();
2189 gen_helper_st_virt_to_phys(addr, addr);
2190 gen_helper_stl_raw(val, addr);
2191 gen_helper_restore_mode();
2192 break;
2193 case 0xD:
2194 /* Quadword virtual access with alternate access mode */
2195 gen_helper_set_alt_mode();
2196 gen_helper_st_virt_to_phys(addr, addr);
2197 gen_helper_stl_raw(val, addr);
2198 gen_helper_restore_mode();
2199 break;
2200 case 0xE:
2201 /* Invalid */
2202 goto invalid_opc;
2203 case 0xF:
2204 /* Invalid */
2205 goto invalid_opc;
2206 }
2207 if (ra == 31)
2208 tcg_temp_free(val);
2209 tcg_temp_free(addr);
2210 }
2211 break;
2212 #endif
2213 case 0x20:
2214 /* LDF */
2215 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2216 break;
2217 case 0x21:
2218 /* LDG */
2219 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2220 break;
2221 case 0x22:
2222 /* LDS */
2223 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2224 break;
2225 case 0x23:
2226 /* LDT */
2227 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2228 break;
2229 case 0x24:
2230 /* STF */
2231 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2232 break;
2233 case 0x25:
2234 /* STG */
2235 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2236 break;
2237 case 0x26:
2238 /* STS */
2239 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2240 break;
2241 case 0x27:
2242 /* STT */
2243 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2244 break;
2245 case 0x28:
2246 /* LDL */
2247 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2248 break;
2249 case 0x29:
2250 /* LDQ */
2251 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2252 break;
2253 case 0x2A:
2254 /* LDL_L */
2255 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2256 break;
2257 case 0x2B:
2258 /* LDQ_L */
2259 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2260 break;
2261 case 0x2C:
2262 /* STL */
2263 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
2264 break;
2265 case 0x2D:
2266 /* STQ */
2267 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
2268 break;
2269 case 0x2E:
2270 /* STL_C */
2271 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
2272 break;
2273 case 0x2F:
2274 /* STQ_C */
2275 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
2276 break;
2277 case 0x30:
2278 /* BR */
2279 if (ra != 31)
2280 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2281 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2282 ret = 1;
2283 break;
2284 case 0x31: /* FBEQ */
2285 case 0x32: /* FBLT */
2286 case 0x33: /* FBLE */
2287 gen_fbcond(ctx, opc, ra, disp16);
2288 ret = 1;
2289 break;
2290 case 0x34:
2291 /* BSR */
2292 if (ra != 31)
2293 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2294 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2295 ret = 1;
2296 break;
2297 case 0x35: /* FBNE */
2298 case 0x36: /* FBGE */
2299 case 0x37: /* FBGT */
2300 gen_fbcond(ctx, opc, ra, disp16);
2301 ret = 1;
2302 break;
2303 case 0x38:
2304 /* BLBC */
2305 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2306 ret = 1;
2307 break;
2308 case 0x39:
2309 /* BEQ */
2310 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2311 ret = 1;
2312 break;
2313 case 0x3A:
2314 /* BLT */
2315 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2316 ret = 1;
2317 break;
2318 case 0x3B:
2319 /* BLE */
2320 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2321 ret = 1;
2322 break;
2323 case 0x3C:
2324 /* BLBS */
2325 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2326 ret = 1;
2327 break;
2328 case 0x3D:
2329 /* BNE */
2330 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2331 ret = 1;
2332 break;
2333 case 0x3E:
2334 /* BGE */
2335 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2336 ret = 1;
2337 break;
2338 case 0x3F:
2339 /* BGT */
2340 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2341 ret = 1;
2342 break;
2343 invalid_opc:
2344 gen_invalid(ctx);
2345 ret = 3;
2346 break;
2347 }
2348
2349 return ret;
2350 }
2351
2352 static inline void gen_intermediate_code_internal(CPUState *env,
2353 TranslationBlock *tb,
2354 int search_pc)
2355 {
2356 #if defined ALPHA_DEBUG_DISAS
2357 static int insn_count;
2358 #endif
2359 DisasContext ctx, *ctxp = &ctx;
2360 target_ulong pc_start;
2361 uint32_t insn;
2362 uint16_t *gen_opc_end;
2363 CPUBreakpoint *bp;
2364 int j, lj = -1;
2365 int ret;
2366 int num_insns;
2367 int max_insns;
2368
2369 pc_start = tb->pc;
2370 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2371 ctx.pc = pc_start;
2372 ctx.amask = env->amask;
2373 ctx.env = env;
2374 #if defined (CONFIG_USER_ONLY)
2375 ctx.mem_idx = 0;
2376 #else
2377 ctx.mem_idx = ((env->ps >> 3) & 3);
2378 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
2379 #endif
2380 num_insns = 0;
2381 max_insns = tb->cflags & CF_COUNT_MASK;
2382 if (max_insns == 0)
2383 max_insns = CF_COUNT_MASK;
2384
2385 gen_icount_start();
2386 for (ret = 0; ret == 0;) {
2387 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2388 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2389 if (bp->pc == ctx.pc) {
2390 gen_excp(&ctx, EXCP_DEBUG, 0);
2391 break;
2392 }
2393 }
2394 }
2395 if (search_pc) {
2396 j = gen_opc_ptr - gen_opc_buf;
2397 if (lj < j) {
2398 lj++;
2399 while (lj < j)
2400 gen_opc_instr_start[lj++] = 0;
2401 }
2402 gen_opc_pc[lj] = ctx.pc;
2403 gen_opc_instr_start[lj] = 1;
2404 gen_opc_icount[lj] = num_insns;
2405 }
2406 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
2407 gen_io_start();
2408 #if defined ALPHA_DEBUG_DISAS
2409 insn_count++;
2410 LOG_DISAS("pc " TARGET_FMT_lx " mem_idx %d\n",
2411 ctx.pc, ctx.mem_idx);
2412 #endif
2413 insn = ldl_code(ctx.pc);
2414 #if defined ALPHA_DEBUG_DISAS
2415 insn_count++;
2416 LOG_DISAS("opcode %08x %d\n", insn, insn_count);
2417 #endif
2418 num_insns++;
2419 ctx.pc += 4;
2420 ret = translate_one(ctxp, insn);
2421 if (ret != 0)
2422 break;
2423 /* if we reach a page boundary or are single stepping, stop
2424 * generation
2425 */
2426 if (env->singlestep_enabled) {
2427 gen_excp(&ctx, EXCP_DEBUG, 0);
2428 break;
2429 }
2430
2431 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
2432 break;
2433
2434 if (gen_opc_ptr >= gen_opc_end)
2435 break;
2436
2437 if (num_insns >= max_insns)
2438 break;
2439
2440 if (singlestep) {
2441 break;
2442 }
2443 }
2444 if (ret != 1 && ret != 3) {
2445 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2446 }
2447 if (tb->cflags & CF_LAST_IO)
2448 gen_io_end();
2449 /* Generate the return instruction */
2450 tcg_gen_exit_tb(0);
2451 gen_icount_end(tb, num_insns);
2452 *gen_opc_ptr = INDEX_op_end;
2453 if (search_pc) {
2454 j = gen_opc_ptr - gen_opc_buf;
2455 lj++;
2456 while (lj <= j)
2457 gen_opc_instr_start[lj++] = 0;
2458 } else {
2459 tb->size = ctx.pc - pc_start;
2460 tb->icount = num_insns;
2461 }
2462 #if defined ALPHA_DEBUG_DISAS
2463 log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
2464 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2465 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2466 log_target_disas(pc_start, ctx.pc - pc_start, 1);
2467 qemu_log("\n");
2468 }
2469 #endif
2470 }
2471
2472 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
2473 {
2474 gen_intermediate_code_internal(env, tb, 0);
2475 }
2476
2477 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
2478 {
2479 gen_intermediate_code_internal(env, tb, 1);
2480 }
2481
/* Static description of an emulated Alpha CPU model: the name matched
   against the -cpu option, and the values reported by the IMPLVER and
   AMASK instructions for that model.  Field order matters: cpu_defs[]
   below uses positional initializers.  */
struct cpu_def_t {
    const char *name;
    int implver, amask;
};
2486
2487 static const struct cpu_def_t cpu_defs[] = {
2488 { "ev4", IMPLVER_2106x, 0 },
2489 { "ev5", IMPLVER_21164, 0 },
2490 { "ev56", IMPLVER_21164, AMASK_BWX },
2491 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
2492 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
2493 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
2494 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
2495 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
2496 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
2497 { "21064", IMPLVER_2106x, 0 },
2498 { "21164", IMPLVER_21164, 0 },
2499 { "21164a", IMPLVER_21164, AMASK_BWX },
2500 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
2501 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
2502 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
2503 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
2504 };
2505
2506 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
2507 {
2508 CPUAlphaState *env;
2509 uint64_t hwpcb;
2510 int implver, amask, i, max;
2511
2512 env = qemu_mallocz(sizeof(CPUAlphaState));
2513 cpu_exec_init(env);
2514 alpha_translate_init();
2515 tlb_flush(env, 1);
2516
2517 /* Default to ev67; no reason not to emulate insns by default. */
2518 implver = IMPLVER_21264;
2519 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
2520 | AMASK_TRAP | AMASK_PREFETCH);
2521
2522 max = ARRAY_SIZE(cpu_defs);
2523 for (i = 0; i < max; i++) {
2524 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
2525 implver = cpu_defs[i].implver;
2526 amask = cpu_defs[i].amask;
2527 break;
2528 }
2529 }
2530 env->implver = implver;
2531 env->amask = amask;
2532
2533 env->ps = 0x1F00;
2534 #if defined (CONFIG_USER_ONLY)
2535 env->ps |= 1 << 3;
2536 #endif
2537 pal_init(env);
2538 /* Initialize IPR */
2539 hwpcb = env->ipr[IPR_PCBB];
2540 env->ipr[IPR_ASN] = 0;
2541 env->ipr[IPR_ASTEN] = 0;
2542 env->ipr[IPR_ASTSR] = 0;
2543 env->ipr[IPR_DATFX] = 0;
2544 /* XXX: fix this */
2545 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
2546 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
2547 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
2548 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
2549 env->ipr[IPR_FEN] = 0;
2550 env->ipr[IPR_IPL] = 31;
2551 env->ipr[IPR_MCES] = 0;
2552 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
2553 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
2554 env->ipr[IPR_SISR] = 0;
2555 env->ipr[IPR_VIRBND] = -1ULL;
2556
2557 qemu_init_vcpu(env);
2558 return env;
2559 }
2560
/* Restore the guest PC corresponding to opcode-buffer index pc_pos,
   using the mapping recorded by gen_intermediate_code_pc().  Called
   when guest state must be reconstructed mid-TB (e.g. after a fault);
   searched_pc and puc are unused on Alpha.  */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}