1 /*
2 * Alpha emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
30
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #undef ALPHA_DEBUG_DISAS
36
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 #else
40 # define LOG_DISAS(...) do { } while (0)
41 #endif
42
43 typedef struct DisasContext DisasContext;
44 struct DisasContext {
45 uint64_t pc;
46 int mem_idx;
47 #if !defined (CONFIG_USER_ONLY)
48 int pal_mode;
49 #endif
50 CPUAlphaState *env;
51 uint32_t amask;
52 };
53
54 /* global register indexes */
55 static TCGv_ptr cpu_env;
56 static TCGv cpu_ir[31];
57 static TCGv cpu_fir[31];
58 static TCGv cpu_pc;
59 static TCGv cpu_lock;
60 #ifdef CONFIG_USER_ONLY
61 static TCGv cpu_uniq;
62 #endif
63
64 /* register names */
65 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
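/* Sized exactly for the names generated in alpha_translate_init():
   "ir0".."ir9" take 4 bytes each ("irN\0"), "ir10".."ir30" take 5,
   and the "fir" forms are one byte longer. */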
66
67 #include "gen-icount.h"
68
69 static void alpha_translate_init(void)
70 {
71 int i;
72 char *p;
73 static int done_init = 0;
74
75 if (done_init)
76 return;
77
78 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
79
80 p = cpu_reg_names;
81 for (i = 0; i < 31; i++) {
82 sprintf(p, "ir%d", i);
83 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
84 offsetof(CPUState, ir[i]), p);
85 p += (i < 10) ? 4 : 5;
86
87 sprintf(p, "fir%d", i);
88 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
89 offsetof(CPUState, fir[i]), p);
90 p += (i < 10) ? 5 : 6;
91 }
92
93 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
94 offsetof(CPUState, pc), "pc");
95
96 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
97 offsetof(CPUState, lock), "lock");
98
99 #ifdef CONFIG_USER_ONLY
100 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
101 offsetof(CPUState, unique), "uniq");
102 #endif
103
104 /* register helpers */
105 #define GEN_HELPER 2
106 #include "helper.h"
107
108 done_init = 1;
109 }
110
111 static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
112 {
113 TCGv_i32 tmp1, tmp2;
114
115 tcg_gen_movi_i64(cpu_pc, ctx->pc);
116 tmp1 = tcg_const_i32(exception);
117 tmp2 = tcg_const_i32(error_code);
118 gen_helper_excp(tmp1, tmp2);
119 tcg_temp_free_i32(tmp2);
120 tcg_temp_free_i32(tmp1);
121 }
122
123 static inline void gen_invalid(DisasContext *ctx)
124 {
125 gen_excp(ctx, EXCP_OPCDEC, 0);
126 }
127
128 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
129 {
130 TCGv tmp = tcg_temp_new();
131 TCGv_i32 tmp32 = tcg_temp_new_i32();
132 tcg_gen_qemu_ld32u(tmp, t1, flags);
133 tcg_gen_trunc_i64_i32(tmp32, tmp);
134 gen_helper_memory_to_f(t0, tmp32);
135 tcg_temp_free_i32(tmp32);
136 tcg_temp_free(tmp);
137 }
138
139 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
140 {
141 TCGv tmp = tcg_temp_new();
142 tcg_gen_qemu_ld64(tmp, t1, flags);
143 gen_helper_memory_to_g(t0, tmp);
144 tcg_temp_free(tmp);
145 }
146
147 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
148 {
149 TCGv tmp = tcg_temp_new();
150 TCGv_i32 tmp32 = tcg_temp_new_i32();
151 tcg_gen_qemu_ld32u(tmp, t1, flags);
152 tcg_gen_trunc_i64_i32(tmp32, tmp);
153 gen_helper_memory_to_s(t0, tmp32);
154 tcg_temp_free_i32(tmp32);
155 tcg_temp_free(tmp);
156 }
157
158 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
159 {
160 tcg_gen_mov_i64(cpu_lock, t1);
161 tcg_gen_qemu_ld32s(t0, t1, flags);
162 }
163
164 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
165 {
166 tcg_gen_mov_i64(cpu_lock, t1);
167 tcg_gen_qemu_ld64(t0, t1, flags);
168 }
169
170 static inline void gen_load_mem(DisasContext *ctx,
171 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
172 int flags),
173 int ra, int rb, int32_t disp16, int fp,
174 int clear)
175 {
176 TCGv addr;
177
178 if (unlikely(ra == 31))
179 return;
180
181 addr = tcg_temp_new();
182 if (rb != 31) {
183 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
184 if (clear)
185 tcg_gen_andi_i64(addr, addr, ~0x7);
186 } else {
187 if (clear)
188 disp16 &= ~0x7;
189 tcg_gen_movi_i64(addr, disp16);
190 }
191 if (fp)
192 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
193 else
194 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
195 tcg_temp_free(addr);
196 }
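/* The "clear" argument implements the unaligned variants (LDQ_U here,
   STQ_U via gen_store_mem below): the low three address bits are zeroed
   before the access, so e.g. a computed address of 0x1003 accesses the
   aligned quadword at 0x1000. */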
197
198 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
199 {
200 TCGv_i32 tmp32 = tcg_temp_new_i32();
201 TCGv tmp = tcg_temp_new();
202 gen_helper_f_to_memory(tmp32, t0);
203 tcg_gen_extu_i32_i64(tmp, tmp32);
204 tcg_gen_qemu_st32(tmp, t1, flags);
205 tcg_temp_free(tmp);
206 tcg_temp_free_i32(tmp32);
207 }
208
209 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
210 {
211 TCGv tmp = tcg_temp_new();
212 gen_helper_g_to_memory(tmp, t0);
213 tcg_gen_qemu_st64(tmp, t1, flags);
214 tcg_temp_free(tmp);
215 }
216
217 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
218 {
219 TCGv_i32 tmp32 = tcg_temp_new_i32();
220 TCGv tmp = tcg_temp_new();
221 gen_helper_s_to_memory(tmp32, t0);
222 tcg_gen_extu_i32_i64(tmp, tmp32);
223 tcg_gen_qemu_st32(tmp, t1, flags);
224 tcg_temp_free(tmp);
225 tcg_temp_free_i32(tmp32);
226 }
227
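/* The load-locked/store-conditional pairs below model the lock with the
   single cpu_lock global: LDx_L records the locked address, STx_C
   succeeds only while cpu_lock still equals the store address, and the
   lock is always consumed.  A host-level sketch of the generated
   behaviour, for illustration only (not built): */
#if 0
static int stx_c_behaviour(uint64_t *lock, uint64_t addr)
{
    int success = (*lock == addr);  /* lock was set to addr by LDx_L */
    *lock = (uint64_t)-1;           /* consumed whether or not it hit */
    return success;                 /* written back into ra as 1 or 0 */
}
#endif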
228 static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
229 {
230 int l1, l2;
231
232 l1 = gen_new_label();
233 l2 = gen_new_label();
234 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
235 tcg_gen_qemu_st32(t0, t1, flags);
236 tcg_gen_movi_i64(t0, 1);
237 tcg_gen_br(l2);
238 gen_set_label(l1);
239 tcg_gen_movi_i64(t0, 0);
240 gen_set_label(l2);
241 tcg_gen_movi_i64(cpu_lock, -1);
242 }
243
244 static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
245 {
246 int l1, l2;
247
248 l1 = gen_new_label();
249 l2 = gen_new_label();
250 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
251 tcg_gen_qemu_st64(t0, t1, flags);
252 tcg_gen_movi_i64(t0, 1);
253 tcg_gen_br(l2);
254 gen_set_label(l1);
255 tcg_gen_movi_i64(t0, 0);
256 gen_set_label(l2);
257 tcg_gen_movi_i64(cpu_lock, -1);
258 }
259
260 static inline void gen_store_mem(DisasContext *ctx,
261 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
262 int flags),
263 int ra, int rb, int32_t disp16, int fp,
264 int clear, int local)
265 {
266 TCGv addr;
267 if (local)
268 addr = tcg_temp_local_new();
269 else
270 addr = tcg_temp_new();
271 if (rb != 31) {
272 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
273 if (clear)
274 tcg_gen_andi_i64(addr, addr, ~0x7);
275 } else {
276 if (clear)
277 disp16 &= ~0x7;
278 tcg_gen_movi_i64(addr, disp16);
279 }
280 if (ra != 31) {
281 if (fp)
282 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
283 else
284 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
285 } else {
286 TCGv zero;
287 if (local)
288 zero = tcg_const_local_i64(0);
289 else
290 zero = tcg_const_i64(0);
291 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
292 tcg_temp_free(zero);
293 }
294 tcg_temp_free(addr);
295 }
296
297 static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
298 int32_t disp, int mask)
299 {
300 int l1, l2;
301
302 l1 = gen_new_label();
303 l2 = gen_new_label();
304 if (likely(ra != 31)) {
305 if (mask) {
306 TCGv tmp = tcg_temp_new();
307 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
308 tcg_gen_brcondi_i64(cond, tmp, 0, l1);
309 tcg_temp_free(tmp);
310 } else
311 tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
312 } else {
313 /* Very uncommon case - Do not bother to optimize. */
314 TCGv tmp = tcg_const_i64(0);
315 tcg_gen_brcondi_i64(cond, tmp, 0, l1);
316 tcg_temp_free(tmp);
317 }
318 tcg_gen_movi_i64(cpu_pc, ctx->pc);
319 tcg_gen_br(l2);
320 gen_set_label(l1);
321 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
322 gen_set_label(l2);
323 }
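/* ctx->pc has already been advanced past the branch when translate_one
   runs (BR and BSR below store it directly as the return address), so
   the taken target above is (branch address + 4) + 4*disp.  A worked
   case: disp == -1 branches back to the branch itself, the classic
   "br ." self-loop. */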
324
325 static inline void gen_fbcond(DisasContext *ctx, int opc, int ra, int32_t disp)
326 {
327 int l1, l2;
328 TCGv tmp;
329 TCGv src;
330
331 l1 = gen_new_label();
332 l2 = gen_new_label();
333 if (ra != 31) {
334 tmp = tcg_temp_new();
335 src = cpu_fir[ra];
336 } else {
337 tmp = tcg_const_i64(0);
338 src = tmp;
339 }
340 switch (opc) {
341 case 0x31: /* FBEQ */
342 gen_helper_cmpfeq(tmp, src);
343 break;
344 case 0x32: /* FBLT */
345 gen_helper_cmpflt(tmp, src);
346 break;
347 case 0x33: /* FBLE */
348 gen_helper_cmpfle(tmp, src);
349 break;
350 case 0x35: /* FBNE */
351 gen_helper_cmpfne(tmp, src);
352 break;
353 case 0x36: /* FBGE */
354 gen_helper_cmpfge(tmp, src);
355 break;
356 case 0x37: /* FBGT */
357 gen_helper_cmpfgt(tmp, src);
358 break;
359 default:
360 abort();
361 }
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
    tcg_temp_free(tmp);
363 tcg_gen_movi_i64(cpu_pc, ctx->pc);
364 tcg_gen_br(l2);
365 gen_set_label(l1);
366 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
367 gen_set_label(l2);
368 }
369
370 static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
371 int islit, uint8_t lit, int mask)
372 {
373 int l1;
374
375 if (unlikely(rc == 31))
376 return;
377
378 l1 = gen_new_label();
379
380 if (ra != 31) {
381 if (mask) {
382 TCGv tmp = tcg_temp_new();
383 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
384 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
385 tcg_temp_free(tmp);
386 } else
387 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
388 } else {
389 /* Very uncommon case - Do not bother to optimize. */
390 TCGv tmp = tcg_const_i64(0);
391 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
392 tcg_temp_free(tmp);
393 }
394
395 if (islit)
396 tcg_gen_movi_i64(cpu_ir[rc], lit);
397 else
398 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
399 gen_set_label(l1);
400 }
401
402 #define FARITH2(name) \
403 static inline void glue(gen_f, name)(int rb, int rc) \
404 { \
405 if (unlikely(rc == 31)) \
406 return; \
407 \
408 if (rb != 31) \
409 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
410 else { \
411 TCGv tmp = tcg_const_i64(0); \
412 gen_helper_ ## name (cpu_fir[rc], tmp); \
413 tcg_temp_free(tmp); \
414 } \
415 }
416 FARITH2(sqrts)
417 FARITH2(sqrtf)
418 FARITH2(sqrtg)
419 FARITH2(sqrtt)
420 FARITH2(cvtgf)
421 FARITH2(cvtgq)
422 FARITH2(cvtqf)
423 FARITH2(cvtqg)
424 FARITH2(cvtst)
425 FARITH2(cvtts)
426 FARITH2(cvttq)
427 FARITH2(cvtqs)
428 FARITH2(cvtqt)
429 FARITH2(cvtlq)
430 FARITH2(cvtql)
431 FARITH2(cvtqlv)
432 FARITH2(cvtqlsv)
433
434 #define FARITH3(name) \
435 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
436 { \
437 if (unlikely(rc == 31)) \
438 return; \
439 \
440 if (ra != 31) { \
441 if (rb != 31) \
442 gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
443 else { \
444 TCGv tmp = tcg_const_i64(0); \
445 gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp); \
446 tcg_temp_free(tmp); \
447 } \
448 } else { \
449 TCGv tmp = tcg_const_i64(0); \
450 if (rb != 31) \
451 gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]); \
452 else \
453 gen_helper_ ## name (cpu_fir[rc], tmp, tmp); \
454 tcg_temp_free(tmp); \
455 } \
456 }
457
458 FARITH3(addf)
459 FARITH3(subf)
460 FARITH3(mulf)
461 FARITH3(divf)
462 FARITH3(addg)
463 FARITH3(subg)
464 FARITH3(mulg)
465 FARITH3(divg)
466 FARITH3(cmpgeq)
467 FARITH3(cmpglt)
468 FARITH3(cmpgle)
469 FARITH3(adds)
470 FARITH3(subs)
471 FARITH3(muls)
472 FARITH3(divs)
473 FARITH3(addt)
474 FARITH3(subt)
475 FARITH3(mult)
476 FARITH3(divt)
477 FARITH3(cmptun)
478 FARITH3(cmpteq)
479 FARITH3(cmptlt)
480 FARITH3(cmptle)
481 FARITH3(cpys)
482 FARITH3(cpysn)
483 FARITH3(cpyse)
484
485 #define FCMOV(name) \
486 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
487 { \
488 int l1; \
489 TCGv tmp; \
490 \
491 if (unlikely(rc == 31)) \
492 return; \
493 \
494 l1 = gen_new_label(); \
    tmp = tcg_temp_new();                                      \
    if (ra != 31) {                                            \
        gen_helper_ ## name (tmp, cpu_fir[ra]);                \
    } else {                                                   \
        tcg_gen_movi_i64(tmp, 0);                              \
        gen_helper_ ## name (tmp, tmp);                        \
    }                                                          \
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);              \
    tcg_temp_free(tmp);                                        \
    if (rb != 31)                                              \
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);             \
506 else \
507 tcg_gen_movi_i64(cpu_fir[rc], 0); \
508 gen_set_label(l1); \
509 }
510 FCMOV(cmpfeq)
511 FCMOV(cmpfne)
512 FCMOV(cmpflt)
513 FCMOV(cmpfge)
514 FCMOV(cmpfle)
515 FCMOV(cmpfgt)
516
517 static inline uint64_t zapnot_mask(uint8_t lit)
518 {
519 uint64_t mask = 0;
520 int i;
521
522 for (i = 0; i < 8; ++i) {
523 if ((lit >> i) & 1)
524 mask |= 0xffull << (i * 8);
525 }
526 return mask;
527 }
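/* A few host-side spot checks of the expansion above, for illustration
   only (not built): */
#if 0
static void zapnot_mask_examples(void)
{
    assert(zapnot_mask(0x01) == 0x00000000000000ffull); /* low byte     */
    assert(zapnot_mask(0x0f) == 0x00000000ffffffffull); /* low longword */
    assert(zapnot_mask(0x80) == 0xff00000000000000ull); /* high byte    */
    assert(zapnot_mask(0xff) == ~0ull);                 /* everything   */
}
#endif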
528
529 /* Implement zapnot with an immediate operand, which expands to some
530 form of immediate AND. This is a basic building block in the
531 definition of many of the other byte manipulation instructions. */
532 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
533 {
534 switch (lit) {
535 case 0x00:
536 tcg_gen_movi_i64(dest, 0);
537 break;
538 case 0x01:
539 tcg_gen_ext8u_i64(dest, src);
540 break;
541 case 0x03:
542 tcg_gen_ext16u_i64(dest, src);
543 break;
544 case 0x0f:
545 tcg_gen_ext32u_i64(dest, src);
546 break;
547 case 0xff:
548 tcg_gen_mov_i64(dest, src);
549 break;
550 default:
551 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
552 break;
553 }
554 }
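/* The byte_mask values passed in by the opcode 0x12 cases below track
   the operand width: 0x01 for the byte forms (MSKBL, EXTBL, INSBL),
   0x03 for the word forms, 0x0f for longword and 0xff for quadword. */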
555
556 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
557 {
558 if (unlikely(rc == 31))
559 return;
560 else if (unlikely(ra == 31))
561 tcg_gen_movi_i64(cpu_ir[rc], 0);
562 else if (islit)
563 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
564 else
565 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
566 }
567
568 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
569 {
570 if (unlikely(rc == 31))
571 return;
572 else if (unlikely(ra == 31))
573 tcg_gen_movi_i64(cpu_ir[rc], 0);
574 else if (islit)
575 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
576 else
577 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
578 }
579
580
581 /* EXTWH, EXTLH, EXTQH */
582 static void gen_ext_h(int ra, int rb, int rc, int islit,
583 uint8_t lit, uint8_t byte_mask)
584 {
585 if (unlikely(rc == 31))
586 return;
587 else if (unlikely(ra == 31))
588 tcg_gen_movi_i64(cpu_ir[rc], 0);
589 else {
590 if (islit) {
591 lit = (64 - (lit & 7) * 8) & 0x3f;
592 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
593 } else {
594 TCGv tmp1 = tcg_temp_new();
595 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
596 tcg_gen_shli_i64(tmp1, tmp1, 3);
597 tcg_gen_neg_i64(tmp1, tmp1);
598 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
599 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
600 tcg_temp_free(tmp1);
601 }
602 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
603 }
604 }
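/* Worked example for the islit path above: EXTQH with (lit & 7) == 2
   shifts left by (64 - 16) & 0x3f == 48, while (lit & 7) == 0 yields
   (64 - 0) & 0x3f == 0, i.e. no shift at all -- the reason the & 0x3f
   is there. */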
605
606 /* EXTBL, EXTWL, EXTLL, EXTQL */
607 static void gen_ext_l(int ra, int rb, int rc, int islit,
608 uint8_t lit, uint8_t byte_mask)
609 {
610 if (unlikely(rc == 31))
611 return;
612 else if (unlikely(ra == 31))
613 tcg_gen_movi_i64(cpu_ir[rc], 0);
614 else {
615 if (islit) {
616 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
617 } else {
618 TCGv tmp = tcg_temp_new();
619 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
620 tcg_gen_shli_i64(tmp, tmp, 3);
621 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
622 tcg_temp_free(tmp);
623 }
624 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
625 }
626 }
627
628 /* INSWH, INSLH, INSQH */
629 static void gen_ins_h(int ra, int rb, int rc, int islit,
630 uint8_t lit, uint8_t byte_mask)
631 {
632 if (unlikely(rc == 31))
633 return;
634 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
635 tcg_gen_movi_i64(cpu_ir[rc], 0);
636 else {
637 TCGv tmp = tcg_temp_new();
638
639 /* The instruction description has us left-shift the byte mask
640 and extract bits <15:8> and apply that zap at the end. This
641 is equivalent to simply performing the zap first and shifting
642 afterward. */
643 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
644
645 if (islit) {
646 /* Note that we have handled the lit==0 case above. */
647 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
648 } else {
649 TCGv shift = tcg_temp_new();
650
651 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
652 Do this portably by splitting the shift into two parts:
653 shift_count-1 and 1. Arrange for the -1 by using
654 ones-complement instead of twos-complement in the negation:
655 ~((B & 7) * 8) & 63. */
656
657 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
658 tcg_gen_shli_i64(shift, shift, 3);
659 tcg_gen_not_i64(shift, shift);
660 tcg_gen_andi_i64(shift, shift, 0x3f);
661
662 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
663 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
664 tcg_temp_free(shift);
665 }
666 tcg_temp_free(tmp);
667 }
668 }
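/* For illustration only (not built), the split shift above matches this
   host reference; the point is that (val >> shift) >> 1 totals a shift
   of 64 when (b & 7) == 0, which a single variable shift cannot
   portably express: */
#if 0
static uint64_t ins_h_reference(uint64_t val, uint64_t b)
{
    uint64_t shift = ~((b & 7) * 8) & 63;   /* shift_count - 1 */
    return (val >> shift) >> 1;
}
#endif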
669
670 /* INSBL, INSWL, INSLL, INSQL */
671 static void gen_ins_l(int ra, int rb, int rc, int islit,
672 uint8_t lit, uint8_t byte_mask)
673 {
674 if (unlikely(rc == 31))
675 return;
676 else if (unlikely(ra == 31))
677 tcg_gen_movi_i64(cpu_ir[rc], 0);
678 else {
679 TCGv tmp = tcg_temp_new();
680
681 /* The instruction description has us left-shift the byte mask
682 the same number of byte slots as the data and apply the zap
683 at the end. This is equivalent to simply performing the zap
684 first and shifting afterward. */
685 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
686
687 if (islit) {
688 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
689 } else {
690 TCGv shift = tcg_temp_new();
691 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
692 tcg_gen_shli_i64(shift, shift, 3);
693 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
694 tcg_temp_free(shift);
695 }
696 tcg_temp_free(tmp);
697 }
698 }
699
700 /* MSKWH, MSKLH, MSKQH */
701 static void gen_msk_h(int ra, int rb, int rc, int islit,
702 uint8_t lit, uint8_t byte_mask)
703 {
704 if (unlikely(rc == 31))
705 return;
706 else if (unlikely(ra == 31))
707 tcg_gen_movi_i64(cpu_ir[rc], 0);
708 else if (islit) {
709 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
710 } else {
711 TCGv shift = tcg_temp_new();
712 TCGv mask = tcg_temp_new();
713
714 /* The instruction description is as above, where the byte_mask
715 is shifted left, and then we extract bits <15:8>. This can be
716 emulated with a right-shift on the expanded byte mask. This
717 requires extra care because for an input <2:0> == 0 we need a
718 shift of 64 bits in order to generate a zero. This is done by
719 splitting the shift into two parts, the variable shift - 1
720 followed by a constant 1 shift. The code we expand below is
721 equivalent to ~((B & 7) * 8) & 63. */
722
723 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
724 tcg_gen_shli_i64(shift, shift, 3);
725 tcg_gen_not_i64(shift, shift);
726 tcg_gen_andi_i64(shift, shift, 0x3f);
727 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
728 tcg_gen_shr_i64(mask, mask, shift);
729 tcg_gen_shri_i64(mask, mask, 1);
730
731 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
732
733 tcg_temp_free(mask);
734 tcg_temp_free(shift);
735 }
736 }
737
738 /* MSKBL, MSKWL, MSKLL, MSKQL */
739 static void gen_msk_l(int ra, int rb, int rc, int islit,
740 uint8_t lit, uint8_t byte_mask)
741 {
742 if (unlikely(rc == 31))
743 return;
744 else if (unlikely(ra == 31))
745 tcg_gen_movi_i64(cpu_ir[rc], 0);
746 else if (islit) {
747 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
748 } else {
749 TCGv shift = tcg_temp_new();
750 TCGv mask = tcg_temp_new();
751
752 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
753 tcg_gen_shli_i64(shift, shift, 3);
754 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
755 tcg_gen_shl_i64(mask, mask, shift);
756
757 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
758
759 tcg_temp_free(mask);
760 tcg_temp_free(shift);
761 }
762 }
763
764 /* Code to call arith3 helpers */
765 #define ARITH3(name) \
766 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
767 uint8_t lit) \
768 { \
769 if (unlikely(rc == 31)) \
770 return; \
771 \
772 if (ra != 31) { \
773 if (islit) { \
774 TCGv tmp = tcg_const_i64(lit); \
775 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
776 tcg_temp_free(tmp); \
777 } else \
778 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
779 } else { \
780 TCGv tmp1 = tcg_const_i64(0); \
781 if (islit) { \
782 TCGv tmp2 = tcg_const_i64(lit); \
783 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
784 tcg_temp_free(tmp2); \
785 } else \
786 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
787 tcg_temp_free(tmp1); \
788 } \
789 }
790 ARITH3(cmpbge)
791 ARITH3(addlv)
792 ARITH3(sublv)
793 ARITH3(addqv)
794 ARITH3(subqv)
795 ARITH3(umulh)
796 ARITH3(mullv)
797 ARITH3(mulqv)
798 ARITH3(minub8)
799 ARITH3(minsb8)
800 ARITH3(minuw4)
801 ARITH3(minsw4)
802 ARITH3(maxub8)
803 ARITH3(maxsb8)
804 ARITH3(maxuw4)
805 ARITH3(maxsw4)
806 ARITH3(perr)
807
808 #define MVIOP2(name) \
809 static inline void glue(gen_, name)(int rb, int rc) \
810 { \
811 if (unlikely(rc == 31)) \
812 return; \
813 if (unlikely(rb == 31)) \
814 tcg_gen_movi_i64(cpu_ir[rc], 0); \
815 else \
816 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
817 }
818 MVIOP2(pklb)
819 MVIOP2(pkwb)
820 MVIOP2(unpkbl)
821 MVIOP2(unpkbw)
822
823 static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
824 uint8_t lit)
825 {
826 int l1, l2;
827 TCGv tmp;
828
829 if (unlikely(rc == 31))
830 return;
831
832 l1 = gen_new_label();
833 l2 = gen_new_label();
834
835 if (ra != 31) {
836 tmp = tcg_temp_new();
837 tcg_gen_mov_i64(tmp, cpu_ir[ra]);
838 } else
839 tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);
    tcg_temp_free(tmp);
844
845 tcg_gen_movi_i64(cpu_ir[rc], 0);
846 tcg_gen_br(l2);
847 gen_set_label(l1);
848 tcg_gen_movi_i64(cpu_ir[rc], 1);
849 gen_set_label(l2);
850 }
851
852 static inline int translate_one(DisasContext *ctx, uint32_t insn)
853 {
854 uint32_t palcode;
855 int32_t disp21, disp16, disp12;
856 uint16_t fn11, fn16;
857 uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
858 uint8_t lit;
859 int ret;
860
861 /* Decode all instruction fields */
862 opc = insn >> 26;
863 ra = (insn >> 21) & 0x1F;
864 rb = (insn >> 16) & 0x1F;
865 rc = insn & 0x1F;
866 sbz = (insn >> 13) & 0x07;
867 real_islit = islit = (insn >> 12) & 1;
868 if (rb == 31 && !islit) {
869 islit = 1;
870 lit = 0;
871 } else
872 lit = (insn >> 13) & 0xFF;
873 palcode = insn & 0x03FFFFFF;
874 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
875 disp16 = (int16_t)(insn & 0x0000FFFF);
876 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
877 fn16 = insn & 0x0000FFFF;
878 fn11 = (insn >> 5) & 0x000007FF;
879 fpfn = fn11 & 0x3F;
880 fn7 = (insn >> 5) & 0x0000007F;
881 fn2 = (insn >> 5) & 0x00000003;
882 ret = 0;
883 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
884 opc, ra, rb, rc, disp16);
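/* Worked example of the field decode: the operate-format word
   0x40220413 (ADDQ r1, r2, r19) splits into opc == 0x10, ra == 1,
   rb == 2, islit == 0, fn7 == 0x20, rc == 19. */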
885
886 switch (opc) {
887 case 0x00:
888 /* CALL_PAL */
889 #ifdef CONFIG_USER_ONLY
890 if (palcode == 0x9E) {
891 /* RDUNIQUE */
892 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
893 break;
894 } else if (palcode == 0x9F) {
895 /* WRUNIQUE */
896 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
897 break;
898 }
899 #endif
900 if (palcode >= 0x80 && palcode < 0xC0) {
901 /* Unprivileged PAL call */
902 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
903 ret = 3;
904 break;
905 }
906 #ifndef CONFIG_USER_ONLY
907 if (palcode < 0x40) {
908 /* Privileged PAL code */
909 if (ctx->mem_idx & 1)
910 goto invalid_opc;
911 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
914 #endif
915 /* Invalid PAL call */
916 goto invalid_opc;
917 case 0x01:
918 /* OPC01 */
919 goto invalid_opc;
920 case 0x02:
921 /* OPC02 */
922 goto invalid_opc;
923 case 0x03:
924 /* OPC03 */
925 goto invalid_opc;
926 case 0x04:
927 /* OPC04 */
928 goto invalid_opc;
929 case 0x05:
930 /* OPC05 */
931 goto invalid_opc;
932 case 0x06:
933 /* OPC06 */
934 goto invalid_opc;
935 case 0x07:
936 /* OPC07 */
937 goto invalid_opc;
938 case 0x08:
939 /* LDA */
940 if (likely(ra != 31)) {
941 if (rb != 31)
942 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
943 else
944 tcg_gen_movi_i64(cpu_ir[ra], disp16);
945 }
946 break;
947 case 0x09:
948 /* LDAH */
949 if (likely(ra != 31)) {
950 if (rb != 31)
951 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
952 else
953 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
954 }
955 break;
956 case 0x0A:
957 /* LDBU */
958 if (!(ctx->amask & AMASK_BWX))
959 goto invalid_opc;
960 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
961 break;
962 case 0x0B:
963 /* LDQ_U */
964 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
965 break;
966 case 0x0C:
967 /* LDWU */
968 if (!(ctx->amask & AMASK_BWX))
969 goto invalid_opc;
970 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
971 break;
972 case 0x0D:
973 /* STW */
974 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
975 break;
976 case 0x0E:
977 /* STB */
978 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
979 break;
980 case 0x0F:
981 /* STQ_U */
982 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
983 break;
984 case 0x10:
985 switch (fn7) {
986 case 0x00:
987 /* ADDL */
988 if (likely(rc != 31)) {
989 if (ra != 31) {
990 if (islit) {
991 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
992 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
993 } else {
994 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
995 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
996 }
997 } else {
998 if (islit)
999 tcg_gen_movi_i64(cpu_ir[rc], lit);
1000 else
1001 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1002 }
1003 }
1004 break;
1005 case 0x02:
1006 /* S4ADDL */
1007 if (likely(rc != 31)) {
1008 if (ra != 31) {
1009 TCGv tmp = tcg_temp_new();
1010 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1011 if (islit)
1012 tcg_gen_addi_i64(tmp, tmp, lit);
1013 else
1014 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1015 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1016 tcg_temp_free(tmp);
1017 } else {
1018 if (islit)
1019 tcg_gen_movi_i64(cpu_ir[rc], lit);
1020 else
1021 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1022 }
1023 }
1024 break;
1025 case 0x09:
1026 /* SUBL */
1027 if (likely(rc != 31)) {
1028 if (ra != 31) {
1029 if (islit)
1030 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1031 else
1032 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1033 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1034 } else {
1035 if (islit)
1036 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1037 else {
1038 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1039 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1040 }
                }
            }
            break;
1043 case 0x0B:
1044 /* S4SUBL */
1045 if (likely(rc != 31)) {
1046 if (ra != 31) {
1047 TCGv tmp = tcg_temp_new();
1048 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1049 if (islit)
1050 tcg_gen_subi_i64(tmp, tmp, lit);
1051 else
1052 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1053 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1054 tcg_temp_free(tmp);
1055 } else {
1056 if (islit)
1057 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1058 else {
1059 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1060 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1061 }
1062 }
1063 }
1064 break;
1065 case 0x0F:
1066 /* CMPBGE */
1067 gen_cmpbge(ra, rb, rc, islit, lit);
1068 break;
1069 case 0x12:
1070 /* S8ADDL */
1071 if (likely(rc != 31)) {
1072 if (ra != 31) {
1073 TCGv tmp = tcg_temp_new();
1074 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1075 if (islit)
1076 tcg_gen_addi_i64(tmp, tmp, lit);
1077 else
1078 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1079 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1080 tcg_temp_free(tmp);
1081 } else {
1082 if (islit)
1083 tcg_gen_movi_i64(cpu_ir[rc], lit);
1084 else
1085 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1086 }
1087 }
1088 break;
1089 case 0x1B:
1090 /* S8SUBL */
1091 if (likely(rc != 31)) {
1092 if (ra != 31) {
1093 TCGv tmp = tcg_temp_new();
1094 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1095 if (islit)
1096 tcg_gen_subi_i64(tmp, tmp, lit);
1097 else
1098 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1099 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1100 tcg_temp_free(tmp);
1101 } else {
1102 if (islit)
1103 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1104 else
1105 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1106 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
1111 case 0x1D:
1112 /* CMPULT */
1113 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1114 break;
1115 case 0x20:
1116 /* ADDQ */
1117 if (likely(rc != 31)) {
1118 if (ra != 31) {
1119 if (islit)
1120 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1121 else
1122 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1123 } else {
1124 if (islit)
1125 tcg_gen_movi_i64(cpu_ir[rc], lit);
1126 else
1127 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1128 }
1129 }
1130 break;
1131 case 0x22:
1132 /* S4ADDQ */
1133 if (likely(rc != 31)) {
1134 if (ra != 31) {
1135 TCGv tmp = tcg_temp_new();
1136 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1137 if (islit)
1138 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1139 else
1140 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1141 tcg_temp_free(tmp);
1142 } else {
1143 if (islit)
1144 tcg_gen_movi_i64(cpu_ir[rc], lit);
1145 else
1146 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1147 }
1148 }
1149 break;
1150 case 0x29:
1151 /* SUBQ */
1152 if (likely(rc != 31)) {
1153 if (ra != 31) {
1154 if (islit)
1155 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1156 else
1157 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1158 } else {
1159 if (islit)
1160 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1161 else
1162 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1163 }
1164 }
1165 break;
1166 case 0x2B:
1167 /* S4SUBQ */
1168 if (likely(rc != 31)) {
1169 if (ra != 31) {
1170 TCGv tmp = tcg_temp_new();
1171 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1172 if (islit)
1173 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1174 else
1175 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1176 tcg_temp_free(tmp);
1177 } else {
1178 if (islit)
1179 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1180 else
1181 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1182 }
1183 }
1184 break;
1185 case 0x2D:
1186 /* CMPEQ */
1187 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1188 break;
1189 case 0x32:
1190 /* S8ADDQ */
1191 if (likely(rc != 31)) {
1192 if (ra != 31) {
1193 TCGv tmp = tcg_temp_new();
1194 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1195 if (islit)
1196 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1197 else
1198 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1199 tcg_temp_free(tmp);
1200 } else {
1201 if (islit)
1202 tcg_gen_movi_i64(cpu_ir[rc], lit);
1203 else
1204 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1205 }
1206 }
1207 break;
1208 case 0x3B:
1209 /* S8SUBQ */
1210 if (likely(rc != 31)) {
1211 if (ra != 31) {
1212 TCGv tmp = tcg_temp_new();
1213 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1214 if (islit)
1215 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1216 else
1217 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1218 tcg_temp_free(tmp);
1219 } else {
1220 if (islit)
1221 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1222 else
1223 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1224 }
1225 }
1226 break;
1227 case 0x3D:
1228 /* CMPULE */
1229 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1230 break;
1231 case 0x40:
1232 /* ADDL/V */
1233 gen_addlv(ra, rb, rc, islit, lit);
1234 break;
1235 case 0x49:
1236 /* SUBL/V */
1237 gen_sublv(ra, rb, rc, islit, lit);
1238 break;
1239 case 0x4D:
1240 /* CMPLT */
1241 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1242 break;
1243 case 0x60:
1244 /* ADDQ/V */
1245 gen_addqv(ra, rb, rc, islit, lit);
1246 break;
1247 case 0x69:
1248 /* SUBQ/V */
1249 gen_subqv(ra, rb, rc, islit, lit);
1250 break;
1251 case 0x6D:
1252 /* CMPLE */
1253 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1254 break;
1255 default:
1256 goto invalid_opc;
1257 }
1258 break;
1259 case 0x11:
1260 switch (fn7) {
1261 case 0x00:
1262 /* AND */
1263 if (likely(rc != 31)) {
1264 if (ra == 31)
1265 tcg_gen_movi_i64(cpu_ir[rc], 0);
1266 else if (islit)
1267 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1268 else
1269 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1270 }
1271 break;
1272 case 0x08:
1273 /* BIC */
1274 if (likely(rc != 31)) {
1275 if (ra != 31) {
1276 if (islit)
1277 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1278 else
1279 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1280 } else
1281 tcg_gen_movi_i64(cpu_ir[rc], 0);
1282 }
1283 break;
1284 case 0x14:
1285 /* CMOVLBS */
1286 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1287 break;
1288 case 0x16:
1289 /* CMOVLBC */
1290 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1291 break;
1292 case 0x20:
1293 /* BIS */
1294 if (likely(rc != 31)) {
1295 if (ra != 31) {
1296 if (islit)
1297 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1298 else
1299 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1300 } else {
1301 if (islit)
1302 tcg_gen_movi_i64(cpu_ir[rc], lit);
1303 else
1304 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1305 }
1306 }
1307 break;
1308 case 0x24:
1309 /* CMOVEQ */
1310 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1311 break;
1312 case 0x26:
1313 /* CMOVNE */
1314 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1315 break;
1316 case 0x28:
1317 /* ORNOT */
1318 if (likely(rc != 31)) {
1319 if (ra != 31) {
1320 if (islit)
1321 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1322 else
1323 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1324 } else {
1325 if (islit)
1326 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1327 else
1328 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1329 }
1330 }
1331 break;
1332 case 0x40:
1333 /* XOR */
1334 if (likely(rc != 31)) {
1335 if (ra != 31) {
1336 if (islit)
1337 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1338 else
1339 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1340 } else {
1341 if (islit)
1342 tcg_gen_movi_i64(cpu_ir[rc], lit);
1343 else
1344 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1345 }
1346 }
1347 break;
1348 case 0x44:
1349 /* CMOVLT */
1350 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1351 break;
1352 case 0x46:
1353 /* CMOVGE */
1354 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1355 break;
1356 case 0x48:
1357 /* EQV */
1358 if (likely(rc != 31)) {
1359 if (ra != 31) {
1360 if (islit)
1361 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1362 else
1363 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1364 } else {
1365 if (islit)
1366 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1367 else
1368 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1369 }
1370 }
1371 break;
1372 case 0x61:
1373 /* AMASK */
1374 if (likely(rc != 31)) {
1375 if (islit)
1376 tcg_gen_movi_i64(cpu_ir[rc], lit);
1377 else
1378 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1379 switch (ctx->env->implver) {
1380 case IMPLVER_2106x:
1381 /* EV4, EV45, LCA, LCA45 & EV5 */
1382 break;
1383 case IMPLVER_21164:
1384 case IMPLVER_21264:
1385 case IMPLVER_21364:
1386 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1387 ~(uint64_t)ctx->amask);
1388 break;
1389 }
1390 }
1391 break;
1392 case 0x64:
1393 /* CMOVLE */
1394 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
1395 break;
1396 case 0x66:
1397 /* CMOVGT */
1398 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
1399 break;
1400 case 0x6C:
1401 /* IMPLVER */
1402 if (rc != 31)
1403 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
1404 break;
1405 default:
1406 goto invalid_opc;
1407 }
1408 break;
1409 case 0x12:
1410 switch (fn7) {
1411 case 0x02:
1412 /* MSKBL */
1413 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
1414 break;
1415 case 0x06:
1416 /* EXTBL */
1417 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
1418 break;
1419 case 0x0B:
1420 /* INSBL */
1421 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
1422 break;
1423 case 0x12:
1424 /* MSKWL */
1425 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
1426 break;
1427 case 0x16:
1428 /* EXTWL */
1429 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
1430 break;
1431 case 0x1B:
1432 /* INSWL */
1433 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
1434 break;
1435 case 0x22:
1436 /* MSKLL */
1437 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
1438 break;
1439 case 0x26:
1440 /* EXTLL */
1441 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
1442 break;
1443 case 0x2B:
1444 /* INSLL */
1445 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
1446 break;
1447 case 0x30:
1448 /* ZAP */
1449 gen_zap(ra, rb, rc, islit, lit);
1450 break;
1451 case 0x31:
1452 /* ZAPNOT */
1453 gen_zapnot(ra, rb, rc, islit, lit);
1454 break;
1455 case 0x32:
1456 /* MSKQL */
1457 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
1458 break;
1459 case 0x34:
1460 /* SRL */
1461 if (likely(rc != 31)) {
1462 if (ra != 31) {
1463 if (islit)
1464 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1465 else {
1466 TCGv shift = tcg_temp_new();
1467 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1468 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
1469 tcg_temp_free(shift);
1470 }
1471 } else
1472 tcg_gen_movi_i64(cpu_ir[rc], 0);
1473 }
1474 break;
1475 case 0x36:
1476 /* EXTQL */
1477 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
1478 break;
1479 case 0x39:
1480 /* SLL */
1481 if (likely(rc != 31)) {
1482 if (ra != 31) {
1483 if (islit)
1484 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1485 else {
1486 TCGv shift = tcg_temp_new();
1487 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1488 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
1489 tcg_temp_free(shift);
1490 }
1491 } else
1492 tcg_gen_movi_i64(cpu_ir[rc], 0);
1493 }
1494 break;
1495 case 0x3B:
1496 /* INSQL */
1497 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
1498 break;
1499 case 0x3C:
1500 /* SRA */
1501 if (likely(rc != 31)) {
1502 if (ra != 31) {
1503 if (islit)
1504 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
1505 else {
1506 TCGv shift = tcg_temp_new();
1507 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
1508 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
1509 tcg_temp_free(shift);
1510 }
1511 } else
1512 tcg_gen_movi_i64(cpu_ir[rc], 0);
1513 }
1514 break;
1515 case 0x52:
1516 /* MSKWH */
1517 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
1518 break;
1519 case 0x57:
1520 /* INSWH */
1521 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
1522 break;
1523 case 0x5A:
1524 /* EXTWH */
1525 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
1526 break;
1527 case 0x62:
1528 /* MSKLH */
1529 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
1530 break;
1531 case 0x67:
1532 /* INSLH */
1533 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
1534 break;
1535 case 0x6A:
1536 /* EXTLH */
1537 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
1538 break;
1539 case 0x72:
1540 /* MSKQH */
1541 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
1542 break;
1543 case 0x77:
1544 /* INSQH */
1545 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
1546 break;
1547 case 0x7A:
1548 /* EXTQH */
1549 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
1550 break;
1551 default:
1552 goto invalid_opc;
1553 }
1554 break;
1555 case 0x13:
1556 switch (fn7) {
1557 case 0x00:
1558 /* MULL */
1559 if (likely(rc != 31)) {
1560 if (ra == 31)
1561 tcg_gen_movi_i64(cpu_ir[rc], 0);
1562 else {
1563 if (islit)
1564 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1565 else
1566 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1567 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1568 }
1569 }
1570 break;
1571 case 0x20:
1572 /* MULQ */
1573 if (likely(rc != 31)) {
1574 if (ra == 31)
1575 tcg_gen_movi_i64(cpu_ir[rc], 0);
1576 else if (islit)
1577 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1578 else
1579 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1580 }
1581 break;
1582 case 0x30:
1583 /* UMULH */
1584 gen_umulh(ra, rb, rc, islit, lit);
1585 break;
1586 case 0x40:
1587 /* MULL/V */
1588 gen_mullv(ra, rb, rc, islit, lit);
1589 break;
1590 case 0x60:
1591 /* MULQ/V */
1592 gen_mulqv(ra, rb, rc, islit, lit);
1593 break;
1594 default:
1595 goto invalid_opc;
1596 }
1597 break;
1598 case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
1600 case 0x04:
1601 /* ITOFS */
1602 if (!(ctx->amask & AMASK_FIX))
1603 goto invalid_opc;
1604 if (likely(rc != 31)) {
1605 if (ra != 31) {
1606 TCGv_i32 tmp = tcg_temp_new_i32();
1607 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1608 gen_helper_memory_to_s(cpu_fir[rc], tmp);
1609 tcg_temp_free_i32(tmp);
1610 } else
1611 tcg_gen_movi_i64(cpu_fir[rc], 0);
1612 }
1613 break;
1614 case 0x0A:
1615 /* SQRTF */
1616 if (!(ctx->amask & AMASK_FIX))
1617 goto invalid_opc;
1618 gen_fsqrtf(rb, rc);
1619 break;
1620 case 0x0B:
1621 /* SQRTS */
1622 if (!(ctx->amask & AMASK_FIX))
1623 goto invalid_opc;
1624 gen_fsqrts(rb, rc);
1625 break;
1626 case 0x14:
1627 /* ITOFF */
1628 if (!(ctx->amask & AMASK_FIX))
1629 goto invalid_opc;
1630 if (likely(rc != 31)) {
1631 if (ra != 31) {
1632 TCGv_i32 tmp = tcg_temp_new_i32();
1633 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
1634 gen_helper_memory_to_f(cpu_fir[rc], tmp);
1635 tcg_temp_free_i32(tmp);
1636 } else
1637 tcg_gen_movi_i64(cpu_fir[rc], 0);
1638 }
1639 break;
1640 case 0x24:
1641 /* ITOFT */
1642 if (!(ctx->amask & AMASK_FIX))
1643 goto invalid_opc;
1644 if (likely(rc != 31)) {
1645 if (ra != 31)
1646 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
1647 else
1648 tcg_gen_movi_i64(cpu_fir[rc], 0);
1649 }
1650 break;
1651 case 0x2A:
1652 /* SQRTG */
1653 if (!(ctx->amask & AMASK_FIX))
1654 goto invalid_opc;
1655 gen_fsqrtg(rb, rc);
1656 break;
        case 0x2B:
1658 /* SQRTT */
1659 if (!(ctx->amask & AMASK_FIX))
1660 goto invalid_opc;
1661 gen_fsqrtt(rb, rc);
1662 break;
1663 default:
1664 goto invalid_opc;
1665 }
1666 break;
1667 case 0x15:
1668 /* VAX floating point */
1669 /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
1671 case 0x00:
1672 /* ADDF */
1673 gen_faddf(ra, rb, rc);
1674 break;
1675 case 0x01:
1676 /* SUBF */
1677 gen_fsubf(ra, rb, rc);
1678 break;
1679 case 0x02:
1680 /* MULF */
1681 gen_fmulf(ra, rb, rc);
1682 break;
1683 case 0x03:
1684 /* DIVF */
1685 gen_fdivf(ra, rb, rc);
1686 break;
1687 case 0x1E:
1688 /* CVTDG */
1689 #if 0 // TODO
1690 gen_fcvtdg(rb, rc);
1691 #else
1692 goto invalid_opc;
1693 #endif
1694 break;
1695 case 0x20:
1696 /* ADDG */
1697 gen_faddg(ra, rb, rc);
1698 break;
1699 case 0x21:
1700 /* SUBG */
1701 gen_fsubg(ra, rb, rc);
1702 break;
1703 case 0x22:
1704 /* MULG */
1705 gen_fmulg(ra, rb, rc);
1706 break;
1707 case 0x23:
1708 /* DIVG */
1709 gen_fdivg(ra, rb, rc);
1710 break;
1711 case 0x25:
1712 /* CMPGEQ */
1713 gen_fcmpgeq(ra, rb, rc);
1714 break;
1715 case 0x26:
1716 /* CMPGLT */
1717 gen_fcmpglt(ra, rb, rc);
1718 break;
1719 case 0x27:
1720 /* CMPGLE */
1721 gen_fcmpgle(ra, rb, rc);
1722 break;
1723 case 0x2C:
1724 /* CVTGF */
1725 gen_fcvtgf(rb, rc);
1726 break;
1727 case 0x2D:
1728 /* CVTGD */
1729 #if 0 // TODO
1730 gen_fcvtgd(rb, rc);
1731 #else
1732 goto invalid_opc;
1733 #endif
1734 break;
1735 case 0x2F:
1736 /* CVTGQ */
1737 gen_fcvtgq(rb, rc);
1738 break;
1739 case 0x3C:
1740 /* CVTQF */
1741 gen_fcvtqf(rb, rc);
1742 break;
1743 case 0x3E:
1744 /* CVTQG */
1745 gen_fcvtqg(rb, rc);
1746 break;
1747 default:
1748 goto invalid_opc;
1749 }
1750 break;
1751 case 0x16:
1752 /* IEEE floating-point */
1753 /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
1755 case 0x00:
1756 /* ADDS */
1757 gen_fadds(ra, rb, rc);
1758 break;
1759 case 0x01:
1760 /* SUBS */
1761 gen_fsubs(ra, rb, rc);
1762 break;
1763 case 0x02:
1764 /* MULS */
1765 gen_fmuls(ra, rb, rc);
1766 break;
1767 case 0x03:
1768 /* DIVS */
1769 gen_fdivs(ra, rb, rc);
1770 break;
1771 case 0x20:
1772 /* ADDT */
1773 gen_faddt(ra, rb, rc);
1774 break;
1775 case 0x21:
1776 /* SUBT */
1777 gen_fsubt(ra, rb, rc);
1778 break;
1779 case 0x22:
1780 /* MULT */
1781 gen_fmult(ra, rb, rc);
1782 break;
1783 case 0x23:
1784 /* DIVT */
1785 gen_fdivt(ra, rb, rc);
1786 break;
1787 case 0x24:
1788 /* CMPTUN */
1789 gen_fcmptun(ra, rb, rc);
1790 break;
1791 case 0x25:
1792 /* CMPTEQ */
1793 gen_fcmpteq(ra, rb, rc);
1794 break;
1795 case 0x26:
1796 /* CMPTLT */
1797 gen_fcmptlt(ra, rb, rc);
1798 break;
1799 case 0x27:
1800 /* CMPTLE */
1801 gen_fcmptle(ra, rb, rc);
1802 break;
1803 case 0x2C:
1804 /* XXX: incorrect */
1805 if (fn11 == 0x2AC || fn11 == 0x6AC) {
1806 /* CVTST */
1807 gen_fcvtst(rb, rc);
1808 } else {
1809 /* CVTTS */
1810 gen_fcvtts(rb, rc);
1811 }
1812 break;
1813 case 0x2F:
1814 /* CVTTQ */
1815 gen_fcvttq(rb, rc);
1816 break;
1817 case 0x3C:
1818 /* CVTQS */
1819 gen_fcvtqs(rb, rc);
1820 break;
1821 case 0x3E:
1822 /* CVTQT */
1823 gen_fcvtqt(rb, rc);
1824 break;
1825 default:
1826 goto invalid_opc;
1827 }
1828 break;
1829 case 0x17:
1830 switch (fn11) {
1831 case 0x010:
1832 /* CVTLQ */
1833 gen_fcvtlq(rb, rc);
1834 break;
1835 case 0x020:
1836 if (likely(rc != 31)) {
1837 if (ra == rb)
1838 /* FMOV */
1839 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
1840 else
1841 /* CPYS */
1842 gen_fcpys(ra, rb, rc);
1843 }
1844 break;
1845 case 0x021:
1846 /* CPYSN */
1847 gen_fcpysn(ra, rb, rc);
1848 break;
1849 case 0x022:
1850 /* CPYSE */
1851 gen_fcpyse(ra, rb, rc);
1852 break;
1853 case 0x024:
1854 /* MT_FPCR */
1855 if (likely(ra != 31))
1856 gen_helper_store_fpcr(cpu_fir[ra]);
1857 else {
1858 TCGv tmp = tcg_const_i64(0);
1859 gen_helper_store_fpcr(tmp);
1860 tcg_temp_free(tmp);
1861 }
1862 break;
1863 case 0x025:
1864 /* MF_FPCR */
1865 if (likely(ra != 31))
1866 gen_helper_load_fpcr(cpu_fir[ra]);
1867 break;
1868 case 0x02A:
1869 /* FCMOVEQ */
1870 gen_fcmpfeq(ra, rb, rc);
1871 break;
1872 case 0x02B:
1873 /* FCMOVNE */
1874 gen_fcmpfne(ra, rb, rc);
1875 break;
1876 case 0x02C:
1877 /* FCMOVLT */
1878 gen_fcmpflt(ra, rb, rc);
1879 break;
1880 case 0x02D:
1881 /* FCMOVGE */
1882 gen_fcmpfge(ra, rb, rc);
1883 break;
1884 case 0x02E:
1885 /* FCMOVLE */
1886 gen_fcmpfle(ra, rb, rc);
1887 break;
1888 case 0x02F:
1889 /* FCMOVGT */
1890 gen_fcmpfgt(ra, rb, rc);
1891 break;
1892 case 0x030:
1893 /* CVTQL */
1894 gen_fcvtql(rb, rc);
1895 break;
1896 case 0x130:
1897 /* CVTQL/V */
1898 gen_fcvtqlv(rb, rc);
1899 break;
1900 case 0x530:
1901 /* CVTQL/SV */
1902 gen_fcvtqlsv(rb, rc);
1903 break;
1904 default:
1905 goto invalid_opc;
1906 }
1907 break;
1908 case 0x18:
1909 switch ((uint16_t)disp16) {
1910 case 0x0000:
1911 /* TRAPB */
1912 /* No-op. Just exit from the current tb */
1913 ret = 2;
1914 break;
1915 case 0x0400:
1916 /* EXCB */
1917 /* No-op. Just exit from the current tb */
1918 ret = 2;
1919 break;
1920 case 0x4000:
1921 /* MB */
1922 /* No-op */
1923 break;
1924 case 0x4400:
1925 /* WMB */
1926 /* No-op */
1927 break;
1928 case 0x8000:
1929 /* FETCH */
1930 /* No-op */
1931 break;
1932 case 0xA000:
1933 /* FETCH_M */
1934 /* No-op */
1935 break;
1936 case 0xC000:
1937 /* RPCC */
1938 if (ra != 31)
1939 gen_helper_load_pcc(cpu_ir[ra]);
1940 break;
1941 case 0xE000:
1942 /* RC */
1943 if (ra != 31)
1944 gen_helper_rc(cpu_ir[ra]);
1945 break;
1946 case 0xE800:
1947 /* ECB */
1948 break;
1949 case 0xF000:
1950 /* RS */
1951 if (ra != 31)
1952 gen_helper_rs(cpu_ir[ra]);
1953 break;
1954 case 0xF800:
1955 /* WH64 */
1956 /* No-op */
1957 break;
1958 default:
1959 goto invalid_opc;
1960 }
1961 break;
1962 case 0x19:
1963 /* HW_MFPR (PALcode) */
1964 #if defined (CONFIG_USER_ONLY)
1965 goto invalid_opc;
1966 #else
1967 if (!ctx->pal_mode)
1968 goto invalid_opc;
1969 if (ra != 31) {
            TCGv tmp = tcg_const_i64(insn & 0xFF);
1971 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
1972 tcg_temp_free(tmp);
1973 }
1974 break;
1975 #endif
1976 case 0x1A:
1977 if (rb != 31)
1978 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
1979 else
1980 tcg_gen_movi_i64(cpu_pc, 0);
1981 if (ra != 31)
1982 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
1983 /* Those four jumps only differ by the branch prediction hint */
1984 switch (fn2) {
1985 case 0x0:
1986 /* JMP */
1987 break;
1988 case 0x1:
1989 /* JSR */
1990 break;
1991 case 0x2:
1992 /* RET */
1993 break;
1994 case 0x3:
1995 /* JSR_COROUTINE */
1996 break;
1997 }
1998 ret = 1;
1999 break;
2000 case 0x1B:
2001 /* HW_LD (PALcode) */
2002 #if defined (CONFIG_USER_ONLY)
2003 goto invalid_opc;
2004 #else
2005 if (!ctx->pal_mode)
2006 goto invalid_opc;
2007 if (ra != 31) {
2008 TCGv addr = tcg_temp_new();
2009 if (rb != 31)
2010 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2011 else
2012 tcg_gen_movi_i64(addr, disp12);
2013 switch ((insn >> 12) & 0xF) {
2014 case 0x0:
2015 /* Longword physical access (hw_ldl/p) */
2016 gen_helper_ldl_raw(cpu_ir[ra], addr);
2017 break;
2018 case 0x1:
2019 /* Quadword physical access (hw_ldq/p) */
2020 gen_helper_ldq_raw(cpu_ir[ra], addr);
2021 break;
2022 case 0x2:
2023 /* Longword physical access with lock (hw_ldl_l/p) */
2024 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
2025 break;
2026 case 0x3:
2027 /* Quadword physical access with lock (hw_ldq_l/p) */
2028 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
2029 break;
2030 case 0x4:
2031 /* Longword virtual PTE fetch (hw_ldl/v) */
2032 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2033 break;
2034 case 0x5:
2035 /* Quadword virtual PTE fetch (hw_ldq/v) */
2036 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2037 break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
2044 case 0x8:
2045 /* Longword virtual access (hw_ldl) */
2046 gen_helper_st_virt_to_phys(addr, addr);
2047 gen_helper_ldl_raw(cpu_ir[ra], addr);
2048 break;
2049 case 0x9:
2050 /* Quadword virtual access (hw_ldq) */
2051 gen_helper_st_virt_to_phys(addr, addr);
2052 gen_helper_ldq_raw(cpu_ir[ra], addr);
2053 break;
2054 case 0xA:
2055 /* Longword virtual access with protection check (hw_ldl/w) */
2056 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2057 break;
2058 case 0xB:
2059 /* Quadword virtual access with protection check (hw_ldq/w) */
2060 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2061 break;
2062 case 0xC:
2063 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2064 gen_helper_set_alt_mode();
2065 gen_helper_st_virt_to_phys(addr, addr);
2066 gen_helper_ldl_raw(cpu_ir[ra], addr);
2067 gen_helper_restore_mode();
2068 break;
2069 case 0xD:
2070 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2071 gen_helper_set_alt_mode();
2072 gen_helper_st_virt_to_phys(addr, addr);
2073 gen_helper_ldq_raw(cpu_ir[ra], addr);
2074 gen_helper_restore_mode();
2075 break;
2076 case 0xE:
2077 /* Longword virtual access with alternate access mode and
2078 * protection checks (hw_ldl/wa)
2079 */
2080 gen_helper_set_alt_mode();
2081 gen_helper_ldl_data(cpu_ir[ra], addr);
2082 gen_helper_restore_mode();
2083 break;
2084 case 0xF:
2085 /* Quadword virtual access with alternate access mode and
2086 * protection checks (hw_ldq/wa)
2087 */
2088 gen_helper_set_alt_mode();
2089 gen_helper_ldq_data(cpu_ir[ra], addr);
2090 gen_helper_restore_mode();
2091 break;
2092 }
2093 tcg_temp_free(addr);
2094 }
2095 break;
2096 #endif
2097 case 0x1C:
2098 switch (fn7) {
2099 case 0x00:
2100 /* SEXTB */
2101 if (!(ctx->amask & AMASK_BWX))
2102 goto invalid_opc;
2103 if (likely(rc != 31)) {
2104 if (islit)
2105 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2106 else
2107 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2108 }
2109 break;
2110 case 0x01:
2111 /* SEXTW */
2112 if (!(ctx->amask & AMASK_BWX))
2113 goto invalid_opc;
2114 if (likely(rc != 31)) {
2115 if (islit)
2116 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2117 else
2118 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2119 }
2120 break;
2121 case 0x30:
2122 /* CTPOP */
2123 if (!(ctx->amask & AMASK_CIX))
2124 goto invalid_opc;
2125 if (likely(rc != 31)) {
2126 if (islit)
2127 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2128 else
2129 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2130 }
2131 break;
2132 case 0x31:
2133 /* PERR */
2134 if (!(ctx->amask & AMASK_MVI))
2135 goto invalid_opc;
2136 gen_perr(ra, rb, rc, islit, lit);
2137 break;
2138 case 0x32:
2139 /* CTLZ */
2140 if (!(ctx->amask & AMASK_CIX))
2141 goto invalid_opc;
2142 if (likely(rc != 31)) {
2143 if (islit)
2144 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2145 else
2146 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2147 }
2148 break;
2149 case 0x33:
2150 /* CTTZ */
2151 if (!(ctx->amask & AMASK_CIX))
2152 goto invalid_opc;
2153 if (likely(rc != 31)) {
2154 if (islit)
2155 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2156 else
2157 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2158 }
2159 break;
2160 case 0x34:
2161 /* UNPKBW */
2162 if (!(ctx->amask & AMASK_MVI))
2163 goto invalid_opc;
2164 if (real_islit || ra != 31)
2165 goto invalid_opc;
2166 gen_unpkbw (rb, rc);
2167 break;
2168 case 0x35:
2169 /* UNPKBL */
2170 if (!(ctx->amask & AMASK_MVI))
2171 goto invalid_opc;
2172 if (real_islit || ra != 31)
2173 goto invalid_opc;
2174 gen_unpkbl (rb, rc);
2175 break;
2176 case 0x36:
2177 /* PKWB */
2178 if (!(ctx->amask & AMASK_MVI))
2179 goto invalid_opc;
2180 if (real_islit || ra != 31)
2181 goto invalid_opc;
2182 gen_pkwb (rb, rc);
2183 break;
2184 case 0x37:
2185 /* PKLB */
2186 if (!(ctx->amask & AMASK_MVI))
2187 goto invalid_opc;
2188 if (real_islit || ra != 31)
2189 goto invalid_opc;
2190 gen_pklb (rb, rc);
2191 break;
2192 case 0x38:
2193 /* MINSB8 */
2194 if (!(ctx->amask & AMASK_MVI))
2195 goto invalid_opc;
2196 gen_minsb8 (ra, rb, rc, islit, lit);
2197 break;
2198 case 0x39:
2199 /* MINSW4 */
2200 if (!(ctx->amask & AMASK_MVI))
2201 goto invalid_opc;
2202 gen_minsw4 (ra, rb, rc, islit, lit);
2203 break;
2204 case 0x3A:
2205 /* MINUB8 */
2206 if (!(ctx->amask & AMASK_MVI))
2207 goto invalid_opc;
2208 gen_minub8 (ra, rb, rc, islit, lit);
2209 break;
2210 case 0x3B:
2211 /* MINUW4 */
2212 if (!(ctx->amask & AMASK_MVI))
2213 goto invalid_opc;
2214 gen_minuw4 (ra, rb, rc, islit, lit);
2215 break;
2216 case 0x3C:
2217 /* MAXUB8 */
2218 if (!(ctx->amask & AMASK_MVI))
2219 goto invalid_opc;
2220 gen_maxub8 (ra, rb, rc, islit, lit);
2221 break;
2222 case 0x3D:
2223 /* MAXUW4 */
2224 if (!(ctx->amask & AMASK_MVI))
2225 goto invalid_opc;
2226 gen_maxuw4 (ra, rb, rc, islit, lit);
2227 break;
2228 case 0x3E:
2229 /* MAXSB8 */
2230 if (!(ctx->amask & AMASK_MVI))
2231 goto invalid_opc;
2232 gen_maxsb8 (ra, rb, rc, islit, lit);
2233 break;
2234 case 0x3F:
2235 /* MAXSW4 */
2236 if (!(ctx->amask & AMASK_MVI))
2237 goto invalid_opc;
2238 gen_maxsw4 (ra, rb, rc, islit, lit);
2239 break;
2240 case 0x70:
2241 /* FTOIT */
2242 if (!(ctx->amask & AMASK_FIX))
2243 goto invalid_opc;
2244 if (likely(rc != 31)) {
2245 if (ra != 31)
2246 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2247 else
2248 tcg_gen_movi_i64(cpu_ir[rc], 0);
2249 }
2250 break;
2251 case 0x78:
2252 /* FTOIS */
2253 if (!(ctx->amask & AMASK_FIX))
2254 goto invalid_opc;
2255 if (rc != 31) {
2256 TCGv_i32 tmp1 = tcg_temp_new_i32();
2257 if (ra != 31)
2258 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2259 else {
2260 TCGv tmp2 = tcg_const_i64(0);
2261 gen_helper_s_to_memory(tmp1, tmp2);
2262 tcg_temp_free(tmp2);
2263 }
2264 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2265 tcg_temp_free_i32(tmp1);
2266 }
2267 break;
2268 default:
2269 goto invalid_opc;
2270 }
2271 break;
2272 case 0x1D:
2273 /* HW_MTPR (PALcode) */
2274 #if defined (CONFIG_USER_ONLY)
2275 goto invalid_opc;
2276 #else
2277 if (!ctx->pal_mode)
2278 goto invalid_opc;
2279 else {
            TCGv tmp1 = tcg_const_i64(insn & 0xFF);
2281 if (ra != 31)
2282 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2283 else {
2284 TCGv tmp2 = tcg_const_i64(0);
2285 gen_helper_mtpr(tmp1, tmp2);
2286 tcg_temp_free(tmp2);
2287 }
2288 tcg_temp_free(tmp1);
2289 ret = 2;
2290 }
2291 break;
2292 #endif
2293 case 0x1E:
2294 /* HW_REI (PALcode) */
2295 #if defined (CONFIG_USER_ONLY)
2296 goto invalid_opc;
2297 #else
2298 if (!ctx->pal_mode)
2299 goto invalid_opc;
2300 if (rb == 31) {
2301 /* "Old" alpha */
2302 gen_helper_hw_rei();
2303 } else {
2304 TCGv tmp;
2305
            /* rb != 31 on this path: the return target is rb plus the
               sign-extended low 13 bits of the insn, whatever ra is. */
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2311 gen_helper_hw_ret(tmp);
2312 tcg_temp_free(tmp);
2313 }
2314 ret = 2;
2315 break;
2316 #endif
2317 case 0x1F:
2318 /* HW_ST (PALcode) */
2319 #if defined (CONFIG_USER_ONLY)
2320 goto invalid_opc;
2321 #else
2322 if (!ctx->pal_mode)
2323 goto invalid_opc;
2324 else {
2325 TCGv addr, val;
2326 addr = tcg_temp_new();
2327 if (rb != 31)
2328 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2329 else
2330 tcg_gen_movi_i64(addr, disp12);
2331 if (ra != 31)
2332 val = cpu_ir[ra];
2333 else {
2334 val = tcg_temp_new();
2335 tcg_gen_movi_i64(val, 0);
2336 }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
            case 0x7:
            case 0x8:
            case 0x9:
            case 0xA:
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x30:
        /* BR */
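        /* Unconditional branch: ra (if live) receives the return address;
           ctx->pc has already been advanced past this insn.  */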
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
    case 0x32: /* FBLT */
    case 0x33: /* FBLE */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
    case 0x36: /* FBGE */
    case 0x37: /* FBGT */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
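    /* For the integer branches, the final gen_bcond argument selects the
       operand: 1 tests only bit 0 of ra (BLBC/BLBS), 0 compares the full
       register against zero.  */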
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}

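/* For reference: the insn fields used above (opc, ra, rb, rc, islit, lit
 * and the displacements) are decoded at the top of translate_one, outside
 * this excerpt.  The displacements are sign-extended slices of the low
 * insn bits, roughly:
 *
 *     disp12 = ((int32_t)(insn << 20)) >> 20;    HW_LD/HW_ST format
 *     disp16 = (int16_t)insn;                    memory format
 *     disp21 = ((int32_t)(insn << 11)) >> 11;    branch format
 */

/* Translate one block starting at tb->pc.  When search_pc is set, also
 * record the guest PC and instruction count for every generated op, so
 * that gen_pc_load() below can map a faulting point back to a guest PC.  */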
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
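    /* translate_one() returns nonzero to end the block: 1 after a branch
       that has already written cpu_pc, 2 after a PALcode control-flow
       change (HW_REI, HW_MTPR), 3 after an exception has been raised.  */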
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;

        if (singlestep) {
            break;
        }
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

struct cpu_def_t {
    const char *name;
    int implver, amask;
};

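/* Known CPU models.  implver selects the chip generation reported by the
 * IMPLVER insn; amask advertises the architecture extensions tested above
 * (BWX byte/word, FIX FP<->integer moves, CIX count insns, MVI multimedia).
 */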
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};

CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

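    /* PS<4:3> is the mode field consumed by ctx.mem_idx above; user-only
       emulation forces a non-kernel mode.  */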
    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
#endif
    pal_init(env);
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    /* XXX: fix this */
    // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;

    qemu_init_vcpu(env);
    return env;
}

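/* Restore env->pc from the per-op PC map recorded by the search_pc pass
 * of gen_intermediate_code_internal().  */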
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}