target-sparc: Remove last uses of cpu_tmp64
target-sparc/translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC    2 /* dynamic pc value which takes only two values,
39                         selected from jump_pc[0]/jump_pc[1] by cpu_cond */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond, cpu_dst;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0;
63 /* Floating point registers */
64 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
65
66 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
67 static target_ulong gen_opc_jump_pc[2];
68
69 #include "gen-icount.h"
70
71 typedef struct DisasContext {
72 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
73 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
74 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
75 int is_br;
76 int mem_idx;
77 int fpu_enabled;
78 int address_mask_32bit;
79 int singlestep;
80 uint32_t cc_op; /* current CC operation */
81 struct TranslationBlock *tb;
82 sparc_def_t *def;
83 TCGv_i32 t32[3];
84 TCGv ttl[5];
85 int n_t32;
86 int n_ttl;
87 } DisasContext;
88
89 typedef struct {
90 TCGCond cond;
91 bool is_bool;
92 bool g1, g2;
93 TCGv c1, c2;
94 } DisasCompare;
95
96 // This macro uses non-native bit order (bit 0 is the MSB)
97 #define GET_FIELD(X, FROM, TO) \
98 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
99
100 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
101 #define GET_FIELD_SP(X, FROM, TO) \
102 GET_FIELD(X, 31 - (TO), 31 - (FROM))
103
104 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
105 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
106
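/*
 * Worked example (editor's note, not from the original source): GET_FIELD
 * counts bits from the MSB (bit 0 is machine bit 31), GET_FIELD_SP in the
 * usual little-endian order.  For insn = 0x12800004 (a Bicc "bne" with
 * disp22 = 4), the cond field occupies machine bits 28..25:
 *
 *   GET_FIELD(insn, 3, 6)       == 0x9   (bne)
 *   GET_FIELD_SP(insn, 25, 28)  == 0x9   (same field, LE numbering)
 */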
107 #ifdef TARGET_SPARC64
108 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
109 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
110 #else
111 #define DFPREG(r) (r & 0x1e)
112 #define QFPREG(r) (r & 0x1c)
113 #endif
114
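/*
 * Encoding example (editor's note): V9 doubles %d32..%d62 are encoded with
 * bit 0 of the 5-bit register field set, which DFPREG folds into bit 5:
 * DFPREG(2) == 2 (%d2), DFPREG(1) == 32 (%d32), DFPREG(3) == 34 (%d34).
 * QFPREG works the same way with 4-register alignment.
 */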
115 #define UA2005_HTRAP_MASK 0xff
116 #define V8_TRAP_MASK 0x7f
117
118 static int sign_extend(int x, int len)
119 {
120 len = 32 - len;
121 return (x << len) >> len;
122 }
123
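/*
 * Worked example (editor's note): sign_extend(0x1fff, 13) computes
 * (0x1fff << 19) >> 19 == -1, i.e. the all-ones 13-bit field becomes the
 * signed value -1.  GET_FIELDs relies on this to decode simm13 immediates.
 */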
124 #define IS_IMM (insn & (1<<13))
125
126 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
127 {
128 TCGv_i32 t;
129 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
130 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
131 return t;
132 }
133
134 static inline TCGv get_temp_tl(DisasContext *dc)
135 {
136 TCGv t;
137 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
138 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
139 return t;
140 }
141
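/*
 * Usage sketch (editor's note, assuming the per-insn cleanup at the end of
 * disas_sparc_insn): temps from these helpers are recorded in the
 * DisasContext and freed in bulk once the current instruction has been
 * translated, so callers need no matching free:
 *
 *   TCGv t = get_temp_tl(dc);   // freed automatically after this insn
 *   TCGv u = tcg_temp_new();    // must be freed with tcg_temp_free(u)
 */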
142 static inline void gen_update_fprs_dirty(int rd)
143 {
144 #if defined(TARGET_SPARC64)
145 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
146 #endif
147 }
148
149 /* floating point registers moves */
150 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
151 {
152 #if TCG_TARGET_REG_BITS == 32
153 if (src & 1) {
154 return TCGV_LOW(cpu_fpr[src / 2]);
155 } else {
156 return TCGV_HIGH(cpu_fpr[src / 2]);
157 }
158 #else
159 if (src & 1) {
160 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
161 } else {
162 TCGv_i32 ret = get_temp_i32(dc);
163 TCGv_i64 t = tcg_temp_new_i64();
164
165 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
166 tcg_gen_trunc_i64_i32(ret, t);
167 tcg_temp_free_i64(t);
168
169 return ret;
170 }
171 #endif
172 }
173
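/*
 * Layout note (editor's addition): cpu_fpr[] packs the single-precision
 * registers two per 64-bit element, even register in the high half.  So
 * %f5 (src == 5, odd) is the low 32 bits of cpu_fpr[2] and %f4 its high
 * 32 bits, which is why the even case above shifts right by 32 first.
 */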
174 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
175 {
176 #if TCG_TARGET_REG_BITS == 32
177 if (dst & 1) {
178 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
179 } else {
180 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
181 }
182 #else
183 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
184 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
185 (dst & 1 ? 0 : 32), 32);
186 #endif
187 gen_update_fprs_dirty(dst);
188 }
189
190 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
191 {
192 return get_temp_i32(dc);
193 }
194
195 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
196 {
197 src = DFPREG(src);
198 return cpu_fpr[src / 2];
199 }
200
201 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
202 {
203 dst = DFPREG(dst);
204 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
205 gen_update_fprs_dirty(dst);
206 }
207
208 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
209 {
210 return cpu_fpr[DFPREG(dst) / 2];
211 }
212
213 static void gen_op_load_fpr_QT0(unsigned int src)
214 {
215 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
216 offsetof(CPU_QuadU, ll.upper));
217 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
218 offsetof(CPU_QuadU, ll.lower));
219 }
220
221 static void gen_op_load_fpr_QT1(unsigned int src)
222 {
223 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
224 offsetof(CPU_QuadU, ll.upper));
225 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
226 offsetof(CPU_QuadU, ll.lower));
227 }
228
229 static void gen_op_store_QT0_fpr(unsigned int dst)
230 {
231 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
232 offsetof(CPU_QuadU, ll.upper));
233 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
234 offsetof(CPU_QuadU, ll.lower));
235 }
236
237 #ifdef TARGET_SPARC64
238 static void gen_move_Q(unsigned int rd, unsigned int rs)
239 {
240 rd = QFPREG(rd);
241 rs = QFPREG(rs);
242
243 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
244 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
245 gen_update_fprs_dirty(rd);
246 }
247 #endif
248
249 /* moves */
250 #ifdef CONFIG_USER_ONLY
251 #define supervisor(dc) 0
252 #ifdef TARGET_SPARC64
253 #define hypervisor(dc) 0
254 #endif
255 #else
256 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
257 #ifdef TARGET_SPARC64
258 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
259 #else
260 #endif
261 #endif
262
263 #ifdef TARGET_SPARC64
264 #ifndef TARGET_ABI32
265 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
266 #else
267 #define AM_CHECK(dc) (1)
268 #endif
269 #endif
270
271 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
272 {
273 #ifdef TARGET_SPARC64
274 if (AM_CHECK(dc))
275 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
276 #endif
277 }
278
279 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
280 {
281 if (reg == 0 || reg >= 8) {
282 TCGv t = get_temp_tl(dc);
283 if (reg == 0) {
284 tcg_gen_movi_tl(t, 0);
285 } else {
286 tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
287 }
288 return t;
289 } else {
290 return cpu_gregs[reg];
291 }
292 }
293
294 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
295 {
296 if (reg > 0) {
297 if (reg < 8) {
298 tcg_gen_mov_tl(cpu_gregs[reg], v);
299 } else {
300 tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
301 }
302 }
303 }
304
305 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
306 {
307 if (reg == 0 || reg >= 8) {
308 return get_temp_tl(dc);
309 } else {
310 return cpu_gregs[reg];
311 }
312 }
313
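/*
 * Example of the conventions above (editor's sketch): %g0 reads as zero
 * and writes to it vanish, while registers 8..31 are windowed and live in
 * memory behind cpu_regwptr rather than in fixed TCG globals:
 *
 *   TCGv s = gen_load_gpr(dc, 0);   // fresh temp holding constant 0
 *   gen_store_gpr(dc, 0, s);        // no-op: the reg > 0 test fails
 *   TCGv d = gen_dest_gpr(dc, 13);  // %o5: scratch temp, caller stores it
 */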
314 static inline void gen_goto_tb(DisasContext *s, int tb_num,
315 target_ulong pc, target_ulong npc)
316 {
317 TranslationBlock *tb;
318
319 tb = s->tb;
320 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
321 (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
322 !s->singlestep) {
323 /* jump to same page: we can use a direct jump */
324 tcg_gen_goto_tb(tb_num);
325 tcg_gen_movi_tl(cpu_pc, pc);
326 tcg_gen_movi_tl(cpu_npc, npc);
327 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
328 } else {
329 /* jump to another page: currently not optimized */
330 tcg_gen_movi_tl(cpu_pc, pc);
331 tcg_gen_movi_tl(cpu_npc, npc);
332 tcg_gen_exit_tb(0);
333 }
334 }
335
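/*
 * Editor's note: direct TB chaining is only safe while both pc and npc
 * stay on the same guest page as this TB (so invalidating the page also
 * invalidates the link) and we are not single-stepping.  A taken branch
 * whose delay slot falls onto the next page must therefore take the slow
 * exit_tb(0) path even though pc itself still matches.
 */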
336 // XXX suboptimal
337 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
338 {
339 tcg_gen_extu_i32_tl(reg, src);
340 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
341 tcg_gen_andi_tl(reg, reg, 0x1);
342 }
343
344 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
345 {
346 tcg_gen_extu_i32_tl(reg, src);
347 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
348 tcg_gen_andi_tl(reg, reg, 0x1);
349 }
350
351 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
352 {
353 tcg_gen_extu_i32_tl(reg, src);
354 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
355 tcg_gen_andi_tl(reg, reg, 0x1);
356 }
357
358 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
359 {
360 tcg_gen_extu_i32_tl(reg, src);
361 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
362 tcg_gen_andi_tl(reg, reg, 0x1);
363 }
364
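/*
 * Worked example (editor's note): the icc flags sit in PSR bits 23..20
 * as N, Z, V, C.  With psr == 0x00800000, gen_mov_reg_N computes
 * (psr >> PSR_NEG_SHIFT) & 1 == (0x00800000 >> 23) & 1 == 1.
 */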
365 static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
366 {
367 tcg_gen_mov_tl(cpu_cc_src, src1);
368 tcg_gen_movi_tl(cpu_cc_src2, src2);
369 tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
370 tcg_gen_mov_tl(dst, cpu_cc_dst);
371 }
372
373 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
374 {
375 tcg_gen_mov_tl(cpu_cc_src, src1);
376 tcg_gen_mov_tl(cpu_cc_src2, src2);
377 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
378 tcg_gen_mov_tl(dst, cpu_cc_dst);
379 }
380
381 static TCGv_i32 gen_add32_carry32(void)
382 {
383 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
384
385 /* Carry is computed from a previous add: (dst < src) */
386 #if TARGET_LONG_BITS == 64
387 cc_src1_32 = tcg_temp_new_i32();
388 cc_src2_32 = tcg_temp_new_i32();
389 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
390 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
391 #else
392 cc_src1_32 = cpu_cc_dst;
393 cc_src2_32 = cpu_cc_src;
394 #endif
395
396 carry_32 = tcg_temp_new_i32();
397 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
398
399 #if TARGET_LONG_BITS == 64
400 tcg_temp_free_i32(cc_src1_32);
401 tcg_temp_free_i32(cc_src2_32);
402 #endif
403
404 return carry_32;
405 }
406
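/*
 * Why (dst < src) recovers the carry (editor's note): the previous add
 * computed dst = src + src2 modulo 2^32, and a carry out occurred exactly
 * when the truncated sum wrapped below either operand.  E.g. 0xffffffff + 1
 * wraps to 0, and 0 < 0xffffffff (unsigned) signals the carry.  The borrow
 * case below is analogous: src1 < src2 iff the subtraction wrapped.
 */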
407 static TCGv_i32 gen_sub32_carry32(void)
408 {
409 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
410
411 /* Carry is computed from a previous borrow: (src1 < src2) */
412 #if TARGET_LONG_BITS == 64
413 cc_src1_32 = tcg_temp_new_i32();
414 cc_src2_32 = tcg_temp_new_i32();
415 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
416 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
417 #else
418 cc_src1_32 = cpu_cc_src;
419 cc_src2_32 = cpu_cc_src2;
420 #endif
421
422 carry_32 = tcg_temp_new_i32();
423 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
424
425 #if TARGET_LONG_BITS == 64
426 tcg_temp_free_i32(cc_src1_32);
427 tcg_temp_free_i32(cc_src2_32);
428 #endif
429
430 return carry_32;
431 }
432
433 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
434 TCGv src2, int update_cc)
435 {
436 TCGv_i32 carry_32;
437 TCGv carry;
438
439 switch (dc->cc_op) {
440 case CC_OP_DIV:
441 case CC_OP_LOGIC:
442 /* Carry is known to be zero. Fall back to plain ADD. */
443 if (update_cc) {
444 gen_op_add_cc(dst, src1, src2);
445 } else {
446 tcg_gen_add_tl(dst, src1, src2);
447 }
448 return;
449
450 case CC_OP_ADD:
451 case CC_OP_TADD:
452 case CC_OP_TADDTV:
453 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
454 {
455 /* For 32-bit hosts, we can re-use the host's hardware carry
456 generation by using an ADD2 opcode. We discard the low
457 part of the output. Ideally we'd combine this operation
458 with the add that generated the carry in the first place. */
459 TCGv dst_low = tcg_temp_new();
460 tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
461 cpu_cc_src, src1, cpu_cc_src2, src2);
462 tcg_temp_free(dst_low);
463 goto add_done;
464 }
465 #endif
466 carry_32 = gen_add32_carry32();
467 break;
468
469 case CC_OP_SUB:
470 case CC_OP_TSUB:
471 case CC_OP_TSUBTV:
472 carry_32 = gen_sub32_carry32();
473 break;
474
475 default:
476 /* We need external help to produce the carry. */
477 carry_32 = tcg_temp_new_i32();
478 gen_helper_compute_C_icc(carry_32, cpu_env);
479 break;
480 }
481
482 #if TARGET_LONG_BITS == 64
483 carry = tcg_temp_new();
484 tcg_gen_extu_i32_i64(carry, carry_32);
485 #else
486 carry = carry_32;
487 #endif
488
489 tcg_gen_add_tl(dst, src1, src2);
490 tcg_gen_add_tl(dst, dst, carry);
491
492 tcg_temp_free_i32(carry_32);
493 #if TARGET_LONG_BITS == 64
494 tcg_temp_free(carry);
495 #endif
496
497 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
498 add_done:
499 #endif
500 if (update_cc) {
501 tcg_gen_mov_tl(cpu_cc_src, src1);
502 tcg_gen_mov_tl(cpu_cc_src2, src2);
503 tcg_gen_mov_tl(cpu_cc_dst, dst);
504 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
505 dc->cc_op = CC_OP_ADDX;
506 }
507 }
508
509 static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
510 {
511 tcg_gen_mov_tl(cpu_cc_src, src1);
512 tcg_gen_movi_tl(cpu_cc_src2, src2);
513 if (src2 == 0) {
514 tcg_gen_mov_tl(cpu_cc_dst, src1);
515 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
516 dc->cc_op = CC_OP_LOGIC;
517 } else {
518 tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
519 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
520 dc->cc_op = CC_OP_SUB;
521 }
522 tcg_gen_mov_tl(dst, cpu_cc_dst);
523 }
524
525 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
526 {
527 tcg_gen_mov_tl(cpu_cc_src, src1);
528 tcg_gen_mov_tl(cpu_cc_src2, src2);
529 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
530 tcg_gen_mov_tl(dst, cpu_cc_dst);
531 }
532
533 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
534 TCGv src2, int update_cc)
535 {
536 TCGv_i32 carry_32;
537 TCGv carry;
538
539 switch (dc->cc_op) {
540 case CC_OP_DIV:
541 case CC_OP_LOGIC:
542 /* Carry is known to be zero. Fall back to plain SUB. */
543 if (update_cc) {
544 gen_op_sub_cc(dst, src1, src2);
545 } else {
546 tcg_gen_sub_tl(dst, src1, src2);
547 }
548 return;
549
550 case CC_OP_ADD:
551 case CC_OP_TADD:
552 case CC_OP_TADDTV:
553 carry_32 = gen_add32_carry32();
554 break;
555
556 case CC_OP_SUB:
557 case CC_OP_TSUB:
558 case CC_OP_TSUBTV:
559 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
560 {
561 /* For 32-bit hosts, we can re-use the host's hardware carry
562 generation by using a SUB2 opcode. We discard the low
563 part of the output. Ideally we'd combine this operation
564                part of the output.  Ideally we'd combine this operation
565                with the subtraction that generated the borrow in the first place. */
565 TCGv dst_low = tcg_temp_new();
566 tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
567 cpu_cc_src, src1, cpu_cc_src2, src2);
568 tcg_temp_free(dst_low);
569 goto sub_done;
570 }
571 #endif
572 carry_32 = gen_sub32_carry32();
573 break;
574
575 default:
576 /* We need external help to produce the carry. */
577 carry_32 = tcg_temp_new_i32();
578 gen_helper_compute_C_icc(carry_32, cpu_env);
579 break;
580 }
581
582 #if TARGET_LONG_BITS == 64
583 carry = tcg_temp_new();
584 tcg_gen_extu_i32_i64(carry, carry_32);
585 #else
586 carry = carry_32;
587 #endif
588
589 tcg_gen_sub_tl(dst, src1, src2);
590 tcg_gen_sub_tl(dst, dst, carry);
591
592 tcg_temp_free_i32(carry_32);
593 #if TARGET_LONG_BITS == 64
594 tcg_temp_free(carry);
595 #endif
596
597 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
598 sub_done:
599 #endif
600 if (update_cc) {
601 tcg_gen_mov_tl(cpu_cc_src, src1);
602 tcg_gen_mov_tl(cpu_cc_src2, src2);
603 tcg_gen_mov_tl(cpu_cc_dst, dst);
604 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
605 dc->cc_op = CC_OP_SUBX;
606 }
607 }
608
609 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
610 {
611 TCGv r_temp, zero;
612
613 r_temp = tcg_temp_new();
614
615 /* old op:
616 if (!(env->y & 1))
617 T1 = 0;
618 */
619 zero = tcg_const_tl(0);
620 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
621 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
622 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
623 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
624 zero, cpu_cc_src2);
625 tcg_temp_free(zero);
626
627 // b2 = T0 & 1;
628 // env->y = (b2 << 31) | (env->y >> 1);
629 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
630 tcg_gen_shli_tl(r_temp, r_temp, 31);
631 tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
632 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
633 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
634 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
635
636 // b1 = N ^ V;
637 gen_mov_reg_N(cpu_tmp0, cpu_psr);
638 gen_mov_reg_V(r_temp, cpu_psr);
639 tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
640 tcg_temp_free(r_temp);
641
642 // T0 = (b1 << 31) | (T0 >> 1);
643 // src1 = T0;
644 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
645 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
646 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
647
648 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
649
650 tcg_gen_mov_tl(dst, cpu_cc_dst);
651 }
652
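/* Reference model of one MULScc step (editor's sketch mirroring the TCG
   above; mulscc_step is a hypothetical name, not part of this file): */
#if 0
static uint32_t mulscc_step(CPUSPARCState *env, uint32_t rs1, uint32_t rs2,
                            uint32_t psr)
{
    /* Multiplier bit in Y selects whether the multiplicand is added.  */
    uint32_t addend = (env->y & 1) ? rs2 : 0;
    uint32_t b1 = ((psr >> PSR_NEG_SHIFT) ^ (psr >> PSR_OVF_SHIFT)) & 1;

    env->y = ((rs1 & 1) << 31) | ((env->y >> 1) & 0x7fffffff);
    rs1 = (b1 << 31) | (rs1 >> 1);      /* shift (N ^ V) into the top */
    return rs1 + addend;                /* icc is then set from this add */
}
#endif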
653 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
654 {
655 TCGv_i32 r_src1, r_src2;
656 TCGv_i64 r_temp, r_temp2;
657
658 r_src1 = tcg_temp_new_i32();
659 r_src2 = tcg_temp_new_i32();
660
661 tcg_gen_trunc_tl_i32(r_src1, src1);
662 tcg_gen_trunc_tl_i32(r_src2, src2);
663
664 r_temp = tcg_temp_new_i64();
665 r_temp2 = tcg_temp_new_i64();
666
667 if (sign_ext) {
668 tcg_gen_ext_i32_i64(r_temp, r_src2);
669 tcg_gen_ext_i32_i64(r_temp2, r_src1);
670 } else {
671 tcg_gen_extu_i32_i64(r_temp, r_src2);
672 tcg_gen_extu_i32_i64(r_temp2, r_src1);
673 }
674
675 tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
676
677 tcg_gen_shri_i64(r_temp, r_temp2, 32);
678 tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
679 tcg_temp_free_i64(r_temp);
680 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
681
682 tcg_gen_trunc_i64_tl(dst, r_temp2);
683
684 tcg_temp_free_i64(r_temp2);
685
686 tcg_temp_free_i32(r_src1);
687 tcg_temp_free_i32(r_src2);
688 }
689
690 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
691 {
692 /* zero-extend truncated operands before multiplication */
693 gen_op_multiply(dst, src1, src2, 0);
694 }
695
696 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
697 {
698 /* sign-extend truncated operands before multiplication */
699 gen_op_multiply(dst, src1, src2, 1);
700 }
701
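/* Reference semantics (editor's sketch): UMUL/SMUL form a 64-bit product
   of the low 32 bits of each operand; the destination receives the product
   truncated to the register width (all 64 bits on sparc64) and Y receives
   the high 32 bits: */
#if 0
uint64_t prod = (uint64_t)(uint32_t)rs1 * (uint32_t)rs2;  /* UMUL */
/* SMUL instead: (int64_t)(int32_t)rs1 * (int32_t)rs2 */
env->y = (uint32_t)(prod >> 32);
dst = (target_ulong)prod;
#endif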
702 // 1
703 static inline void gen_op_eval_ba(TCGv dst)
704 {
705 tcg_gen_movi_tl(dst, 1);
706 }
707
708 // Z
709 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
710 {
711 gen_mov_reg_Z(dst, src);
712 }
713
714 // Z | (N ^ V)
715 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
716 {
717 gen_mov_reg_N(cpu_tmp0, src);
718 gen_mov_reg_V(dst, src);
719 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
720 gen_mov_reg_Z(cpu_tmp0, src);
721 tcg_gen_or_tl(dst, dst, cpu_tmp0);
722 }
723
724 // N ^ V
725 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
726 {
727 gen_mov_reg_V(cpu_tmp0, src);
728 gen_mov_reg_N(dst, src);
729 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
730 }
731
732 // C | Z
733 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
734 {
735 gen_mov_reg_Z(cpu_tmp0, src);
736 gen_mov_reg_C(dst, src);
737 tcg_gen_or_tl(dst, dst, cpu_tmp0);
738 }
739
740 // C
741 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
742 {
743 gen_mov_reg_C(dst, src);
744 }
745
746 // V
747 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
748 {
749 gen_mov_reg_V(dst, src);
750 }
751
752 // 0
753 static inline void gen_op_eval_bn(TCGv dst)
754 {
755 tcg_gen_movi_tl(dst, 0);
756 }
757
758 // N
759 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
760 {
761 gen_mov_reg_N(dst, src);
762 }
763
764 // !Z
765 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
766 {
767 gen_mov_reg_Z(dst, src);
768 tcg_gen_xori_tl(dst, dst, 0x1);
769 }
770
771 // !(Z | (N ^ V))
772 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
773 {
774 gen_mov_reg_N(cpu_tmp0, src);
775 gen_mov_reg_V(dst, src);
776 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
777 gen_mov_reg_Z(cpu_tmp0, src);
778 tcg_gen_or_tl(dst, dst, cpu_tmp0);
779 tcg_gen_xori_tl(dst, dst, 0x1);
780 }
781
782 // !(N ^ V)
783 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
784 {
785 gen_mov_reg_V(cpu_tmp0, src);
786 gen_mov_reg_N(dst, src);
787 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
788 tcg_gen_xori_tl(dst, dst, 0x1);
789 }
790
791 // !(C | Z)
792 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
793 {
794 gen_mov_reg_Z(cpu_tmp0, src);
795 gen_mov_reg_C(dst, src);
796 tcg_gen_or_tl(dst, dst, cpu_tmp0);
797 tcg_gen_xori_tl(dst, dst, 0x1);
798 }
799
800 // !C
801 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
802 {
803 gen_mov_reg_C(dst, src);
804 tcg_gen_xori_tl(dst, dst, 0x1);
805 }
806
807 // !N
808 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
809 {
810 gen_mov_reg_N(dst, src);
811 tcg_gen_xori_tl(dst, dst, 0x1);
812 }
813
814 // !V
815 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
816 {
817 gen_mov_reg_V(dst, src);
818 tcg_gen_xori_tl(dst, dst, 0x1);
819 }
820
821 /*
822    FPSR condition code field (FCC1 << 1) | FCC0:
823    0   equal
824    1   less than
825    2   greater than
826    3   unordered
827 */
828 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
829 unsigned int fcc_offset)
830 {
831 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
832 tcg_gen_andi_tl(reg, reg, 0x1);
833 }
834
835 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
836 unsigned int fcc_offset)
837 {
838 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
839 tcg_gen_andi_tl(reg, reg, 0x1);
840 }
841
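/*
 * Worked example (editor's note): fcc_offset selects one of the four V9
 * fcc fields.  fcc0 sits at FSR bits 11:10, fcc1 at 33:32, fcc2 at 35:34
 * and fcc3 at 37:36, hence the offsets 0, 32-10, 34-10 and 36-10 used by
 * gen_fcompare() below.  With fcc0 == 1 ("less than"), FCC0 reads 1 and
 * FCC1 reads 0, so gen_op_eval_fbl() yields true.
 */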
842 // !0: FCC0 | FCC1
843 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
844 unsigned int fcc_offset)
845 {
846 gen_mov_reg_FCC0(dst, src, fcc_offset);
847 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
848 tcg_gen_or_tl(dst, dst, cpu_tmp0);
849 }
850
851 // 1 or 2: FCC0 ^ FCC1
852 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
853 unsigned int fcc_offset)
854 {
855 gen_mov_reg_FCC0(dst, src, fcc_offset);
856 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
857 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
858 }
859
860 // 1 or 3: FCC0
861 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
862 unsigned int fcc_offset)
863 {
864 gen_mov_reg_FCC0(dst, src, fcc_offset);
865 }
866
867 // 1: FCC0 & !FCC1
868 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
869 unsigned int fcc_offset)
870 {
871 gen_mov_reg_FCC0(dst, src, fcc_offset);
872 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
873 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
874 tcg_gen_and_tl(dst, dst, cpu_tmp0);
875 }
876
877 // 2 or 3: FCC1
878 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
879 unsigned int fcc_offset)
880 {
881 gen_mov_reg_FCC1(dst, src, fcc_offset);
882 }
883
884 // 2: !FCC0 & FCC1
885 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
886 unsigned int fcc_offset)
887 {
888 gen_mov_reg_FCC0(dst, src, fcc_offset);
889 tcg_gen_xori_tl(dst, dst, 0x1);
890 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
891 tcg_gen_and_tl(dst, dst, cpu_tmp0);
892 }
893
894 // 3: FCC0 & FCC1
895 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
896 unsigned int fcc_offset)
897 {
898 gen_mov_reg_FCC0(dst, src, fcc_offset);
899 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
900 tcg_gen_and_tl(dst, dst, cpu_tmp0);
901 }
902
903 // 0: !(FCC0 | FCC1)
904 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
905 unsigned int fcc_offset)
906 {
907 gen_mov_reg_FCC0(dst, src, fcc_offset);
908 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
909 tcg_gen_or_tl(dst, dst, cpu_tmp0);
910 tcg_gen_xori_tl(dst, dst, 0x1);
911 }
912
913 // 0 or 3: !(FCC0 ^ FCC1)
914 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
915 unsigned int fcc_offset)
916 {
917 gen_mov_reg_FCC0(dst, src, fcc_offset);
918 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
919 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
920 tcg_gen_xori_tl(dst, dst, 0x1);
921 }
922
923 // 0 or 2: !FCC0
924 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
925 unsigned int fcc_offset)
926 {
927 gen_mov_reg_FCC0(dst, src, fcc_offset);
928 tcg_gen_xori_tl(dst, dst, 0x1);
929 }
930
931 // !1: !(FCC0 & !FCC1)
932 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
933 unsigned int fcc_offset)
934 {
935 gen_mov_reg_FCC0(dst, src, fcc_offset);
936 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
937 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
938 tcg_gen_and_tl(dst, dst, cpu_tmp0);
939 tcg_gen_xori_tl(dst, dst, 0x1);
940 }
941
942 // 0 or 1: !FCC1
943 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
944 unsigned int fcc_offset)
945 {
946 gen_mov_reg_FCC1(dst, src, fcc_offset);
947 tcg_gen_xori_tl(dst, dst, 0x1);
948 }
949
950 // !2: !(!FCC0 & FCC1)
951 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
952 unsigned int fcc_offset)
953 {
954 gen_mov_reg_FCC0(dst, src, fcc_offset);
955 tcg_gen_xori_tl(dst, dst, 0x1);
956 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
957 tcg_gen_and_tl(dst, dst, cpu_tmp0);
958 tcg_gen_xori_tl(dst, dst, 0x1);
959 }
960
961 // !3: !(FCC0 & FCC1)
962 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
963 unsigned int fcc_offset)
964 {
965 gen_mov_reg_FCC0(dst, src, fcc_offset);
966 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
967 tcg_gen_and_tl(dst, dst, cpu_tmp0);
968 tcg_gen_xori_tl(dst, dst, 0x1);
969 }
970
971 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
972 target_ulong pc2, TCGv r_cond)
973 {
974 int l1;
975
976 l1 = gen_new_label();
977
978 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
979
980 gen_goto_tb(dc, 0, pc1, pc1 + 4);
981
982 gen_set_label(l1);
983 gen_goto_tb(dc, 1, pc2, pc2 + 4);
984 }
985
986 static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
987 target_ulong pc2, TCGv r_cond)
988 {
989 int l1;
990
991 l1 = gen_new_label();
992
993 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
994
995 gen_goto_tb(dc, 0, pc2, pc1);
996
997 gen_set_label(l1);
998 gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
999 }
1000
1001 static inline void gen_generic_branch(DisasContext *dc)
1002 {
1003 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1004 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1005 TCGv zero = tcg_const_tl(0);
1006
1007 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1008
1009 tcg_temp_free(npc0);
1010 tcg_temp_free(npc1);
1011 tcg_temp_free(zero);
1012 }
1013
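/*
 * Editor's note: while dc->npc == JUMP_PC the real next-PC is deferred:
 * jump_pc[0] holds the branch target, jump_pc[1] the fall-through, and
 * cpu_cond the condition; the movcond above materializes the choice.
 * E.g. after "bne L" at pc P, jump_pc == { L, P + 8 }.  If the delay-slot
 * insn then needs cpu_cond or npc, flush_cond()/save_npc() below force
 * the movcond and degrade npc to DYNAMIC_PC.
 */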
1014 /* call this function before using the condition register as it may
1015 have been set for a jump */
1016 static inline void flush_cond(DisasContext *dc)
1017 {
1018 if (dc->npc == JUMP_PC) {
1019 gen_generic_branch(dc);
1020 dc->npc = DYNAMIC_PC;
1021 }
1022 }
1023
1024 static inline void save_npc(DisasContext *dc)
1025 {
1026 if (dc->npc == JUMP_PC) {
1027 gen_generic_branch(dc);
1028 dc->npc = DYNAMIC_PC;
1029 } else if (dc->npc != DYNAMIC_PC) {
1030 tcg_gen_movi_tl(cpu_npc, dc->npc);
1031 }
1032 }
1033
1034 static inline void update_psr(DisasContext *dc)
1035 {
1036 if (dc->cc_op != CC_OP_FLAGS) {
1037 dc->cc_op = CC_OP_FLAGS;
1038 gen_helper_compute_psr(cpu_env);
1039 }
1040 }
1041
1042 static inline void save_state(DisasContext *dc)
1043 {
1044 tcg_gen_movi_tl(cpu_pc, dc->pc);
1045 save_npc(dc);
1046 }
1047
1048 static inline void gen_mov_pc_npc(DisasContext *dc)
1049 {
1050 if (dc->npc == JUMP_PC) {
1051 gen_generic_branch(dc);
1052 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1053 dc->pc = DYNAMIC_PC;
1054 } else if (dc->npc == DYNAMIC_PC) {
1055 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1056 dc->pc = DYNAMIC_PC;
1057 } else {
1058 dc->pc = dc->npc;
1059 }
1060 }
1061
1062 static inline void gen_op_next_insn(void)
1063 {
1064 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1065 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1066 }
1067
1068 static void free_compare(DisasCompare *cmp)
1069 {
1070 if (!cmp->g1) {
1071 tcg_temp_free(cmp->c1);
1072 }
1073 if (!cmp->g2) {
1074 tcg_temp_free(cmp->c2);
1075 }
1076 }
1077
1078 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1079 DisasContext *dc)
1080 {
1081 static int subcc_cond[16] = {
1082 TCG_COND_NEVER,
1083 TCG_COND_EQ,
1084 TCG_COND_LE,
1085 TCG_COND_LT,
1086 TCG_COND_LEU,
1087 TCG_COND_LTU,
1088 -1, /* neg */
1089 -1, /* overflow */
1090 TCG_COND_ALWAYS,
1091 TCG_COND_NE,
1092 TCG_COND_GT,
1093 TCG_COND_GE,
1094 TCG_COND_GTU,
1095 TCG_COND_GEU,
1096 -1, /* pos */
1097 -1, /* no overflow */
1098 };
1099
1100 static int logic_cond[16] = {
1101 TCG_COND_NEVER,
1102 TCG_COND_EQ, /* eq: Z */
1103 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1104 TCG_COND_LT, /* lt: N ^ V -> N */
1105 TCG_COND_EQ, /* leu: C | Z -> Z */
1106 TCG_COND_NEVER, /* ltu: C -> 0 */
1107 TCG_COND_LT, /* neg: N */
1108 TCG_COND_NEVER, /* vs: V -> 0 */
1109 TCG_COND_ALWAYS,
1110 TCG_COND_NE, /* ne: !Z */
1111 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1112 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1113 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1114 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1115 TCG_COND_GE, /* pos: !N */
1116 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1117 };
1118
1119 TCGv_i32 r_src;
1120 TCGv r_dst;
1121
1122 #ifdef TARGET_SPARC64
1123 if (xcc) {
1124 r_src = cpu_xcc;
1125 } else {
1126 r_src = cpu_psr;
1127 }
1128 #else
1129 r_src = cpu_psr;
1130 #endif
1131
1132 switch (dc->cc_op) {
1133 case CC_OP_LOGIC:
1134 cmp->cond = logic_cond[cond];
1135 do_compare_dst_0:
1136 cmp->is_bool = false;
1137 cmp->g2 = false;
1138 cmp->c2 = tcg_const_tl(0);
1139 #ifdef TARGET_SPARC64
1140 if (!xcc) {
1141 cmp->g1 = false;
1142 cmp->c1 = tcg_temp_new();
1143 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1144 break;
1145 }
1146 #endif
1147 cmp->g1 = true;
1148 cmp->c1 = cpu_cc_dst;
1149 break;
1150
1151 case CC_OP_SUB:
1152 switch (cond) {
1153 case 6: /* neg */
1154 case 14: /* pos */
1155 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1156 goto do_compare_dst_0;
1157
1158 case 7: /* overflow */
1159 case 15: /* !overflow */
1160 goto do_dynamic;
1161
1162 default:
1163 cmp->cond = subcc_cond[cond];
1164 cmp->is_bool = false;
1165 #ifdef TARGET_SPARC64
1166 if (!xcc) {
1167 /* Note that sign-extension works for unsigned compares as
1168 long as both operands are sign-extended. */
1169 cmp->g1 = cmp->g2 = false;
1170 cmp->c1 = tcg_temp_new();
1171 cmp->c2 = tcg_temp_new();
1172 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1173 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1174 break;
1175 }
1176 #endif
1177 cmp->g1 = cmp->g2 = true;
1178 cmp->c1 = cpu_cc_src;
1179 cmp->c2 = cpu_cc_src2;
1180 break;
1181 }
1182 break;
1183
1184 default:
1185 do_dynamic:
1186 gen_helper_compute_psr(cpu_env);
1187 dc->cc_op = CC_OP_FLAGS;
1188 /* FALLTHRU */
1189
1190 case CC_OP_FLAGS:
1191 /* We're going to generate a boolean result. */
1192 cmp->cond = TCG_COND_NE;
1193 cmp->is_bool = true;
1194 cmp->g1 = cmp->g2 = false;
1195 cmp->c1 = r_dst = tcg_temp_new();
1196 cmp->c2 = tcg_const_tl(0);
1197
1198 switch (cond) {
1199 case 0x0:
1200 gen_op_eval_bn(r_dst);
1201 break;
1202 case 0x1:
1203 gen_op_eval_be(r_dst, r_src);
1204 break;
1205 case 0x2:
1206 gen_op_eval_ble(r_dst, r_src);
1207 break;
1208 case 0x3:
1209 gen_op_eval_bl(r_dst, r_src);
1210 break;
1211 case 0x4:
1212 gen_op_eval_bleu(r_dst, r_src);
1213 break;
1214 case 0x5:
1215 gen_op_eval_bcs(r_dst, r_src);
1216 break;
1217 case 0x6:
1218 gen_op_eval_bneg(r_dst, r_src);
1219 break;
1220 case 0x7:
1221 gen_op_eval_bvs(r_dst, r_src);
1222 break;
1223 case 0x8:
1224 gen_op_eval_ba(r_dst);
1225 break;
1226 case 0x9:
1227 gen_op_eval_bne(r_dst, r_src);
1228 break;
1229 case 0xa:
1230 gen_op_eval_bg(r_dst, r_src);
1231 break;
1232 case 0xb:
1233 gen_op_eval_bge(r_dst, r_src);
1234 break;
1235 case 0xc:
1236 gen_op_eval_bgu(r_dst, r_src);
1237 break;
1238 case 0xd:
1239 gen_op_eval_bcc(r_dst, r_src);
1240 break;
1241 case 0xe:
1242 gen_op_eval_bpos(r_dst, r_src);
1243 break;
1244 case 0xf:
1245 gen_op_eval_bvc(r_dst, r_src);
1246 break;
1247 }
1248 break;
1249 }
1250 }
1251
1252 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1253 {
1254 unsigned int offset;
1255 TCGv r_dst;
1256
1257 /* For now we still generate a straight boolean result. */
1258 cmp->cond = TCG_COND_NE;
1259 cmp->is_bool = true;
1260 cmp->g1 = cmp->g2 = false;
1261 cmp->c1 = r_dst = tcg_temp_new();
1262 cmp->c2 = tcg_const_tl(0);
1263
1264 switch (cc) {
1265 default:
1266 case 0x0:
1267 offset = 0;
1268 break;
1269 case 0x1:
1270 offset = 32 - 10;
1271 break;
1272 case 0x2:
1273 offset = 34 - 10;
1274 break;
1275 case 0x3:
1276 offset = 36 - 10;
1277 break;
1278 }
1279
1280 switch (cond) {
1281 case 0x0:
1282 gen_op_eval_bn(r_dst);
1283 break;
1284 case 0x1:
1285 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1286 break;
1287 case 0x2:
1288 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1289 break;
1290 case 0x3:
1291 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1292 break;
1293 case 0x4:
1294 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1295 break;
1296 case 0x5:
1297 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1298 break;
1299 case 0x6:
1300 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1301 break;
1302 case 0x7:
1303 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1304 break;
1305 case 0x8:
1306 gen_op_eval_ba(r_dst);
1307 break;
1308 case 0x9:
1309 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1310 break;
1311 case 0xa:
1312 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1313 break;
1314 case 0xb:
1315 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1316 break;
1317 case 0xc:
1318 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1319 break;
1320 case 0xd:
1321 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1322 break;
1323 case 0xe:
1324 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1325 break;
1326 case 0xf:
1327 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1328 break;
1329 }
1330 }
1331
1332 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1333 DisasContext *dc)
1334 {
1335 DisasCompare cmp;
1336 gen_compare(&cmp, cc, cond, dc);
1337
1338 /* The interface is to return a boolean in r_dst. */
1339 if (cmp.is_bool) {
1340 tcg_gen_mov_tl(r_dst, cmp.c1);
1341 } else {
1342 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1343 }
1344
1345 free_compare(&cmp);
1346 }
1347
1348 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1349 {
1350 DisasCompare cmp;
1351 gen_fcompare(&cmp, cc, cond);
1352
1353 /* The interface is to return a boolean in r_dst. */
1354 if (cmp.is_bool) {
1355 tcg_gen_mov_tl(r_dst, cmp.c1);
1356 } else {
1357 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1358 }
1359
1360 free_compare(&cmp);
1361 }
1362
1363 #ifdef TARGET_SPARC64
1364 // Inverted logic: entries hold the negated condition, undone by
1365 // tcg_invert_cond() in gen_compare_reg() below
1365 static const int gen_tcg_cond_reg[8] = {
1366 -1,
1367 TCG_COND_NE,
1368 TCG_COND_GT,
1369 TCG_COND_GE,
1370 -1,
1371 TCG_COND_EQ,
1372 TCG_COND_LE,
1373 TCG_COND_LT,
1374 };
1375
1376 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1377 {
1378 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1379 cmp->is_bool = false;
1380 cmp->g1 = true;
1381 cmp->g2 = false;
1382 cmp->c1 = r_src;
1383 cmp->c2 = tcg_const_tl(0);
1384 }
1385
1386 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1387 {
1388 DisasCompare cmp;
1389 gen_compare_reg(&cmp, cond, r_src);
1390
1391 /* The interface is to return a boolean in r_dst. */
1392 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1393
1394 free_compare(&cmp);
1395 }
1396 #endif
1397
1398 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1399 {
1400 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1401 target_ulong target = dc->pc + offset;
1402
1403 #ifdef TARGET_SPARC64
1404 if (unlikely(AM_CHECK(dc))) {
1405 target &= 0xffffffffULL;
1406 }
1407 #endif
1408 if (cond == 0x0) {
1409 /* unconditional not taken */
1410 if (a) {
1411 dc->pc = dc->npc + 4;
1412 dc->npc = dc->pc + 4;
1413 } else {
1414 dc->pc = dc->npc;
1415 dc->npc = dc->pc + 4;
1416 }
1417 } else if (cond == 0x8) {
1418 /* unconditional taken */
1419 if (a) {
1420 dc->pc = target;
1421 dc->npc = dc->pc + 4;
1422 } else {
1423 dc->pc = dc->npc;
1424 dc->npc = target;
1425 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1426 }
1427 } else {
1428 flush_cond(dc);
1429 gen_cond(cpu_cond, cc, cond, dc);
1430 if (a) {
1431 gen_branch_a(dc, target, dc->npc, cpu_cond);
1432 dc->is_br = 1;
1433 } else {
1434 dc->pc = dc->npc;
1435 dc->jump_pc[0] = target;
1436 if (unlikely(dc->npc == DYNAMIC_PC)) {
1437 dc->jump_pc[1] = DYNAMIC_PC;
1438 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1439 } else {
1440 dc->jump_pc[1] = dc->npc + 4;
1441 dc->npc = JUMP_PC;
1442 }
1443 }
1444 }
1445 }
1446
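/*
 * Editor's note on the annul bit: with a == 1 the delay slot of a
 * conditional branch executes only when the branch is taken.  For
 * "bne,a target" at pc P, gen_branch_a() either executes the slot at
 * P + 4 and then jumps, or skips it and resumes at P + 8; that is also
 * why the untaken "bn,a" case above advances pc to npc + 4.
 */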
1447 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1448 {
1449 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1450 target_ulong target = dc->pc + offset;
1451
1452 #ifdef TARGET_SPARC64
1453 if (unlikely(AM_CHECK(dc))) {
1454 target &= 0xffffffffULL;
1455 }
1456 #endif
1457 if (cond == 0x0) {
1458 /* unconditional not taken */
1459 if (a) {
1460 dc->pc = dc->npc + 4;
1461 dc->npc = dc->pc + 4;
1462 } else {
1463 dc->pc = dc->npc;
1464 dc->npc = dc->pc + 4;
1465 }
1466 } else if (cond == 0x8) {
1467 /* unconditional taken */
1468 if (a) {
1469 dc->pc = target;
1470 dc->npc = dc->pc + 4;
1471 } else {
1472 dc->pc = dc->npc;
1473 dc->npc = target;
1474 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1475 }
1476 } else {
1477 flush_cond(dc);
1478 gen_fcond(cpu_cond, cc, cond);
1479 if (a) {
1480 gen_branch_a(dc, target, dc->npc, cpu_cond);
1481 dc->is_br = 1;
1482 } else {
1483 dc->pc = dc->npc;
1484 dc->jump_pc[0] = target;
1485 if (unlikely(dc->npc == DYNAMIC_PC)) {
1486 dc->jump_pc[1] = DYNAMIC_PC;
1487 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1488 } else {
1489 dc->jump_pc[1] = dc->npc + 4;
1490 dc->npc = JUMP_PC;
1491 }
1492 }
1493 }
1494 }
1495
1496 #ifdef TARGET_SPARC64
1497 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1498 TCGv r_reg)
1499 {
1500 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1501 target_ulong target = dc->pc + offset;
1502
1503 if (unlikely(AM_CHECK(dc))) {
1504 target &= 0xffffffffULL;
1505 }
1506 flush_cond(dc);
1507 gen_cond_reg(cpu_cond, cond, r_reg);
1508 if (a) {
1509 gen_branch_a(dc, target, dc->npc, cpu_cond);
1510 dc->is_br = 1;
1511 } else {
1512 dc->pc = dc->npc;
1513 dc->jump_pc[0] = target;
1514 if (unlikely(dc->npc == DYNAMIC_PC)) {
1515 dc->jump_pc[1] = DYNAMIC_PC;
1516 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1517 } else {
1518 dc->jump_pc[1] = dc->npc + 4;
1519 dc->npc = JUMP_PC;
1520 }
1521 }
1522 }
1523
1524 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1525 {
1526 switch (fccno) {
1527 case 0:
1528 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1529 break;
1530 case 1:
1531 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1532 break;
1533 case 2:
1534 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1535 break;
1536 case 3:
1537 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1538 break;
1539 }
1540 }
1541
1542 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1543 {
1544 switch (fccno) {
1545 case 0:
1546 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1547 break;
1548 case 1:
1549 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1550 break;
1551 case 2:
1552 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1553 break;
1554 case 3:
1555 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1556 break;
1557 }
1558 }
1559
1560 static inline void gen_op_fcmpq(int fccno)
1561 {
1562 switch (fccno) {
1563 case 0:
1564 gen_helper_fcmpq(cpu_env);
1565 break;
1566 case 1:
1567 gen_helper_fcmpq_fcc1(cpu_env);
1568 break;
1569 case 2:
1570 gen_helper_fcmpq_fcc2(cpu_env);
1571 break;
1572 case 3:
1573 gen_helper_fcmpq_fcc3(cpu_env);
1574 break;
1575 }
1576 }
1577
1578 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1579 {
1580 switch (fccno) {
1581 case 0:
1582 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1583 break;
1584 case 1:
1585 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1586 break;
1587 case 2:
1588 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1589 break;
1590 case 3:
1591 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1592 break;
1593 }
1594 }
1595
1596 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1597 {
1598 switch (fccno) {
1599 case 0:
1600 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1601 break;
1602 case 1:
1603 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1604 break;
1605 case 2:
1606 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1607 break;
1608 case 3:
1609 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1610 break;
1611 }
1612 }
1613
1614 static inline void gen_op_fcmpeq(int fccno)
1615 {
1616 switch (fccno) {
1617 case 0:
1618 gen_helper_fcmpeq(cpu_env);
1619 break;
1620 case 1:
1621 gen_helper_fcmpeq_fcc1(cpu_env);
1622 break;
1623 case 2:
1624 gen_helper_fcmpeq_fcc2(cpu_env);
1625 break;
1626 case 3:
1627 gen_helper_fcmpeq_fcc3(cpu_env);
1628 break;
1629 }
1630 }
1631
1632 #else
1633
1634 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1635 {
1636 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1637 }
1638
1639 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1640 {
1641 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1642 }
1643
1644 static inline void gen_op_fcmpq(int fccno)
1645 {
1646 gen_helper_fcmpq(cpu_env);
1647 }
1648
1649 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1650 {
1651 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1652 }
1653
1654 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1655 {
1656 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1657 }
1658
1659 static inline void gen_op_fcmpeq(int fccno)
1660 {
1661 gen_helper_fcmpeq(cpu_env);
1662 }
1663 #endif
1664
1665 static inline void gen_op_fpexception_im(int fsr_flags)
1666 {
1667 TCGv_i32 r_const;
1668
1669 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1670 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1671 r_const = tcg_const_i32(TT_FP_EXCP);
1672 gen_helper_raise_exception(cpu_env, r_const);
1673 tcg_temp_free_i32(r_const);
1674 }
1675
1676 static int gen_trap_ifnofpu(DisasContext *dc)
1677 {
1678 #if !defined(CONFIG_USER_ONLY)
1679 if (!dc->fpu_enabled) {
1680 TCGv_i32 r_const;
1681
1682 save_state(dc);
1683 r_const = tcg_const_i32(TT_NFPU_INSN);
1684 gen_helper_raise_exception(cpu_env, r_const);
1685 tcg_temp_free_i32(r_const);
1686 dc->is_br = 1;
1687 return 1;
1688 }
1689 #endif
1690 return 0;
1691 }
1692
1693 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1694 {
1695 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1696 }
1697
1698 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1699 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1700 {
1701 TCGv_i32 dst, src;
1702
1703 src = gen_load_fpr_F(dc, rs);
1704 dst = gen_dest_fpr_F(dc);
1705
1706 gen(dst, cpu_env, src);
1707
1708 gen_store_fpr_F(dc, rd, dst);
1709 }
1710
1711 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1712 void (*gen)(TCGv_i32, TCGv_i32))
1713 {
1714 TCGv_i32 dst, src;
1715
1716 src = gen_load_fpr_F(dc, rs);
1717 dst = gen_dest_fpr_F(dc);
1718
1719 gen(dst, src);
1720
1721 gen_store_fpr_F(dc, rd, dst);
1722 }
1723
1724 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1725 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1726 {
1727 TCGv_i32 dst, src1, src2;
1728
1729 src1 = gen_load_fpr_F(dc, rs1);
1730 src2 = gen_load_fpr_F(dc, rs2);
1731 dst = gen_dest_fpr_F(dc);
1732
1733 gen(dst, cpu_env, src1, src2);
1734
1735 gen_store_fpr_F(dc, rd, dst);
1736 }
1737
1738 #ifdef TARGET_SPARC64
1739 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1740 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1741 {
1742 TCGv_i32 dst, src1, src2;
1743
1744 src1 = gen_load_fpr_F(dc, rs1);
1745 src2 = gen_load_fpr_F(dc, rs2);
1746 dst = gen_dest_fpr_F(dc);
1747
1748 gen(dst, src1, src2);
1749
1750 gen_store_fpr_F(dc, rd, dst);
1751 }
1752 #endif
1753
1754 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1755 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1756 {
1757 TCGv_i64 dst, src;
1758
1759 src = gen_load_fpr_D(dc, rs);
1760 dst = gen_dest_fpr_D(dc, rd);
1761
1762 gen(dst, cpu_env, src);
1763
1764 gen_store_fpr_D(dc, rd, dst);
1765 }
1766
1767 #ifdef TARGET_SPARC64
1768 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1769 void (*gen)(TCGv_i64, TCGv_i64))
1770 {
1771 TCGv_i64 dst, src;
1772
1773 src = gen_load_fpr_D(dc, rs);
1774 dst = gen_dest_fpr_D(dc, rd);
1775
1776 gen(dst, src);
1777
1778 gen_store_fpr_D(dc, rd, dst);
1779 }
1780 #endif
1781
1782 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1783 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1784 {
1785 TCGv_i64 dst, src1, src2;
1786
1787 src1 = gen_load_fpr_D(dc, rs1);
1788 src2 = gen_load_fpr_D(dc, rs2);
1789 dst = gen_dest_fpr_D(dc, rd);
1790
1791 gen(dst, cpu_env, src1, src2);
1792
1793 gen_store_fpr_D(dc, rd, dst);
1794 }
1795
1796 #ifdef TARGET_SPARC64
1797 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1798 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1799 {
1800 TCGv_i64 dst, src1, src2;
1801
1802 src1 = gen_load_fpr_D(dc, rs1);
1803 src2 = gen_load_fpr_D(dc, rs2);
1804 dst = gen_dest_fpr_D(dc, rd);
1805
1806 gen(dst, src1, src2);
1807
1808 gen_store_fpr_D(dc, rd, dst);
1809 }
1810
1811 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1812 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1813 {
1814 TCGv_i64 dst, src1, src2;
1815
1816 src1 = gen_load_fpr_D(dc, rs1);
1817 src2 = gen_load_fpr_D(dc, rs2);
1818 dst = gen_dest_fpr_D(dc, rd);
1819
1820 gen(dst, cpu_gsr, src1, src2);
1821
1822 gen_store_fpr_D(dc, rd, dst);
1823 }
1824
1825 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1826 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1827 {
1828 TCGv_i64 dst, src0, src1, src2;
1829
1830 src1 = gen_load_fpr_D(dc, rs1);
1831 src2 = gen_load_fpr_D(dc, rs2);
1832 src0 = gen_load_fpr_D(dc, rd);
1833 dst = gen_dest_fpr_D(dc, rd);
1834
1835 gen(dst, src0, src1, src2);
1836
1837 gen_store_fpr_D(dc, rd, dst);
1838 }
1839 #endif
1840
1841 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1842 void (*gen)(TCGv_ptr))
1843 {
1844 gen_op_load_fpr_QT1(QFPREG(rs));
1845
1846 gen(cpu_env);
1847
1848 gen_op_store_QT0_fpr(QFPREG(rd));
1849 gen_update_fprs_dirty(QFPREG(rd));
1850 }
1851
1852 #ifdef TARGET_SPARC64
1853 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1854 void (*gen)(TCGv_ptr))
1855 {
1856 gen_op_load_fpr_QT1(QFPREG(rs));
1857
1858 gen(cpu_env);
1859
1860 gen_op_store_QT0_fpr(QFPREG(rd));
1861 gen_update_fprs_dirty(QFPREG(rd));
1862 }
1863 #endif
1864
1865 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1866 void (*gen)(TCGv_ptr))
1867 {
1868 gen_op_load_fpr_QT0(QFPREG(rs1));
1869 gen_op_load_fpr_QT1(QFPREG(rs2));
1870
1871 gen(cpu_env);
1872
1873 gen_op_store_QT0_fpr(QFPREG(rd));
1874 gen_update_fprs_dirty(QFPREG(rd));
1875 }
1876
1877 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1878 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1879 {
1880 TCGv_i64 dst;
1881 TCGv_i32 src1, src2;
1882
1883 src1 = gen_load_fpr_F(dc, rs1);
1884 src2 = gen_load_fpr_F(dc, rs2);
1885 dst = gen_dest_fpr_D(dc, rd);
1886
1887 gen(dst, cpu_env, src1, src2);
1888
1889 gen_store_fpr_D(dc, rd, dst);
1890 }
1891
1892 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1893 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1894 {
1895 TCGv_i64 src1, src2;
1896
1897 src1 = gen_load_fpr_D(dc, rs1);
1898 src2 = gen_load_fpr_D(dc, rs2);
1899
1900 gen(cpu_env, src1, src2);
1901
1902 gen_op_store_QT0_fpr(QFPREG(rd));
1903 gen_update_fprs_dirty(QFPREG(rd));
1904 }
1905
1906 #ifdef TARGET_SPARC64
1907 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1908 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1909 {
1910 TCGv_i64 dst;
1911 TCGv_i32 src;
1912
1913 src = gen_load_fpr_F(dc, rs);
1914 dst = gen_dest_fpr_D(dc, rd);
1915
1916 gen(dst, cpu_env, src);
1917
1918 gen_store_fpr_D(dc, rd, dst);
1919 }
1920 #endif
1921
1922 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1923 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1924 {
1925 TCGv_i64 dst;
1926 TCGv_i32 src;
1927
1928 src = gen_load_fpr_F(dc, rs);
1929 dst = gen_dest_fpr_D(dc, rd);
1930
1931 gen(dst, cpu_env, src);
1932
1933 gen_store_fpr_D(dc, rd, dst);
1934 }
1935
1936 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1937 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1938 {
1939 TCGv_i32 dst;
1940 TCGv_i64 src;
1941
1942 src = gen_load_fpr_D(dc, rs);
1943 dst = gen_dest_fpr_F(dc);
1944
1945 gen(dst, cpu_env, src);
1946
1947 gen_store_fpr_F(dc, rd, dst);
1948 }
1949
1950 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1951 void (*gen)(TCGv_i32, TCGv_ptr))
1952 {
1953 TCGv_i32 dst;
1954
1955 gen_op_load_fpr_QT1(QFPREG(rs));
1956 dst = gen_dest_fpr_F(dc);
1957
1958 gen(dst, cpu_env);
1959
1960 gen_store_fpr_F(dc, rd, dst);
1961 }
1962
1963 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1964 void (*gen)(TCGv_i64, TCGv_ptr))
1965 {
1966 TCGv_i64 dst;
1967
1968 gen_op_load_fpr_QT1(QFPREG(rs));
1969 dst = gen_dest_fpr_D(dc, rd);
1970
1971 gen(dst, cpu_env);
1972
1973 gen_store_fpr_D(dc, rd, dst);
1974 }
1975
1976 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1977 void (*gen)(TCGv_ptr, TCGv_i32))
1978 {
1979 TCGv_i32 src;
1980
1981 src = gen_load_fpr_F(dc, rs);
1982
1983 gen(cpu_env, src);
1984
1985 gen_op_store_QT0_fpr(QFPREG(rd));
1986 gen_update_fprs_dirty(QFPREG(rd));
1987 }
1988
1989 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1990 void (*gen)(TCGv_ptr, TCGv_i64))
1991 {
1992 TCGv_i64 src;
1993
1994 src = gen_load_fpr_D(dc, rs);
1995
1996 gen(cpu_env, src);
1997
1998 gen_op_store_QT0_fpr(QFPREG(rd));
1999 gen_update_fprs_dirty(QFPREG(rd));
2000 }
2001
2002 /* asi moves */
2003 #ifdef TARGET_SPARC64
2004 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
2005 {
2006 int asi;
2007 TCGv_i32 r_asi;
2008
2009 if (IS_IMM) {
2010 r_asi = tcg_temp_new_i32();
2011 tcg_gen_mov_i32(r_asi, cpu_asi);
2012 } else {
2013 asi = GET_FIELD(insn, 19, 26);
2014 r_asi = tcg_const_i32(asi);
2015 }
2016 return r_asi;
2017 }
2018
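/*
 * Example (editor's note): with the i bit set ("ldxa [%o0 + 4] %asi, %o1")
 * the ASI is taken from the %asi register at run time; with i clear
 * ("ldxa [%o0 + %g1] 0x80, %o1") the 8-bit ASI (here ASI_PRIMARY) is an
 * immediate in insn bits 12:5, extracted by GET_FIELD(insn, 19, 26).
 */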
2019 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2020 int sign)
2021 {
2022 TCGv_i32 r_asi, r_size, r_sign;
2023
2024 r_asi = gen_get_asi(insn, addr);
2025 r_size = tcg_const_i32(size);
2026 r_sign = tcg_const_i32(sign);
2027 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
2028 tcg_temp_free_i32(r_sign);
2029 tcg_temp_free_i32(r_size);
2030 tcg_temp_free_i32(r_asi);
2031 }
2032
2033 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2034 {
2035 TCGv_i32 r_asi, r_size;
2036
2037 r_asi = gen_get_asi(insn, addr);
2038 r_size = tcg_const_i32(size);
2039 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2040 tcg_temp_free_i32(r_size);
2041 tcg_temp_free_i32(r_asi);
2042 }
2043
2044 static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
2045 {
2046 TCGv_i32 r_asi, r_size, r_rd;
2047
2048 r_asi = gen_get_asi(insn, addr);
2049 r_size = tcg_const_i32(size);
2050 r_rd = tcg_const_i32(rd);
2051 gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2052 tcg_temp_free_i32(r_rd);
2053 tcg_temp_free_i32(r_size);
2054 tcg_temp_free_i32(r_asi);
2055 }
2056
2057 static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
2058 {
2059 TCGv_i32 r_asi, r_size, r_rd;
2060
2061 r_asi = gen_get_asi(insn, addr);
2062 r_size = tcg_const_i32(size);
2063 r_rd = tcg_const_i32(rd);
2064 gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2065 tcg_temp_free_i32(r_rd);
2066 tcg_temp_free_i32(r_size);
2067 tcg_temp_free_i32(r_asi);
2068 }
2069
2070 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2071 {
2072 TCGv_i32 r_asi, r_size, r_sign;
2073 TCGv_i64 t64 = tcg_temp_new_i64();
2074
2075 r_asi = gen_get_asi(insn, addr);
2076 r_size = tcg_const_i32(4);
2077 r_sign = tcg_const_i32(0);
2078 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2079 tcg_temp_free_i32(r_sign);
2080 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2081 tcg_temp_free_i32(r_size);
2082 tcg_temp_free_i32(r_asi);
2083 tcg_gen_trunc_i64_tl(dst, t64);
2084 tcg_temp_free_i64(t64);
2085 }
2086
2087 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2088 int insn, int rd)
2089 {
2090 TCGv_i32 r_asi, r_rd;
2091
2092 r_asi = gen_get_asi(insn, addr);
2093 r_rd = tcg_const_i32(rd);
2094 gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
2095 tcg_temp_free_i32(r_rd);
2096 tcg_temp_free_i32(r_asi);
2097 }
2098
2099 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2100 int insn, int rd)
2101 {
2102 TCGv_i32 r_asi, r_size;
2103 TCGv lo = gen_load_gpr(dc, rd + 1);
2104 TCGv_i64 t64 = tcg_temp_new_i64();
2105
2106 tcg_gen_concat_tl_i64(t64, lo, hi);
2107 r_asi = gen_get_asi(insn, addr);
2108 r_size = tcg_const_i32(8);
2109 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
2110 tcg_temp_free_i32(r_size);
2111 tcg_temp_free_i32(r_asi);
2112 tcg_temp_free_i64(t64);
2113 }
2114
2115 static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
2116 TCGv val2, int insn, int rd)
2117 {
2118 TCGv val1 = gen_load_gpr(dc, rd);
2119 TCGv dst = gen_dest_gpr(dc, rd);
2120 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2121
2122 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2123 tcg_temp_free_i32(r_asi);
2124 gen_store_gpr(dc, rd, dst);
2125 }
2126
2127 static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
2128 TCGv val2, int insn, int rd)
2129 {
2130 TCGv val1 = gen_load_gpr(dc, rd);
2131 TCGv dst = gen_dest_gpr(dc, rd);
2132 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2133
2134 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2135 tcg_temp_free_i32(r_asi);
2136 gen_store_gpr(dc, rd, dst);
2137 }
2138
2139 #elif !defined(CONFIG_USER_ONLY)
2140
2141 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2142 int sign)
2143 {
2144 TCGv_i32 r_asi, r_size, r_sign;
2145 TCGv_i64 t64 = tcg_temp_new_i64();
2146
2147 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2148 r_size = tcg_const_i32(size);
2149 r_sign = tcg_const_i32(sign);
2150 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2151 tcg_temp_free_i32(r_sign);
2152 tcg_temp_free_i32(r_size);
2153 tcg_temp_free_i32(r_asi);
2154 tcg_gen_trunc_i64_tl(dst, t64);
2155 tcg_temp_free_i64(t64);
2156 }
2157
2158 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2159 {
2160 TCGv_i32 r_asi, r_size;
2161 TCGv_i64 t64 = tcg_temp_new_i64();
2162
2163 tcg_gen_extu_tl_i64(t64, src);
2164 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2165 r_size = tcg_const_i32(size);
2166 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
2167 tcg_temp_free_i32(r_size);
2168 tcg_temp_free_i32(r_asi);
2169 tcg_temp_free_i64(t64);
2170 }
2171
2172 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2173 {
2174 TCGv_i32 r_asi, r_size, r_sign;
2175 TCGv_i64 r_val, t64;
2176
2177 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2178 r_size = tcg_const_i32(4);
2179 r_sign = tcg_const_i32(0);
2180 t64 = tcg_temp_new_i64();
2181 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2182     tcg_temp_free_i32(r_sign);
2183 r_val = tcg_temp_new_i64();
2184 tcg_gen_extu_tl_i64(r_val, src);
2185 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2186 tcg_temp_free_i64(r_val);
2187 tcg_temp_free_i32(r_size);
2188 tcg_temp_free_i32(r_asi);
2189 tcg_gen_trunc_i64_tl(dst, t64);
2190 tcg_temp_free_i64(t64);
2191 }
2192
2193 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2194 int insn, int rd)
2195 {
2196 TCGv_i32 r_asi, r_size, r_sign;
2197 TCGv t;
2198 TCGv_i64 t64;
2199
2200 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2201 r_size = tcg_const_i32(8);
2202 r_sign = tcg_const_i32(0);
2203 t64 = tcg_temp_new_i64();
2204 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2205 tcg_temp_free_i32(r_sign);
2206 tcg_temp_free_i32(r_size);
2207 tcg_temp_free_i32(r_asi);
2208
2209 t = gen_dest_gpr(dc, rd + 1);
2210 tcg_gen_trunc_i64_tl(t, t64);
2211 gen_store_gpr(dc, rd + 1, t);
2212
2213 tcg_gen_shri_i64(t64, t64, 32);
2214 tcg_gen_trunc_i64_tl(hi, t64);
2215 tcg_temp_free_i64(t64);
2216 gen_store_gpr(dc, rd, hi);
2217 }
2218
2219 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2220 int insn, int rd)
2221 {
2222 TCGv_i32 r_asi, r_size;
2223 TCGv lo = gen_load_gpr(dc, rd + 1);
2224 TCGv_i64 t64 = tcg_temp_new_i64();
2225
2226 tcg_gen_concat_tl_i64(t64, lo, hi);
2227 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2228 r_size = tcg_const_i32(8);
2229 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
2230 tcg_temp_free_i32(r_size);
2231 tcg_temp_free_i32(r_asi);
2232 tcg_temp_free_i64(t64);
2233 }
2234 #endif
2235
2236 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
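/* LDSTUB: load an unsigned byte, then store all ones to the same
   location -- the classic SPARC test-and-set.  As with swap, the
   load/store pair relies on TCG's non-preemptive execution.  */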
2237 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2238 {
2239 TCGv_i64 r_val;
2240 TCGv_i32 r_asi, r_size;
2241
2242 gen_ld_asi(dst, addr, insn, 1, 0);
2243
2244 r_val = tcg_const_i64(0xffULL);
2245 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2246 r_size = tcg_const_i32(1);
2247 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2248 tcg_temp_free_i32(r_size);
2249 tcg_temp_free_i32(r_asi);
2250 tcg_temp_free_i64(r_val);
2251 }
2252 #endif
2253
2254 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2255 {
2256 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2257 return gen_load_gpr(dc, rs1);
2258 }
2259
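/* The i bit (insn<13>, tested by IS_IMM) selects between a
   sign-extended 13-bit immediate and register rs2.  */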
2260 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2261 {
2262 if (IS_IMM) { /* immediate */
2263 target_long simm = GET_FIELDs(insn, 19, 31);
2264 TCGv t = get_temp_tl(dc);
2265 tcg_gen_movi_tl(t, simm);
2266 return t;
2267 } else { /* register */
2268 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2269 return gen_load_gpr(dc, rs2);
2270 }
2271 }
2272
2273 #ifdef TARGET_SPARC64
2274 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2275 {
2276 TCGv_i32 c32, zero, dst, s1, s2;
2277
2278 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2279 or fold the comparison down to 32 bits and use movcond_i32. Choose
2280 the latter. */
2281 c32 = tcg_temp_new_i32();
2282 if (cmp->is_bool) {
2283 tcg_gen_trunc_i64_i32(c32, cmp->c1);
2284 } else {
2285 TCGv_i64 c64 = tcg_temp_new_i64();
2286 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2287 tcg_gen_trunc_i64_i32(c32, c64);
2288 tcg_temp_free_i64(c64);
2289 }
2290
2291 s1 = gen_load_fpr_F(dc, rs);
2292 s2 = gen_load_fpr_F(dc, rd);
2293 dst = gen_dest_fpr_F(dc);
2294 zero = tcg_const_i32(0);
2295
2296 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2297
2298 tcg_temp_free_i32(c32);
2299 tcg_temp_free_i32(zero);
2300 gen_store_fpr_F(dc, rd, dst);
2301 }
2302
2303 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2304 {
2305 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2306 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2307 gen_load_fpr_D(dc, rs),
2308 gen_load_fpr_D(dc, rd));
2309 gen_store_fpr_D(dc, rd, dst);
2310 }
2311
2312 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2313 {
2314 int qd = QFPREG(rd);
2315 int qs = QFPREG(rs);
2316
2317 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2318 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2319 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2320 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2321
2322 gen_update_fprs_dirty(qd);
2323 }
2324
2325 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2326 {
2327 TCGv_i32 r_tl = tcg_temp_new_i32();
2328
2329 /* load env->tl into r_tl */
2330 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2331
2332 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2333 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2334
2335 /* calculate offset to current trap state from env->ts, reuse r_tl */
2336 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2337 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2338
2339 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2340 {
2341 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2342 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2343 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2344 tcg_temp_free_ptr(r_tl_tmp);
2345 }
2346
2347 tcg_temp_free_i32(r_tl);
2348 }
2349
2350 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2351 int width, bool cc, bool left)
2352 {
2353 TCGv lo1, lo2, t1, t2;
2354 uint64_t amask, tabl, tabr;
2355 int shift, imask, omask;
2356
2357 if (cc) {
2358 tcg_gen_mov_tl(cpu_cc_src, s1);
2359 tcg_gen_mov_tl(cpu_cc_src2, s2);
2360 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2361 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2362 dc->cc_op = CC_OP_SUB;
2363 }
2364
2365 /* Theory of operation: there are two tables, left and right (not to
2366 be confused with the left and right versions of the opcode). These
2367 are indexed by the low 3 bits of the inputs. To make things "easy",
2368 these tables are loaded into two constants, TABL and TABR below.
2369 The operation index = (input & imask) << shift calculates the index
2370 into the constant, while val = (table >> index) & omask calculates
2371 the value we're looking for. */
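/* Example: width 8, left, (s1 & 7) == 2: index = 2 << 3 = 16 and
   (tabl >> 16) & 0xff = 0xfc, the left edge mask two bytes into
   the doubleword.  */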
2372 switch (width) {
2373 case 8:
2374 imask = 0x7;
2375 shift = 3;
2376 omask = 0xff;
2377 if (left) {
2378 tabl = 0x80c0e0f0f8fcfeffULL;
2379 tabr = 0xff7f3f1f0f070301ULL;
2380 } else {
2381 tabl = 0x0103070f1f3f7fffULL;
2382 tabr = 0xfffefcf8f0e0c080ULL;
2383 }
2384 break;
2385 case 16:
2386 imask = 0x6;
2387 shift = 1;
2388 omask = 0xf;
2389 if (left) {
2390 tabl = 0x8cef;
2391 tabr = 0xf731;
2392 } else {
2393 tabl = 0x137f;
2394 tabr = 0xfec8;
2395 }
2396 break;
2397 case 32:
2398 imask = 0x4;
2399 shift = 0;
2400 omask = 0x3;
2401 if (left) {
2402 tabl = (2 << 2) | 3;
2403 tabr = (3 << 2) | 1;
2404 } else {
2405 tabl = (1 << 2) | 3;
2406 tabr = (3 << 2) | 2;
2407 }
2408 break;
2409 default:
2410 abort();
2411 }
2412
2413 lo1 = tcg_temp_new();
2414 lo2 = tcg_temp_new();
2415 tcg_gen_andi_tl(lo1, s1, imask);
2416 tcg_gen_andi_tl(lo2, s2, imask);
2417 tcg_gen_shli_tl(lo1, lo1, shift);
2418 tcg_gen_shli_tl(lo2, lo2, shift);
2419
2420 t1 = tcg_const_tl(tabl);
2421 t2 = tcg_const_tl(tabr);
2422 tcg_gen_shr_tl(lo1, t1, lo1);
2423 tcg_gen_shr_tl(lo2, t2, lo2);
2424 tcg_gen_andi_tl(dst, lo1, omask);
2425 tcg_gen_andi_tl(lo2, lo2, omask);
2426
2427 amask = -8;
2428 if (AM_CHECK(dc)) {
2429 amask &= 0xffffffffULL;
2430 }
2431 tcg_gen_andi_tl(s1, s1, amask);
2432 tcg_gen_andi_tl(s2, s2, amask);
2433
2434 /* We want to compute
2435 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2436 We've already done dst = lo1, so this reduces to
2437 dst &= (s1 == s2 ? -1 : lo2)
2438 Which we perform by
2439 lo2 |= -(s1 == s2)
2440 dst &= lo2
2441 */
2442 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2443 tcg_gen_neg_tl(t1, t1);
2444 tcg_gen_or_tl(lo2, lo2, t1);
2445 tcg_gen_and_tl(dst, dst, lo2);
2446
2447 tcg_temp_free(lo1);
2448 tcg_temp_free(lo2);
2449 tcg_temp_free(t1);
2450 tcg_temp_free(t2);
2451 }
2452
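/* ALIGNADDRESS: dst = (s1 + s2) & ~7; the discarded low three bits
   (negated for the little-endian variant) are saved in GSR.align for
   a subsequent FALIGNDATA.  */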
2453 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2454 {
2455 TCGv tmp = tcg_temp_new();
2456
2457 tcg_gen_add_tl(tmp, s1, s2);
2458 tcg_gen_andi_tl(dst, tmp, -8);
2459 if (left) {
2460 tcg_gen_neg_tl(tmp, tmp);
2461 }
2462 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2463
2464 tcg_temp_free(tmp);
2465 }
2466
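/* FALIGNDATA: dst = (s1 << (8 * GSR.align))
                   | (s2 >> (64 - 8 * GSR.align)),
   i.e. the eight bytes starting at byte offset GSR.align within the
   big-endian concatenation s1:s2.  */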
2467 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2468 {
2469 TCGv t1, t2, shift;
2470
2471 t1 = tcg_temp_new();
2472 t2 = tcg_temp_new();
2473 shift = tcg_temp_new();
2474
2475 tcg_gen_andi_tl(shift, gsr, 7);
2476 tcg_gen_shli_tl(shift, shift, 3);
2477 tcg_gen_shl_tl(t1, s1, shift);
2478
2479 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2480 shift of (up to 63) followed by a constant shift of 1. */
2481 tcg_gen_xori_tl(shift, shift, 63);
2482 tcg_gen_shr_tl(t2, s2, shift);
2483 tcg_gen_shri_tl(t2, t2, 1);
2484
2485 tcg_gen_or_tl(dst, t1, t2);
2486
2487 tcg_temp_free(t1);
2488 tcg_temp_free(t2);
2489 tcg_temp_free(shift);
2490 }
2491 #endif
2492
2493 #define CHECK_IU_FEATURE(dc, FEATURE) \
2494 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2495 goto illegal_insn;
2496 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2497 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2498 goto nfpu_insn;
2499
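/* Instruction format: op = insn<31:30> (GET_FIELD numbers bits from
   the MSB, hence bits 0-1 below): 0 = branches/sethi, 1 = call,
   2 = arithmetic/FPU, 3 = loads/stores (handled further below).  */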
2500 /* before an instruction, dc->pc must be static */
2501 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2502 {
2503 unsigned int opc, rs1, rs2, rd;
2504 TCGv cpu_src1, cpu_src2;
2505 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2506 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2507 target_long simm;
2508
2509 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2510 tcg_gen_debug_insn_start(dc->pc);
2511 }
2512
2513 opc = GET_FIELD(insn, 0, 1);
2514
2515 rd = GET_FIELD(insn, 2, 6);
2516
2517 switch (opc) {
2518 case 0: /* branches/sethi */
2519 {
2520 unsigned int xop = GET_FIELD(insn, 7, 9);
2521 int32_t target;
2522 switch (xop) {
2523 #ifdef TARGET_SPARC64
2524 case 0x1: /* V9 BPcc */
2525 {
2526 int cc;
2527
2528 target = GET_FIELD_SP(insn, 0, 18);
2529 target = sign_extend(target, 19);
2530 target <<= 2;
2531 cc = GET_FIELD_SP(insn, 20, 21);
2532 if (cc == 0)
2533 do_branch(dc, target, insn, 0);
2534 else if (cc == 2)
2535 do_branch(dc, target, insn, 1);
2536 else
2537 goto illegal_insn;
2538 goto jmp_insn;
2539 }
2540 case 0x3: /* V9 BPr */
2541 {
2542 target = GET_FIELD_SP(insn, 0, 13) |
2543 (GET_FIELD_SP(insn, 20, 21) << 14);
2544 target = sign_extend(target, 16);
2545 target <<= 2;
2546 cpu_src1 = get_src1(dc, insn);
2547 do_branch_reg(dc, target, insn, cpu_src1);
2548 goto jmp_insn;
2549 }
2550 case 0x5: /* V9 FBPcc */
2551 {
2552 int cc = GET_FIELD_SP(insn, 20, 21);
2553 if (gen_trap_ifnofpu(dc)) {
2554 goto jmp_insn;
2555 }
2556 target = GET_FIELD_SP(insn, 0, 18);
2557 target = sign_extend(target, 19);
2558 target <<= 2;
2559 do_fbranch(dc, target, insn, cc);
2560 goto jmp_insn;
2561 }
2562 #else
2563 case 0x7: /* CBN+x */
2564 {
2565 goto ncp_insn;
2566 }
2567 #endif
2568 case 0x2: /* BN+x */
2569 {
2570 target = GET_FIELD(insn, 10, 31);
2571 target = sign_extend(target, 22);
2572 target <<= 2;
2573 do_branch(dc, target, insn, 0);
2574 goto jmp_insn;
2575 }
2576 case 0x6: /* FBN+x */
2577 {
2578 if (gen_trap_ifnofpu(dc)) {
2579 goto jmp_insn;
2580 }
2581 target = GET_FIELD(insn, 10, 31);
2582 target = sign_extend(target, 22);
2583 target <<= 2;
2584 do_fbranch(dc, target, insn, 0);
2585 goto jmp_insn;
2586 }
2587 case 0x4: /* SETHI */
2588 /* Special-case %g0 because that's the canonical nop. */
2589 if (rd) {
2590 uint32_t value = GET_FIELD(insn, 10, 31);
2591 TCGv t = gen_dest_gpr(dc, rd);
2592 tcg_gen_movi_tl(t, value << 10);
2593 gen_store_gpr(dc, rd, t);
2594 }
2595 break;
2596 case 0x0: /* UNIMPL */
2597 default:
2598 goto illegal_insn;
2599 }
2600 break;
2601 }
2602 break;
2603 case 1: /*CALL*/
2604 {
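/* CALL: 30-bit word displacement; the address of the CALL insn
   itself is written to %o7 (r15).  */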
2605 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2606 TCGv o7 = gen_dest_gpr(dc, 15);
2607
2608 tcg_gen_movi_tl(o7, dc->pc);
2609 gen_store_gpr(dc, 15, o7);
2610 target += dc->pc;
2611 gen_mov_pc_npc(dc);
2612 #ifdef TARGET_SPARC64
2613 if (unlikely(AM_CHECK(dc))) {
2614 target &= 0xffffffffULL;
2615 }
2616 #endif
2617 dc->npc = target;
2618 }
2619 goto jmp_insn;
2620 case 2: /* FPU & Logical Operations */
2621 {
2622 unsigned int xop = GET_FIELD(insn, 7, 12);
2623 if (xop == 0x3a) { /* generate trap */
2624 int cond = GET_FIELD(insn, 3, 6);
2625 TCGv_i32 trap;
2626 int l1 = -1, mask;
2627
2628 if (cond == 0) {
2629 /* Trap never. */
2630 break;
2631 }
2632
2633 save_state(dc);
2634
2635 if (cond != 8) {
2636 /* Conditional trap. */
2637 DisasCompare cmp;
2638 #ifdef TARGET_SPARC64
2639 /* V9 icc/xcc */
2640 int cc = GET_FIELD_SP(insn, 11, 12);
2641 if (cc == 0) {
2642 gen_compare(&cmp, 0, cond, dc);
2643 } else if (cc == 2) {
2644 gen_compare(&cmp, 1, cond, dc);
2645 } else {
2646 goto illegal_insn;
2647 }
2648 #else
2649 gen_compare(&cmp, 0, cond, dc);
2650 #endif
2651 l1 = gen_new_label();
2652 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2653 cmp.c1, cmp.c2, l1);
2654 free_compare(&cmp);
2655 }
2656
2657 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2658 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2659
2660 /* Don't use the normal temporaries, as they may well have
2661 gone out of scope with the branch above. While we're
2662 doing that we might as well pre-truncate to 32-bit. */
2663 trap = tcg_temp_new_i32();
2664
2665 rs1 = GET_FIELD_SP(insn, 14, 18);
2666 if (IS_IMM) {
2667 rs2 = GET_FIELD_SP(insn, 0, 6);
2668 if (rs1 == 0) {
2669 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2670 /* Signal that the trap value is fully constant. */
2671 mask = 0;
2672 } else {
2673 TCGv t1 = gen_load_gpr(dc, rs1);
2674 tcg_gen_trunc_tl_i32(trap, t1);
2675 tcg_gen_addi_i32(trap, trap, rs2);
2676 }
2677 } else {
2678 TCGv t1, t2;
2679 rs2 = GET_FIELD_SP(insn, 0, 4);
2680 t1 = gen_load_gpr(dc, rs1);
2681 t2 = gen_load_gpr(dc, rs2);
2682 tcg_gen_add_tl(t1, t1, t2);
2683 tcg_gen_trunc_tl_i32(trap, t1);
2684 }
2685 if (mask != 0) {
2686 tcg_gen_andi_i32(trap, trap, mask);
2687 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2688 }
2689
2690 gen_helper_raise_exception(cpu_env, trap);
2691 tcg_temp_free_i32(trap);
2692
2693 if (cond == 8) {
2694 /* An unconditional trap ends the TB. */
2695 dc->is_br = 1;
2696 goto jmp_insn;
2697 } else {
2698 /* A conditional trap falls through to the next insn. */
2699 gen_set_label(l1);
2700 break;
2701 }
2702 } else if (xop == 0x28) {
2703 rs1 = GET_FIELD(insn, 13, 17);
2704 switch(rs1) {
2705 case 0: /* rdy */
2706 #ifndef TARGET_SPARC64
2707 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2708 manual, rdy on the microSPARC
2709 II */
2710 case 0x0f: /* stbar in the SPARCv8 manual,
2711 rdy on the microSPARC II */
2712 case 0x10 ... 0x1f: /* implementation-dependent in the
2713 SPARCv8 manual, rdy on the
2714 microSPARC II */
2715 /* Read Asr17 */
2716 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2717 TCGv t = gen_dest_gpr(dc, rd);
2718 /* Read Asr17 for a Leon3 monoprocessor */
2719 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2720 gen_store_gpr(dc, rd, t);
2721 break;
2722 }
2723 #endif
2724 gen_store_gpr(dc, rd, cpu_y);
2725 break;
2726 #ifdef TARGET_SPARC64
2727 case 0x2: /* V9 rdccr */
2728 update_psr(dc);
2729 gen_helper_rdccr(cpu_dst, cpu_env);
2730 gen_store_gpr(dc, rd, cpu_dst);
2731 break;
2732 case 0x3: /* V9 rdasi */
2733 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2734 gen_store_gpr(dc, rd, cpu_dst);
2735 break;
2736 case 0x4: /* V9 rdtick */
2737 {
2738 TCGv_ptr r_tickptr;
2739
2740 r_tickptr = tcg_temp_new_ptr();
2741 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2742 offsetof(CPUSPARCState, tick));
2743 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2744 tcg_temp_free_ptr(r_tickptr);
2745 gen_store_gpr(dc, rd, cpu_dst);
2746 }
2747 break;
2748 case 0x5: /* V9 rdpc */
2749 {
2750 TCGv t = gen_dest_gpr(dc, rd);
2751 if (unlikely(AM_CHECK(dc))) {
2752 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2753 } else {
2754 tcg_gen_movi_tl(t, dc->pc);
2755 }
2756 gen_store_gpr(dc, rd, t);
2757 }
2758 break;
2759 case 0x6: /* V9 rdfprs */
2760 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2761 gen_store_gpr(dc, rd, cpu_dst);
2762 break;
2763 case 0xf: /* V9 membar */
2764 break; /* no effect */
2765 case 0x13: /* Graphics Status */
2766 if (gen_trap_ifnofpu(dc)) {
2767 goto jmp_insn;
2768 }
2769 gen_store_gpr(dc, rd, cpu_gsr);
2770 break;
2771 case 0x16: /* Softint */
2772 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2773 gen_store_gpr(dc, rd, cpu_dst);
2774 break;
2775 case 0x17: /* Tick compare */
2776 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2777 break;
2778 case 0x18: /* System tick */
2779 {
2780 TCGv_ptr r_tickptr;
2781
2782 r_tickptr = tcg_temp_new_ptr();
2783 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2784 offsetof(CPUSPARCState, stick));
2785 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2786 tcg_temp_free_ptr(r_tickptr);
2787 gen_store_gpr(dc, rd, cpu_dst);
2788 }
2789 break;
2790 case 0x19: /* System tick compare */
2791 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2792 break;
2793 case 0x10: /* Performance Control */
2794 case 0x11: /* Performance Instrumentation Counter */
2795 case 0x12: /* Dispatch Control */
2796 case 0x14: /* Softint set, WO */
2797 case 0x15: /* Softint clear, WO */
2798 #endif
2799 default:
2800 goto illegal_insn;
2801 }
2802 #if !defined(CONFIG_USER_ONLY)
2803 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2804 #ifndef TARGET_SPARC64
2805 if (!supervisor(dc)) {
2806 goto priv_insn;
2807 }
2808 update_psr(dc);
2809 gen_helper_rdpsr(cpu_dst, cpu_env);
2810 #else
2811 CHECK_IU_FEATURE(dc, HYPV);
2812 if (!hypervisor(dc))
2813 goto priv_insn;
2814 rs1 = GET_FIELD(insn, 13, 17);
2815 switch (rs1) {
2816 case 0: // hpstate
2817 // gen_op_rdhpstate();
2818 break;
2819 case 1: // htstate
2820 // gen_op_rdhtstate();
2821 break;
2822 case 3: // hintp
2823 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2824 break;
2825 case 5: // htba
2826 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2827 break;
2828 case 6: // hver
2829 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2830 break;
2831 case 31: // hstick_cmpr
2832 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2833 break;
2834 default:
2835 goto illegal_insn;
2836 }
2837 #endif
2838 gen_store_gpr(dc, rd, cpu_dst);
2839 break;
2840 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2841 if (!supervisor(dc))
2842 goto priv_insn;
2843 #ifdef TARGET_SPARC64
2844 rs1 = GET_FIELD(insn, 13, 17);
2845 switch (rs1) {
2846 case 0: // tpc
2847 {
2848 TCGv_ptr r_tsptr;
2849
2850 r_tsptr = tcg_temp_new_ptr();
2851 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2852 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2853 offsetof(trap_state, tpc));
2854 tcg_temp_free_ptr(r_tsptr);
2855 }
2856 break;
2857 case 1: // tnpc
2858 {
2859 TCGv_ptr r_tsptr;
2860
2861 r_tsptr = tcg_temp_new_ptr();
2862 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2863 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2864 offsetof(trap_state, tnpc));
2865 tcg_temp_free_ptr(r_tsptr);
2866 }
2867 break;
2868 case 2: // tstate
2869 {
2870 TCGv_ptr r_tsptr;
2871
2872 r_tsptr = tcg_temp_new_ptr();
2873 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2874 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2875 offsetof(trap_state, tstate));
2876 tcg_temp_free_ptr(r_tsptr);
2877 }
2878 break;
2879 case 3: // tt
2880 {
2881 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2882
2883 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2884 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2885 offsetof(trap_state, tt));
2886 tcg_temp_free_ptr(r_tsptr);
2887 }
2888 break;
2889 case 4: // tick
2890 {
2891 TCGv_ptr r_tickptr;
2892
2893 r_tickptr = tcg_temp_new_ptr();
2894 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2895 offsetof(CPUSPARCState, tick));
2896 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2897 tcg_temp_free_ptr(r_tickptr);
2898 }
2899 break;
2900 case 5: // tba
2901 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2902 break;
2903 case 6: // pstate
2904 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2905 offsetof(CPUSPARCState, pstate));
2906 break;
2907 case 7: // tl
2908 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2909 offsetof(CPUSPARCState, tl));
2910 break;
2911 case 8: // pil
2912 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2913 offsetof(CPUSPARCState, psrpil));
2914 break;
2915 case 9: // cwp
2916 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2917 break;
2918 case 10: // cansave
2919 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2920 offsetof(CPUSPARCState, cansave));
2921 break;
2922 case 11: // canrestore
2923 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2924 offsetof(CPUSPARCState, canrestore));
2925 break;
2926 case 12: // cleanwin
2927 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2928 offsetof(CPUSPARCState, cleanwin));
2929 break;
2930 case 13: // otherwin
2931 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2932 offsetof(CPUSPARCState, otherwin));
2933 break;
2934 case 14: // wstate
2935 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2936 offsetof(CPUSPARCState, wstate));
2937 break;
2938 case 16: // UA2005 gl
2939 CHECK_IU_FEATURE(dc, GL);
2940 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2941 offsetof(CPUSPARCState, gl));
2942 break;
2943 case 26: // UA2005 strand status
2944 CHECK_IU_FEATURE(dc, HYPV);
2945 if (!hypervisor(dc))
2946 goto priv_insn;
2947 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2948 break;
2949 case 31: // ver
2950 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2951 break;
2952 case 15: // fq
2953 default:
2954 goto illegal_insn;
2955 }
2956 #else
2957 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2958 #endif
2959 gen_store_gpr(dc, rd, cpu_tmp0);
2960 break;
2961 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2962 #ifdef TARGET_SPARC64
2963 save_state(dc);
2964 gen_helper_flushw(cpu_env);
2965 #else
2966 if (!supervisor(dc))
2967 goto priv_insn;
2968 gen_store_gpr(dc, rd, cpu_tbr);
2969 #endif
2970 break;
2971 #endif
2972 } else if (xop == 0x34) { /* FPU Operations */
2973 if (gen_trap_ifnofpu(dc)) {
2974 goto jmp_insn;
2975 }
2976 gen_op_clear_ieee_excp_and_FTT();
2977 rs1 = GET_FIELD(insn, 13, 17);
2978 rs2 = GET_FIELD(insn, 27, 31);
2979 xop = GET_FIELD(insn, 18, 26);
2980 save_state(dc);
2981 switch (xop) {
2982 case 0x1: /* fmovs */
2983 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2984 gen_store_fpr_F(dc, rd, cpu_src1_32);
2985 break;
2986 case 0x5: /* fnegs */
2987 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2988 break;
2989 case 0x9: /* fabss */
2990 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2991 break;
2992 case 0x29: /* fsqrts */
2993 CHECK_FPU_FEATURE(dc, FSQRT);
2994 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2995 break;
2996 case 0x2a: /* fsqrtd */
2997 CHECK_FPU_FEATURE(dc, FSQRT);
2998 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2999 break;
3000 case 0x2b: /* fsqrtq */
3001 CHECK_FPU_FEATURE(dc, FLOAT128);
3002 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3003 break;
3004 case 0x41: /* fadds */
3005 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3006 break;
3007 case 0x42: /* faddd */
3008 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3009 break;
3010 case 0x43: /* faddq */
3011 CHECK_FPU_FEATURE(dc, FLOAT128);
3012 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3013 break;
3014 case 0x45: /* fsubs */
3015 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3016 break;
3017 case 0x46: /* fsubd */
3018 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3019 break;
3020 case 0x47: /* fsubq */
3021 CHECK_FPU_FEATURE(dc, FLOAT128);
3022 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3023 break;
3024 case 0x49: /* fmuls */
3025 CHECK_FPU_FEATURE(dc, FMUL);
3026 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3027 break;
3028 case 0x4a: /* fmuld */
3029 CHECK_FPU_FEATURE(dc, FMUL);
3030 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3031 break;
3032 case 0x4b: /* fmulq */
3033 CHECK_FPU_FEATURE(dc, FLOAT128);
3034 CHECK_FPU_FEATURE(dc, FMUL);
3035 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3036 break;
3037 case 0x4d: /* fdivs */
3038 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3039 break;
3040 case 0x4e: /* fdivd */
3041 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3042 break;
3043 case 0x4f: /* fdivq */
3044 CHECK_FPU_FEATURE(dc, FLOAT128);
3045 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3046 break;
3047 case 0x69: /* fsmuld */
3048 CHECK_FPU_FEATURE(dc, FSMULD);
3049 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3050 break;
3051 case 0x6e: /* fdmulq */
3052 CHECK_FPU_FEATURE(dc, FLOAT128);
3053 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3054 break;
3055 case 0xc4: /* fitos */
3056 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3057 break;
3058 case 0xc6: /* fdtos */
3059 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3060 break;
3061 case 0xc7: /* fqtos */
3062 CHECK_FPU_FEATURE(dc, FLOAT128);
3063 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3064 break;
3065 case 0xc8: /* fitod */
3066 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3067 break;
3068 case 0xc9: /* fstod */
3069 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3070 break;
3071 case 0xcb: /* fqtod */
3072 CHECK_FPU_FEATURE(dc, FLOAT128);
3073 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3074 break;
3075 case 0xcc: /* fitoq */
3076 CHECK_FPU_FEATURE(dc, FLOAT128);
3077 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3078 break;
3079 case 0xcd: /* fstoq */
3080 CHECK_FPU_FEATURE(dc, FLOAT128);
3081 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3082 break;
3083 case 0xce: /* fdtoq */
3084 CHECK_FPU_FEATURE(dc, FLOAT128);
3085 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3086 break;
3087 case 0xd1: /* fstoi */
3088 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3089 break;
3090 case 0xd2: /* fdtoi */
3091 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3092 break;
3093 case 0xd3: /* fqtoi */
3094 CHECK_FPU_FEATURE(dc, FLOAT128);
3095 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3096 break;
3097 #ifdef TARGET_SPARC64
3098 case 0x2: /* V9 fmovd */
3099 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3100 gen_store_fpr_D(dc, rd, cpu_src1_64);
3101 break;
3102 case 0x3: /* V9 fmovq */
3103 CHECK_FPU_FEATURE(dc, FLOAT128);
3104 gen_move_Q(rd, rs2);
3105 break;
3106 case 0x6: /* V9 fnegd */
3107 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3108 break;
3109 case 0x7: /* V9 fnegq */
3110 CHECK_FPU_FEATURE(dc, FLOAT128);
3111 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3112 break;
3113 case 0xa: /* V9 fabsd */
3114 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3115 break;
3116 case 0xb: /* V9 fabsq */
3117 CHECK_FPU_FEATURE(dc, FLOAT128);
3118 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3119 break;
3120 case 0x81: /* V9 fstox */
3121 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3122 break;
3123 case 0x82: /* V9 fdtox */
3124 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3125 break;
3126 case 0x83: /* V9 fqtox */
3127 CHECK_FPU_FEATURE(dc, FLOAT128);
3128 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3129 break;
3130 case 0x84: /* V9 fxtos */
3131 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3132 break;
3133 case 0x88: /* V9 fxtod */
3134 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3135 break;
3136 case 0x8c: /* V9 fxtoq */
3137 CHECK_FPU_FEATURE(dc, FLOAT128);
3138 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3139 break;
3140 #endif
3141 default:
3142 goto illegal_insn;
3143 }
3144 } else if (xop == 0x35) { /* FPU Operations */
3145 #ifdef TARGET_SPARC64
3146 int cond;
3147 #endif
3148 if (gen_trap_ifnofpu(dc)) {
3149 goto jmp_insn;
3150 }
3151 gen_op_clear_ieee_excp_and_FTT();
3152 rs1 = GET_FIELD(insn, 13, 17);
3153 rs2 = GET_FIELD(insn, 27, 31);
3154 xop = GET_FIELD(insn, 18, 26);
3155 save_state(dc);
3156
3157 #ifdef TARGET_SPARC64
3158 #define FMOVR(sz) \
3159 do { \
3160 DisasCompare cmp; \
3161 cond = GET_FIELD_SP(insn, 14, 17); \
3162 cpu_src1 = get_src1(dc, insn); \
3163 gen_compare_reg(&cmp, cond, cpu_src1); \
3164 gen_fmov##sz(dc, &cmp, rd, rs2); \
3165 free_compare(&cmp); \
3166 } while (0)
3167
3168 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3169 FMOVR(s);
3170 break;
3171 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3172 FMOVR(d);
3173 break;
3174 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3175 CHECK_FPU_FEATURE(dc, FLOAT128);
3176 FMOVR(q);
3177 break;
3178 }
3179 #undef FMOVR
3180 #endif
3181 switch (xop) {
3182 #ifdef TARGET_SPARC64
3183 #define FMOVCC(fcc, sz) \
3184 do { \
3185 DisasCompare cmp; \
3186 cond = GET_FIELD_SP(insn, 14, 17); \
3187 gen_fcompare(&cmp, fcc, cond); \
3188 gen_fmov##sz(dc, &cmp, rd, rs2); \
3189 free_compare(&cmp); \
3190 } while (0)
3191
3192 case 0x001: /* V9 fmovscc %fcc0 */
3193 FMOVCC(0, s);
3194 break;
3195 case 0x002: /* V9 fmovdcc %fcc0 */
3196 FMOVCC(0, d);
3197 break;
3198 case 0x003: /* V9 fmovqcc %fcc0 */
3199 CHECK_FPU_FEATURE(dc, FLOAT128);
3200 FMOVCC(0, q);
3201 break;
3202 case 0x041: /* V9 fmovscc %fcc1 */
3203 FMOVCC(1, s);
3204 break;
3205 case 0x042: /* V9 fmovdcc %fcc1 */
3206 FMOVCC(1, d);
3207 break;
3208 case 0x043: /* V9 fmovqcc %fcc1 */
3209 CHECK_FPU_FEATURE(dc, FLOAT128);
3210 FMOVCC(1, q);
3211 break;
3212 case 0x081: /* V9 fmovscc %fcc2 */
3213 FMOVCC(2, s);
3214 break;
3215 case 0x082: /* V9 fmovdcc %fcc2 */
3216 FMOVCC(2, d);
3217 break;
3218 case 0x083: /* V9 fmovqcc %fcc2 */
3219 CHECK_FPU_FEATURE(dc, FLOAT128);
3220 FMOVCC(2, q);
3221 break;
3222 case 0x0c1: /* V9 fmovscc %fcc3 */
3223 FMOVCC(3, s);
3224 break;
3225 case 0x0c2: /* V9 fmovdcc %fcc3 */
3226 FMOVCC(3, d);
3227 break;
3228 case 0x0c3: /* V9 fmovqcc %fcc3 */
3229 CHECK_FPU_FEATURE(dc, FLOAT128);
3230 FMOVCC(3, q);
3231 break;
3232 #undef FMOVCC
3233 #define FMOVCC(xcc, sz) \
3234 do { \
3235 DisasCompare cmp; \
3236 cond = GET_FIELD_SP(insn, 14, 17); \
3237 gen_compare(&cmp, xcc, cond, dc); \
3238 gen_fmov##sz(dc, &cmp, rd, rs2); \
3239 free_compare(&cmp); \
3240 } while (0)
3241
3242 case 0x101: /* V9 fmovscc %icc */
3243 FMOVCC(0, s);
3244 break;
3245 case 0x102: /* V9 fmovdcc %icc */
3246 FMOVCC(0, d);
3247 break;
3248 case 0x103: /* V9 fmovqcc %icc */
3249 CHECK_FPU_FEATURE(dc, FLOAT128);
3250 FMOVCC(0, q);
3251 break;
3252 case 0x181: /* V9 fmovscc %xcc */
3253 FMOVCC(1, s);
3254 break;
3255 case 0x182: /* V9 fmovdcc %xcc */
3256 FMOVCC(1, d);
3257 break;
3258 case 0x183: /* V9 fmovqcc %xcc */
3259 CHECK_FPU_FEATURE(dc, FLOAT128);
3260 FMOVCC(1, q);
3261 break;
3262 #undef FMOVCC
3263 #endif
3264 case 0x51: /* fcmps, V9 %fcc */
3265 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3266 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3267 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3268 break;
3269 case 0x52: /* fcmpd, V9 %fcc */
3270 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3271 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3272 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3273 break;
3274 case 0x53: /* fcmpq, V9 %fcc */
3275 CHECK_FPU_FEATURE(dc, FLOAT128);
3276 gen_op_load_fpr_QT0(QFPREG(rs1));
3277 gen_op_load_fpr_QT1(QFPREG(rs2));
3278 gen_op_fcmpq(rd & 3);
3279 break;
3280 case 0x55: /* fcmpes, V9 %fcc */
3281 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3282 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3283 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3284 break;
3285 case 0x56: /* fcmped, V9 %fcc */
3286 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3287 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3288 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3289 break;
3290 case 0x57: /* fcmpeq, V9 %fcc */
3291 CHECK_FPU_FEATURE(dc, FLOAT128);
3292 gen_op_load_fpr_QT0(QFPREG(rs1));
3293 gen_op_load_fpr_QT1(QFPREG(rs2));
3294 gen_op_fcmpeq(rd & 3);
3295 break;
3296 default:
3297 goto illegal_insn;
3298 }
3299 } else if (xop == 0x2) {
3300 TCGv dst = gen_dest_gpr(dc, rd);
3301 rs1 = GET_FIELD(insn, 13, 17);
3302 if (rs1 == 0) {
3303 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3304 if (IS_IMM) { /* immediate */
3305 simm = GET_FIELDs(insn, 19, 31);
3306 tcg_gen_movi_tl(dst, simm);
3307 gen_store_gpr(dc, rd, dst);
3308 } else { /* register */
3309 rs2 = GET_FIELD(insn, 27, 31);
3310 if (rs2 == 0) {
3311 tcg_gen_movi_tl(dst, 0);
3312 gen_store_gpr(dc, rd, dst);
3313 } else {
3314 cpu_src2 = gen_load_gpr(dc, rs2);
3315 gen_store_gpr(dc, rd, cpu_src2);
3316 }
3317 }
3318 } else {
3319 cpu_src1 = get_src1(dc, insn);
3320 if (IS_IMM) { /* immediate */
3321 simm = GET_FIELDs(insn, 19, 31);
3322 tcg_gen_ori_tl(dst, cpu_src1, simm);
3323 gen_store_gpr(dc, rd, dst);
3324 } else { /* register */
3325 rs2 = GET_FIELD(insn, 27, 31);
3326 if (rs2 == 0) {
3327 /* mov shortcut: or x, %g0, y -> mov x, y */
3328 gen_store_gpr(dc, rd, cpu_src1);
3329 } else {
3330 cpu_src2 = gen_load_gpr(dc, rs2);
3331 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3332 gen_store_gpr(dc, rd, dst);
3333 }
3334 }
3335 }
3336 #ifdef TARGET_SPARC64
3337 } else if (xop == 0x25) { /* sll, V9 sllx */
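/* Bit 12 of the insn (the V9 "x" bit) selects a 64-bit shift count
   of 6 bits; without it only the low 5 count bits are used.  */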
3338 cpu_src1 = get_src1(dc, insn);
3339 if (IS_IMM) { /* immediate */
3340 simm = GET_FIELDs(insn, 20, 31);
3341 if (insn & (1 << 12)) {
3342 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3343 } else {
3344 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3345 }
3346 } else { /* register */
3347 rs2 = GET_FIELD(insn, 27, 31);
3348 cpu_src2 = gen_load_gpr(dc, rs2);
3349 if (insn & (1 << 12)) {
3350 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3351 } else {
3352 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3353 }
3354 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3355 }
3356 gen_store_gpr(dc, rd, cpu_dst);
3357 } else if (xop == 0x26) { /* srl, V9 srlx */
3358 cpu_src1 = get_src1(dc, insn);
3359 if (IS_IMM) { /* immediate */
3360 simm = GET_FIELDs(insn, 20, 31);
3361 if (insn & (1 << 12)) {
3362 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3363 } else {
3364 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3365 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3366 }
3367 } else { /* register */
3368 rs2 = GET_FIELD(insn, 27, 31);
3369 cpu_src2 = gen_load_gpr(dc, rs2);
3370 if (insn & (1 << 12)) {
3371 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3372 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3373 } else {
3374 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3375 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3376 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3377 }
3378 }
3379 gen_store_gpr(dc, rd, cpu_dst);
3380 } else if (xop == 0x27) { /* sra, V9 srax */
3381 cpu_src1 = get_src1(dc, insn);
3382 if (IS_IMM) { /* immediate */
3383 simm = GET_FIELDs(insn, 20, 31);
3384 if (insn & (1 << 12)) {
3385 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3386 } else {
3387 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3388 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3389 }
3390 } else { /* register */
3391 rs2 = GET_FIELD(insn, 27, 31);
3392 cpu_src2 = gen_load_gpr(dc, rs2);
3393 if (insn & (1 << 12)) {
3394 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3395 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3396 } else {
3397 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3398 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3399 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3400 }
3401 }
3402 gen_store_gpr(dc, rd, cpu_dst);
3403 #endif
3404 } else if (xop < 0x36) {
3405 if (xop < 0x20) {
3406 cpu_src1 = get_src1(dc, insn);
3407 cpu_src2 = get_src2(dc, insn);
3408 switch (xop & ~0x10) {
3409 case 0x0: /* add */
3410 if (xop & 0x10) {
3411 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3412 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3413 dc->cc_op = CC_OP_ADD;
3414 } else {
3415 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3416 }
3417 break;
3418 case 0x1: /* and */
3419 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3420 if (xop & 0x10) {
3421 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3422 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3423 dc->cc_op = CC_OP_LOGIC;
3424 }
3425 break;
3426 case 0x2: /* or */
3427 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3428 if (xop & 0x10) {
3429 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3430 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3431 dc->cc_op = CC_OP_LOGIC;
3432 }
3433 break;
3434 case 0x3: /* xor */
3435 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3436 if (xop & 0x10) {
3437 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3438 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3439 dc->cc_op = CC_OP_LOGIC;
3440 }
3441 break;
3442 case 0x4: /* sub */
3443 if (xop & 0x10) {
3444 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3445 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3446 dc->cc_op = CC_OP_SUB;
3447 } else {
3448 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3449 }
3450 break;
3451 case 0x5: /* andn */
3452 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3453 if (xop & 0x10) {
3454 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3455 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3456 dc->cc_op = CC_OP_LOGIC;
3457 }
3458 break;
3459 case 0x6: /* orn */
3460 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3461 if (xop & 0x10) {
3462 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3463 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3464 dc->cc_op = CC_OP_LOGIC;
3465 }
3466 break;
3467 case 0x7: /* xorn */
3468 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3469 if (xop & 0x10) {
3470 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3471 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3472 dc->cc_op = CC_OP_LOGIC;
3473 }
3474 break;
3475 case 0x8: /* addx, V9 addc */
3476 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3477 (xop & 0x10));
3478 break;
3479 #ifdef TARGET_SPARC64
3480 case 0x9: /* V9 mulx */
3481 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3482 break;
3483 #endif
3484 case 0xa: /* umul */
3485 CHECK_IU_FEATURE(dc, MUL);
3486 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3487 if (xop & 0x10) {
3488 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3489 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3490 dc->cc_op = CC_OP_LOGIC;
3491 }
3492 break;
3493 case 0xb: /* smul */
3494 CHECK_IU_FEATURE(dc, MUL);
3495 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3496 if (xop & 0x10) {
3497 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3498 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3499 dc->cc_op = CC_OP_LOGIC;
3500 }
3501 break;
3502 case 0xc: /* subx, V9 subc */
3503 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3504 (xop & 0x10));
3505 break;
3506 #ifdef TARGET_SPARC64
3507 case 0xd: /* V9 udivx */
3508 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3509 break;
3510 #endif
3511 case 0xe: /* udiv */
3512 CHECK_IU_FEATURE(dc, DIV);
3513 if (xop & 0x10) {
3514 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3515 cpu_src2);
3516 dc->cc_op = CC_OP_DIV;
3517 } else {
3518 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3519 cpu_src2);
3520 }
3521 break;
3522 case 0xf: /* sdiv */
3523 CHECK_IU_FEATURE(dc, DIV);
3524 if (xop & 0x10) {
3525 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3526 cpu_src2);
3527 dc->cc_op = CC_OP_DIV;
3528 } else {
3529 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3530 cpu_src2);
3531 }
3532 break;
3533 default:
3534 goto illegal_insn;
3535 }
3536 gen_store_gpr(dc, rd, cpu_dst);
3537 } else {
3538 cpu_src1 = get_src1(dc, insn);
3539 cpu_src2 = get_src2(dc, insn);
3540 switch (xop) {
3541 case 0x20: /* taddcc */
3542 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3543 gen_store_gpr(dc, rd, cpu_dst);
3544 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3545 dc->cc_op = CC_OP_TADD;
3546 break;
3547 case 0x21: /* tsubcc */
3548 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3549 gen_store_gpr(dc, rd, cpu_dst);
3550 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3551 dc->cc_op = CC_OP_TSUB;
3552 break;
3553 case 0x22: /* taddcctv */
3554 gen_helper_taddcctv(cpu_dst, cpu_env,
3555 cpu_src1, cpu_src2);
3556 gen_store_gpr(dc, rd, cpu_dst);
3557 dc->cc_op = CC_OP_TADDTV;
3558 break;
3559 case 0x23: /* tsubcctv */
3560 gen_helper_tsubcctv(cpu_dst, cpu_env,
3561 cpu_src1, cpu_src2);
3562 gen_store_gpr(dc, rd, cpu_dst);
3563 dc->cc_op = CC_OP_TSUBTV;
3564 break;
3565 case 0x24: /* mulscc */
3566 update_psr(dc);
3567 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3568 gen_store_gpr(dc, rd, cpu_dst);
3569 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3570 dc->cc_op = CC_OP_ADD;
3571 break;
3572 #ifndef TARGET_SPARC64
3573 case 0x25: /* sll */
3574 if (IS_IMM) { /* immediate */
3575 simm = GET_FIELDs(insn, 20, 31);
3576 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3577 } else { /* register */
3578 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3579 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3580 }
3581 gen_store_gpr(dc, rd, cpu_dst);
3582 break;
3583 case 0x26: /* srl */
3584 if (IS_IMM) { /* immediate */
3585 simm = GET_FIELDs(insn, 20, 31);
3586 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3587 } else { /* register */
3588 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3589 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3590 }
3591 gen_store_gpr(dc, rd, cpu_dst);
3592 break;
3593 case 0x27: /* sra */
3594 if (IS_IMM) { /* immediate */
3595 simm = GET_FIELDs(insn, 20, 31);
3596 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3597 } else { /* register */
3598 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3599 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3600 }
3601 gen_store_gpr(dc, rd, cpu_dst);
3602 break;
3603 #endif
3604 case 0x30:
3605 {
3606 switch(rd) {
3607 case 0: /* wry */
3608 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3609 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3610 break;
3611 #ifndef TARGET_SPARC64
3612 case 0x01 ... 0x0f: /* undefined in the
3613 SPARCv8 manual, nop
3614 on the microSPARC
3615 II */
3616 case 0x10 ... 0x1f: /* implementation-dependent
3617 in the SPARCv8
3618 manual, nop on the
3619 microSPARC II */
3620 break;
3621 #else
3622 case 0x2: /* V9 wrccr */
3623 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3624 gen_helper_wrccr(cpu_env, cpu_dst);
3625 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3626 dc->cc_op = CC_OP_FLAGS;
3627 break;
3628 case 0x3: /* V9 wrasi */
3629 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3630 tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
3631 tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
3632 break;
3633 case 0x6: /* V9 wrfprs */
3634 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3635 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
3636 save_state(dc);
3637 gen_op_next_insn();
3638 tcg_gen_exit_tb(0);
3639 dc->is_br = 1;
3640 break;
3641 case 0xf: /* V9 sir, nop if user */
3642 #if !defined(CONFIG_USER_ONLY)
3643 if (supervisor(dc)) {
3644 ; // XXX
3645 }
3646 #endif
3647 break;
3648 case 0x13: /* Graphics Status */
3649 if (gen_trap_ifnofpu(dc)) {
3650 goto jmp_insn;
3651 }
3652 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3653 break;
3654 case 0x14: /* Softint set */
3655 if (!supervisor(dc))
3656 goto illegal_insn;
3657 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3658 gen_helper_set_softint(cpu_env, cpu_tmp0);
3659 break;
3660 case 0x15: /* Softint clear */
3661 if (!supervisor(dc))
3662 goto illegal_insn;
3663 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3664 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3665 break;
3666 case 0x16: /* Softint write */
3667 if (!supervisor(dc))
3668 goto illegal_insn;
3669 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3670 gen_helper_write_softint(cpu_env, cpu_tmp0);
3671 break;
3672 case 0x17: /* Tick compare */
3673 #if !defined(CONFIG_USER_ONLY)
3674 if (!supervisor(dc))
3675 goto illegal_insn;
3676 #endif
3677 {
3678 TCGv_ptr r_tickptr;
3679
3680 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3681 cpu_src2);
3682 r_tickptr = tcg_temp_new_ptr();
3683 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3684 offsetof(CPUSPARCState, tick));
3685 gen_helper_tick_set_limit(r_tickptr,
3686 cpu_tick_cmpr);
3687 tcg_temp_free_ptr(r_tickptr);
3688 }
3689 break;
3690 case 0x18: /* System tick */
3691 #if !defined(CONFIG_USER_ONLY)
3692 if (!supervisor(dc))
3693 goto illegal_insn;
3694 #endif
3695 {
3696 TCGv_ptr r_tickptr;
3697
3698 tcg_gen_xor_tl(cpu_dst, cpu_src1,
3699 cpu_src2);
3700 r_tickptr = tcg_temp_new_ptr();
3701 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3702 offsetof(CPUSPARCState, stick));
3703 gen_helper_tick_set_count(r_tickptr,
3704 cpu_dst);
3705 tcg_temp_free_ptr(r_tickptr);
3706 }
3707 break;
3708 case 0x19: /* System tick compare */
3709 #if !defined(CONFIG_USER_ONLY)
3710 if (!supervisor(dc))
3711 goto illegal_insn;
3712 #endif
3713 {
3714 TCGv_ptr r_tickptr;
3715
3716 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3717 cpu_src2);
3718 r_tickptr = tcg_temp_new_ptr();
3719 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3720 offsetof(CPUSPARCState, stick));
3721 gen_helper_tick_set_limit(r_tickptr,
3722 cpu_stick_cmpr);
3723 tcg_temp_free_ptr(r_tickptr);
3724 }
3725 break;
3726
3727 case 0x10: /* Performance Control */
3728 case 0x11: /* Performance Instrumentation
3729 Counter */
3730 case 0x12: /* Dispatch Control */
3731 #endif
3732 default:
3733 goto illegal_insn;
3734 }
3735 }
3736 break;
3737 #if !defined(CONFIG_USER_ONLY)
3738 case 0x31: /* wrpsr, V9 saved, restored */
3739 {
3740 if (!supervisor(dc))
3741 goto priv_insn;
3742 #ifdef TARGET_SPARC64
3743 switch (rd) {
3744 case 0:
3745 gen_helper_saved(cpu_env);
3746 break;
3747 case 1:
3748 gen_helper_restored(cpu_env);
3749 break;
3750 case 2: /* UA2005 allclean */
3751 case 3: /* UA2005 otherw */
3752 case 4: /* UA2005 normalw */
3753 case 5: /* UA2005 invalw */
3754 // XXX
3755 default:
3756 goto illegal_insn;
3757 }
3758 #else
3759 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3760 gen_helper_wrpsr(cpu_env, cpu_dst);
3761 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3762 dc->cc_op = CC_OP_FLAGS;
3763 save_state(dc);
3764 gen_op_next_insn();
3765 tcg_gen_exit_tb(0);
3766 dc->is_br = 1;
3767 #endif
3768 }
3769 break;
3770 case 0x32: /* wrwim, V9 wrpr */
3771 {
3772 if (!supervisor(dc))
3773 goto priv_insn;
3774 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3775 #ifdef TARGET_SPARC64
3776 switch (rd) {
3777 case 0: // tpc
3778 {
3779 TCGv_ptr r_tsptr;
3780
3781 r_tsptr = tcg_temp_new_ptr();
3782 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3783 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3784 offsetof(trap_state, tpc));
3785 tcg_temp_free_ptr(r_tsptr);
3786 }
3787 break;
3788 case 1: // tnpc
3789 {
3790 TCGv_ptr r_tsptr;
3791
3792 r_tsptr = tcg_temp_new_ptr();
3793 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3794 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3795 offsetof(trap_state, tnpc));
3796 tcg_temp_free_ptr(r_tsptr);
3797 }
3798 break;
3799 case 2: // tstate
3800 {
3801 TCGv_ptr r_tsptr;
3802
3803 r_tsptr = tcg_temp_new_ptr();
3804 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3805 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3806 offsetof(trap_state,
3807 tstate));
3808 tcg_temp_free_ptr(r_tsptr);
3809 }
3810 break;
3811 case 3: // tt
3812 {
3813 TCGv_ptr r_tsptr;
3814
3815 r_tsptr = tcg_temp_new_ptr();
3816 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3817 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3818 offsetof(trap_state, tt));
3819 tcg_temp_free_ptr(r_tsptr);
3820 }
3821 break;
3822 case 4: // tick
3823 {
3824 TCGv_ptr r_tickptr;
3825
3826 r_tickptr = tcg_temp_new_ptr();
3827 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3828 offsetof(CPUSPARCState, tick));
3829 gen_helper_tick_set_count(r_tickptr,
3830 cpu_tmp0);
3831 tcg_temp_free_ptr(r_tickptr);
3832 }
3833 break;
3834 case 5: // tba
3835 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3836 break;
3837 case 6: // pstate
3838 save_state(dc);
3839 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3840 dc->npc = DYNAMIC_PC;
3841 break;
3842 case 7: // tl
3843 save_state(dc);
3844 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3845 offsetof(CPUSPARCState, tl));
3846 dc->npc = DYNAMIC_PC;
3847 break;
3848 case 8: // pil
3849 gen_helper_wrpil(cpu_env, cpu_tmp0);
3850 break;
3851 case 9: // cwp
3852 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3853 break;
3854 case 10: // cansave
3855 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3856 offsetof(CPUSPARCState,
3857 cansave));
3858 break;
3859 case 11: // canrestore
3860 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3861 offsetof(CPUSPARCState,
3862 canrestore));
3863 break;
3864 case 12: // cleanwin
3865 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3866 offsetof(CPUSPARCState,
3867 cleanwin));
3868 break;
3869 case 13: // otherwin
3870 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3871 offsetof(CPUSPARCState,
3872 otherwin));
3873 break;
3874 case 14: // wstate
3875 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3876 offsetof(CPUSPARCState,
3877 wstate));
3878 break;
3879 case 16: // UA2005 gl
3880 CHECK_IU_FEATURE(dc, GL);
3881 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3882 offsetof(CPUSPARCState, gl));
3883 break;
3884 case 26: // UA2005 strand status
3885 CHECK_IU_FEATURE(dc, HYPV);
3886 if (!hypervisor(dc))
3887 goto priv_insn;
3888 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3889 break;
3890 default:
3891 goto illegal_insn;
3892 }
3893 #else
3894 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3895 if (dc->def->nwindows != 32) {
3896 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3897 (1 << dc->def->nwindows) - 1);
3898 }
3899 #endif
3900 }
3901 break;
3902 case 0x33: /* wrtbr, UA2005 wrhpr */
3903 {
3904 #ifndef TARGET_SPARC64
3905 if (!supervisor(dc))
3906 goto priv_insn;
3907 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3908 #else
3909 CHECK_IU_FEATURE(dc, HYPV);
3910 if (!hypervisor(dc))
3911 goto priv_insn;
3912 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3913 switch (rd) {
3914 case 0: // hpstate
3915 // XXX gen_op_wrhpstate();
3916 save_state(dc);
3917 gen_op_next_insn();
3918 tcg_gen_exit_tb(0);
3919 dc->is_br = 1;
3920 break;
3921 case 1: // htstate
3922 // XXX gen_op_wrhtstate();
3923 break;
3924 case 3: // hintp
3925 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3926 break;
3927 case 5: // htba
3928 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3929 break;
3930 case 31: // hstick_cmpr
3931 {
3932 TCGv_ptr r_tickptr;
3933
3934 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3935 r_tickptr = tcg_temp_new_ptr();
3936 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3937 offsetof(CPUSPARCState, hstick));
3938 gen_helper_tick_set_limit(r_tickptr,
3939 cpu_hstick_cmpr);
3940 tcg_temp_free_ptr(r_tickptr);
3941 }
3942 break;
3943 case 6: // hver readonly
3944 default:
3945 goto illegal_insn;
3946 }
3947 #endif
3948 }
3949 break;
3950 #endif
3951 #ifdef TARGET_SPARC64
3952 case 0x2c: /* V9 movcc */
3953 {
3954 int cc = GET_FIELD_SP(insn, 11, 12);
3955 int cond = GET_FIELD_SP(insn, 14, 17);
3956 DisasCompare cmp;
3957 TCGv dst;
3958
3959 if (insn & (1 << 18)) {
3960 if (cc == 0) {
3961 gen_compare(&cmp, 0, cond, dc);
3962 } else if (cc == 2) {
3963 gen_compare(&cmp, 1, cond, dc);
3964 } else {
3965 goto illegal_insn;
3966 }
3967 } else {
3968 gen_fcompare(&cmp, cc, cond);
3969 }
3970
3971 /* The get_src2 above loaded the normal 13-bit
3972 immediate field, not the 11-bit field we have
3973 in movcc. But it did handle the reg case. */
3974 if (IS_IMM) {
3975 simm = GET_FIELD_SPs(insn, 0, 10);
3976 tcg_gen_movi_tl(cpu_src2, simm);
3977 }
3978
3979 dst = gen_load_gpr(dc, rd);
3980 tcg_gen_movcond_tl(cmp.cond, dst,
3981 cmp.c1, cmp.c2,
3982 cpu_src2, dst);
3983 free_compare(&cmp);
3984 gen_store_gpr(dc, rd, dst);
3985 break;
3986 }
3987 case 0x2d: /* V9 sdivx */
3988 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3989 gen_store_gpr(dc, rd, cpu_dst);
3990 break;
3991 case 0x2e: /* V9 popc */
3992 gen_helper_popc(cpu_dst, cpu_src2);
3993 gen_store_gpr(dc, rd, cpu_dst);
3994 break;
3995 case 0x2f: /* V9 movr */
3996 {
3997 int cond = GET_FIELD_SP(insn, 10, 12);
3998 DisasCompare cmp;
3999 TCGv dst;
4000
4001 gen_compare_reg(&cmp, cond, cpu_src1);
4002
4003 /* The get_src2 above loaded the normal 13-bit
4004 immediate field, not the 10-bit field we have
4005 in movr. But it did handle the reg case. */
4006 if (IS_IMM) {
4007 simm = GET_FIELD_SPs(insn, 0, 9);
4008 tcg_gen_movi_tl(cpu_src2, simm);
4009 }
4010
4011 dst = gen_load_gpr(dc, rd);
4012 tcg_gen_movcond_tl(cmp.cond, dst,
4013 cmp.c1, cmp.c2,
4014 cpu_src2, dst);
4015 free_compare(&cmp);
4016 gen_store_gpr(dc, rd, dst);
4017 break;
4018 }
4019 #endif
4020 default:
4021 goto illegal_insn;
4022 }
4023 }
4024 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4025 #ifdef TARGET_SPARC64
4026 int opf = GET_FIELD_SP(insn, 5, 13);
4027 rs1 = GET_FIELD(insn, 13, 17);
4028 rs2 = GET_FIELD(insn, 27, 31);
4029 if (gen_trap_ifnofpu(dc)) {
4030 goto jmp_insn;
4031 }
4032
4033 switch (opf) {
4034 case 0x000: /* VIS I edge8cc */
4035 CHECK_FPU_FEATURE(dc, VIS1);
4036 cpu_src1 = gen_load_gpr(dc, rs1);
4037 cpu_src2 = gen_load_gpr(dc, rs2);
4038 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4039 gen_store_gpr(dc, rd, cpu_dst);
4040 break;
4041 case 0x001: /* VIS II edge8n */
4042 CHECK_FPU_FEATURE(dc, VIS2);
4043 cpu_src1 = gen_load_gpr(dc, rs1);
4044 cpu_src2 = gen_load_gpr(dc, rs2);
4045 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4046 gen_store_gpr(dc, rd, cpu_dst);
4047 break;
4048 case 0x002: /* VIS I edge8lcc */
4049 CHECK_FPU_FEATURE(dc, VIS1);
4050 cpu_src1 = gen_load_gpr(dc, rs1);
4051 cpu_src2 = gen_load_gpr(dc, rs2);
4052 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4053 gen_store_gpr(dc, rd, cpu_dst);
4054 break;
4055 case 0x003: /* VIS II edge8ln */
4056 CHECK_FPU_FEATURE(dc, VIS2);
4057 cpu_src1 = gen_load_gpr(dc, rs1);
4058 cpu_src2 = gen_load_gpr(dc, rs2);
4059 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4060 gen_store_gpr(dc, rd, cpu_dst);
4061 break;
4062 case 0x004: /* VIS I edge16cc */
4063 CHECK_FPU_FEATURE(dc, VIS1);
4064 cpu_src1 = gen_load_gpr(dc, rs1);
4065 cpu_src2 = gen_load_gpr(dc, rs2);
4066 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4067 gen_store_gpr(dc, rd, cpu_dst);
4068 break;
4069 case 0x005: /* VIS II edge16n */
4070 CHECK_FPU_FEATURE(dc, VIS2);
4071 cpu_src1 = gen_load_gpr(dc, rs1);
4072 cpu_src2 = gen_load_gpr(dc, rs2);
4073 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4074 gen_store_gpr(dc, rd, cpu_dst);
4075 break;
4076 case 0x006: /* VIS I edge16lcc */
4077 CHECK_FPU_FEATURE(dc, VIS1);
4078 cpu_src1 = gen_load_gpr(dc, rs1);
4079 cpu_src2 = gen_load_gpr(dc, rs2);
4080 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4081 gen_store_gpr(dc, rd, cpu_dst);
4082 break;
4083 case 0x007: /* VIS II edge16ln */
4084 CHECK_FPU_FEATURE(dc, VIS2);
4085 cpu_src1 = gen_load_gpr(dc, rs1);
4086 cpu_src2 = gen_load_gpr(dc, rs2);
4087 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4088 gen_store_gpr(dc, rd, cpu_dst);
4089 break;
4090 case 0x008: /* VIS I edge32cc */
4091 CHECK_FPU_FEATURE(dc, VIS1);
4092 cpu_src1 = gen_load_gpr(dc, rs1);
4093 cpu_src2 = gen_load_gpr(dc, rs2);
4094 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4095 gen_store_gpr(dc, rd, cpu_dst);
4096 break;
4097 case 0x009: /* VIS II edge32n */
4098 CHECK_FPU_FEATURE(dc, VIS2);
4099 cpu_src1 = gen_load_gpr(dc, rs1);
4100 cpu_src2 = gen_load_gpr(dc, rs2);
4101 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4102 gen_store_gpr(dc, rd, cpu_dst);
4103 break;
4104 case 0x00a: /* VIS I edge32lcc */
4105 CHECK_FPU_FEATURE(dc, VIS1);
4106 cpu_src1 = gen_load_gpr(dc, rs1);
4107 cpu_src2 = gen_load_gpr(dc, rs2);
4108 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4109 gen_store_gpr(dc, rd, cpu_dst);
4110 break;
4111 case 0x00b: /* VIS II edge32ln */
4112 CHECK_FPU_FEATURE(dc, VIS2);
4113 cpu_src1 = gen_load_gpr(dc, rs1);
4114 cpu_src2 = gen_load_gpr(dc, rs2);
4115 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4116 gen_store_gpr(dc, rd, cpu_dst);
4117 break;
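/* array8/16/32: VIS blocked-array addressing, converting the packed
   coordinates in rs1 into a blocked byte address; the 16- and 32-bit
   forms scale the 8-bit result by the element size.  */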
4118 case 0x010: /* VIS I array8 */
4119 CHECK_FPU_FEATURE(dc, VIS1);
4120 cpu_src1 = gen_load_gpr(dc, rs1);
4121 cpu_src2 = gen_load_gpr(dc, rs2);
4122 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4123 gen_store_gpr(dc, rd, cpu_dst);
4124 break;
4125 case 0x012: /* VIS I array16 */
4126 CHECK_FPU_FEATURE(dc, VIS1);
4127 cpu_src1 = gen_load_gpr(dc, rs1);
4128 cpu_src2 = gen_load_gpr(dc, rs2);
4129 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4130 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4131 gen_store_gpr(dc, rd, cpu_dst);
4132 break;
4133 case 0x014: /* VIS I array32 */
4134 CHECK_FPU_FEATURE(dc, VIS1);
4135 cpu_src1 = gen_load_gpr(dc, rs1);
4136 cpu_src2 = gen_load_gpr(dc, rs2);
4137 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4138 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4139 gen_store_gpr(dc, rd, cpu_dst);
4140 break;
4141 case 0x018: /* VIS I alignaddr */
4142 CHECK_FPU_FEATURE(dc, VIS1);
4143 cpu_src1 = gen_load_gpr(dc, rs1);
4144 cpu_src2 = gen_load_gpr(dc, rs2);
4145 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4146 gen_store_gpr(dc, rd, cpu_dst);
4147 break;
4148 case 0x01a: /* VIS I alignaddrl */
4149 CHECK_FPU_FEATURE(dc, VIS1);
4150 cpu_src1 = gen_load_gpr(dc, rs1);
4151 cpu_src2 = gen_load_gpr(dc, rs2);
4152 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4153 gen_store_gpr(dc, rd, cpu_dst);
4154 break;
4155 case 0x019: /* VIS II bmask */
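/* BMASK: dst = s1 + s2; the result is also deposited into GSR.mask
   (GSR bits 63:32) for use by a later BSHUFFLE.  */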
4156 CHECK_FPU_FEATURE(dc, VIS2);
4157 cpu_src1 = gen_load_gpr(dc, rs1);
4158 cpu_src2 = gen_load_gpr(dc, rs2);
4159 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4160 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4161 gen_store_gpr(dc, rd, cpu_dst);
4162 break;
4163 case 0x020: /* VIS I fcmple16 */
4164 CHECK_FPU_FEATURE(dc, VIS1);
4165 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4166 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4167 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4168 gen_store_gpr(dc, rd, cpu_dst);
4169 break;
4170 case 0x022: /* VIS I fcmpne16 */
4171 CHECK_FPU_FEATURE(dc, VIS1);
4172 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4173 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4174 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4175 gen_store_gpr(dc, rd, cpu_dst);
4176 break;
4177 case 0x024: /* VIS I fcmple32 */
4178 CHECK_FPU_FEATURE(dc, VIS1);
4179 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4180 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4181 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4182 gen_store_gpr(dc, rd, cpu_dst);
4183 break;
4184 case 0x026: /* VIS I fcmpne32 */
4185 CHECK_FPU_FEATURE(dc, VIS1);
4186 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4187 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4188 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4189 gen_store_gpr(dc, rd, cpu_dst);
4190 break;
4191 case 0x028: /* VIS I fcmpgt16 */
4192 CHECK_FPU_FEATURE(dc, VIS1);
4193 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4194 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4195 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4196 gen_store_gpr(dc, rd, cpu_dst);
4197 break;
4198 case 0x02a: /* VIS I fcmpeq16 */
4199 CHECK_FPU_FEATURE(dc, VIS1);
4200 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4201 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4202 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4203 gen_store_gpr(dc, rd, cpu_dst);
4204 break;
4205 case 0x02c: /* VIS I fcmpgt32 */
4206 CHECK_FPU_FEATURE(dc, VIS1);
4207 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4208 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4209 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4210 gen_store_gpr(dc, rd, cpu_dst);
4211 break;
4212 case 0x02e: /* VIS I fcmpeq32 */
4213 CHECK_FPU_FEATURE(dc, VIS1);
4214 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4215 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4216 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4217 gen_store_gpr(dc, rd, cpu_dst);
4218 break;
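/* The fcmp* helpers above compare the 16-bit or 32-bit lanes of two
double fp registers and return a per-lane condition bitmask in the
integer destination register. */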
4219 case 0x031: /* VIS I fmul8x16 */
4220 CHECK_FPU_FEATURE(dc, VIS1);
4221 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4222 break;
4223 case 0x033: /* VIS I fmul8x16au */
4224 CHECK_FPU_FEATURE(dc, VIS1);
4225 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4226 break;
4227 case 0x035: /* VIS I fmul8x16al */
4228 CHECK_FPU_FEATURE(dc, VIS1);
4229 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4230 break;
4231 case 0x036: /* VIS I fmul8sux16 */
4232 CHECK_FPU_FEATURE(dc, VIS1);
4233 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4234 break;
4235 case 0x037: /* VIS I fmul8ulx16 */
4236 CHECK_FPU_FEATURE(dc, VIS1);
4237 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4238 break;
4239 case 0x038: /* VIS I fmuld8sux16 */
4240 CHECK_FPU_FEATURE(dc, VIS1);
4241 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4242 break;
4243 case 0x039: /* VIS I fmuld8ulx16 */
4244 CHECK_FPU_FEATURE(dc, VIS1);
4245 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4246 break;
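/* The partitioned multiplies are implemented entirely in helpers via
gen_ne_fop_DDD. Operations that depend on GSR go through
gen_gsr_fop_DDD (fpack32, faligndata, bshuffle) or pass cpu_gsr
explicitly (fpack16, fpackfix). */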
4247 case 0x03a: /* VIS I fpack32 */
4248 CHECK_FPU_FEATURE(dc, VIS1);
4249 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4250 break;
4251 case 0x03b: /* VIS I fpack16 */
4252 CHECK_FPU_FEATURE(dc, VIS1);
4253 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4254 cpu_dst_32 = gen_dest_fpr_F(dc);
4255 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4256 gen_store_fpr_F(dc, rd, cpu_dst_32);
4257 break;
4258 case 0x03d: /* VIS I fpackfix */
4259 CHECK_FPU_FEATURE(dc, VIS1);
4260 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4261 cpu_dst_32 = gen_dest_fpr_F(dc);
4262 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4263 gen_store_fpr_F(dc, rd, cpu_dst_32);
4264 break;
4265 case 0x03e: /* VIS I pdist */
4266 CHECK_FPU_FEATURE(dc, VIS1);
4267 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4268 break;
4269 case 0x048: /* VIS I faligndata */
4270 CHECK_FPU_FEATURE(dc, VIS1);
4271 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4272 break;
4273 case 0x04b: /* VIS I fpmerge */
4274 CHECK_FPU_FEATURE(dc, VIS1);
4275 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4276 break;
4277 case 0x04c: /* VIS II bshuffle */
4278 CHECK_FPU_FEATURE(dc, VIS2);
4279 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4280 break;
4281 case 0x04d: /* VIS I fexpand */
4282 CHECK_FPU_FEATURE(dc, VIS1);
4283 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4284 break;
4285 case 0x050: /* VIS I fpadd16 */
4286 CHECK_FPU_FEATURE(dc, VIS1);
4287 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4288 break;
4289 case 0x051: /* VIS I fpadd16s */
4290 CHECK_FPU_FEATURE(dc, VIS1);
4291 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4292 break;
4293 case 0x052: /* VIS I fpadd32 */
4294 CHECK_FPU_FEATURE(dc, VIS1);
4295 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4296 break;
4297 case 0x053: /* VIS I fpadd32s */
4298 CHECK_FPU_FEATURE(dc, VIS1);
4299 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4300 break;
4301 case 0x054: /* VIS I fpsub16 */
4302 CHECK_FPU_FEATURE(dc, VIS1);
4303 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4304 break;
4305 case 0x055: /* VIS I fpsub16s */
4306 CHECK_FPU_FEATURE(dc, VIS1);
4307 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4308 break;
4309 case 0x056: /* VIS I fpsub32 */
4310 CHECK_FPU_FEATURE(dc, VIS1);
4311 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4312 break;
4313 case 0x057: /* VIS I fpsub32s */
4314 CHECK_FPU_FEATURE(dc, VIS1);
4315 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4316 break;
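/* The remaining VIS logical operations need no helpers: they map
directly onto TCG i64/i32 ops on the fp register contents, and
fzero/fone simply write constants. */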
4317 case 0x060: /* VIS I fzero */
4318 CHECK_FPU_FEATURE(dc, VIS1);
4319 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4320 tcg_gen_movi_i64(cpu_dst_64, 0);
4321 gen_store_fpr_D(dc, rd, cpu_dst_64);
4322 break;
4323 case 0x061: /* VIS I fzeros */
4324 CHECK_FPU_FEATURE(dc, VIS1);
4325 cpu_dst_32 = gen_dest_fpr_F(dc);
4326 tcg_gen_movi_i32(cpu_dst_32, 0);
4327 gen_store_fpr_F(dc, rd, cpu_dst_32);
4328 break;
4329 case 0x062: /* VIS I fnor */
4330 CHECK_FPU_FEATURE(dc, VIS1);
4331 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4332 break;
4333 case 0x063: /* VIS I fnors */
4334 CHECK_FPU_FEATURE(dc, VIS1);
4335 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4336 break;
4337 case 0x064: /* VIS I fandnot2 */
4338 CHECK_FPU_FEATURE(dc, VIS1);
4339 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4340 break;
4341 case 0x065: /* VIS I fandnot2s */
4342 CHECK_FPU_FEATURE(dc, VIS1);
4343 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4344 break;
4345 case 0x066: /* VIS I fnot2 */
4346 CHECK_FPU_FEATURE(dc, VIS1);
4347 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4348 break;
4349 case 0x067: /* VIS I fnot2s */
4350 CHECK_FPU_FEATURE(dc, VIS1);
4351 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4352 break;
4353 case 0x068: /* VIS I fandnot1 */
4354 CHECK_FPU_FEATURE(dc, VIS1);
4355 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4356 break;
4357 case 0x069: /* VIS I fandnot1s */
4358 CHECK_FPU_FEATURE(dc, VIS1);
4359 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4360 break;
4361 case 0x06a: /* VIS I fnot1 */
4362 CHECK_FPU_FEATURE(dc, VIS1);
4363 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4364 break;
4365 case 0x06b: /* VIS I fnot1s */
4366 CHECK_FPU_FEATURE(dc, VIS1);
4367 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4368 break;
4369 case 0x06c: /* VIS I fxor */
4370 CHECK_FPU_FEATURE(dc, VIS1);
4371 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4372 break;
4373 case 0x06d: /* VIS I fxors */
4374 CHECK_FPU_FEATURE(dc, VIS1);
4375 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4376 break;
4377 case 0x06e: /* VIS I fnand */
4378 CHECK_FPU_FEATURE(dc, VIS1);
4379 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4380 break;
4381 case 0x06f: /* VIS I fnands */
4382 CHECK_FPU_FEATURE(dc, VIS1);
4383 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4384 break;
4385 case 0x070: /* VIS I fand */
4386 CHECK_FPU_FEATURE(dc, VIS1);
4387 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4388 break;
4389 case 0x071: /* VIS I fands */
4390 CHECK_FPU_FEATURE(dc, VIS1);
4391 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4392 break;
4393 case 0x072: /* VIS I fxnor */
4394 CHECK_FPU_FEATURE(dc, VIS1);
4395 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4396 break;
4397 case 0x073: /* VIS I fxnors */
4398 CHECK_FPU_FEATURE(dc, VIS1);
4399 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4400 break;
4401 case 0x074: /* VIS I fsrc1 */
4402 CHECK_FPU_FEATURE(dc, VIS1);
4403 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4404 gen_store_fpr_D(dc, rd, cpu_src1_64);
4405 break;
4406 case 0x075: /* VIS I fsrc1s */
4407 CHECK_FPU_FEATURE(dc, VIS1);
4408 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4409 gen_store_fpr_F(dc, rd, cpu_src1_32);
4410 break;
4411 case 0x076: /* VIS I fornot2 */
4412 CHECK_FPU_FEATURE(dc, VIS1);
4413 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4414 break;
4415 case 0x077: /* VIS I fornot2s */
4416 CHECK_FPU_FEATURE(dc, VIS1);
4417 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4418 break;
4419 case 0x078: /* VIS I fsrc2 */
4420 CHECK_FPU_FEATURE(dc, VIS1);
4421 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4422 gen_store_fpr_D(dc, rd, cpu_src1_64);
4423 break;
4424 case 0x079: /* VIS I fsrc2s */
4425 CHECK_FPU_FEATURE(dc, VIS1);
4426 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4427 gen_store_fpr_F(dc, rd, cpu_src1_32);
4428 break;
4429 case 0x07a: /* VIS I fornot1 */
4430 CHECK_FPU_FEATURE(dc, VIS1);
4431 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4432 break;
4433 case 0x07b: /* VIS I fornot1s */
4434 CHECK_FPU_FEATURE(dc, VIS1);
4435 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4436 break;
4437 case 0x07c: /* VIS I for */
4438 CHECK_FPU_FEATURE(dc, VIS1);
4439 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4440 break;
4441 case 0x07d: /* VIS I fors */
4442 CHECK_FPU_FEATURE(dc, VIS1);
4443 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4444 break;
4445 case 0x07e: /* VIS I fone */
4446 CHECK_FPU_FEATURE(dc, VIS1);
4447 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4448 tcg_gen_movi_i64(cpu_dst_64, -1);
4449 gen_store_fpr_D(dc, rd, cpu_dst_64);
4450 break;
4451 case 0x07f: /* VIS I fones */
4452 CHECK_FPU_FEATURE(dc, VIS1);
4453 cpu_dst_32 = gen_dest_fpr_F(dc);
4454 tcg_gen_movi_i32(cpu_dst_32, -1);
4455 gen_store_fpr_F(dc, rd, cpu_dst_32);
4456 break;
4457 case 0x080: /* VIS I shutdown */
4458 case 0x081: /* VIS II siam */
4459 // XXX: not implemented; trap as an illegal instruction for now
4460 goto illegal_insn;
4461 default:
4462 goto illegal_insn;
4463 }
4464 #else
4465 goto ncp_insn;
4466 #endif
4467 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4468 #ifdef TARGET_SPARC64
4469 goto illegal_insn;
4470 #else
4471 goto ncp_insn;
4472 #endif
4473 #ifdef TARGET_SPARC64
4474 } else if (xop == 0x39) { /* V9 return */
4475 TCGv_i32 r_const;
4476
4477 save_state(dc);
4478 cpu_src1 = get_src1(dc, insn);
4479 if (IS_IMM) { /* immediate */
4480 simm = GET_FIELDs(insn, 19, 31);
4481 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4482 } else { /* register */
4483 rs2 = GET_FIELD(insn, 27, 31);
4484 if (rs2) {
4485 cpu_src2 = gen_load_gpr(dc, rs2);
4486 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4487 } else {
4488 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4489 }
4490 }
4491 gen_helper_restore(cpu_env);
4492 gen_mov_pc_npc(dc);
4493 r_const = tcg_const_i32(3);
4494 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4495 tcg_temp_free_i32(r_const);
4496 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4497 dc->npc = DYNAMIC_PC;
4498 goto jmp_insn;
4499 #endif
4500 } else {
4501 cpu_src1 = get_src1(dc, insn);
4502 if (IS_IMM) { /* immediate */
4503 simm = GET_FIELDs(insn, 19, 31);
4504 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4505 } else { /* register */
4506 rs2 = GET_FIELD(insn, 27, 31);
4507 if (rs2) {
4508 cpu_src2 = gen_load_gpr(dc, rs2);
4509 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4510 } else {
4511 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4512 }
4513 }
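/* Common path for jmpl/rett/save/restore: cpu_dst now holds
rs1 + simm, rs1 + rs2, or just rs1 when rs2 is %g0. */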
4514 switch (xop) {
4515 case 0x38: /* jmpl */
4516 {
4517 TCGv t;
4518 TCGv_i32 r_const;
4519
4520 t = gen_dest_gpr(dc, rd);
4521 tcg_gen_movi_tl(t, dc->pc);
4522 gen_store_gpr(dc, rd, t);
4523 gen_mov_pc_npc(dc);
4524 r_const = tcg_const_i32(3);
4525 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4526 tcg_temp_free_i32(r_const);
4527 gen_address_mask(dc, cpu_dst);
4528 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4529 dc->npc = DYNAMIC_PC;
4530 }
4531 goto jmp_insn;
4532 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4533 case 0x39: /* rett, V9 return */
4534 {
4535 TCGv_i32 r_const;
4536
4537 if (!supervisor(dc))
4538 goto priv_insn;
4539 gen_mov_pc_npc(dc);
4540 r_const = tcg_const_i32(3);
4541 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4542 tcg_temp_free_i32(r_const);
4543 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4544 dc->npc = DYNAMIC_PC;
4545 gen_helper_rett(cpu_env);
4546 }
4547 goto jmp_insn;
4548 #endif
4549 case 0x3b: /* flush */
4550 if (!(dc->def->features & CPU_FEATURE_FLUSH))
4551 goto unimp_flush;
4552 /* nop */
4553 break;
4554 case 0x3c: /* save */
4555 save_state(dc);
4556 gen_helper_save(cpu_env);
4557 gen_store_gpr(dc, rd, cpu_dst);
4558 break;
4559 case 0x3d: /* restore */
4560 save_state(dc);
4561 gen_helper_restore(cpu_env);
4562 gen_store_gpr(dc, rd, cpu_dst);
4563 break;
4564 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4565 case 0x3e: /* V9 done/retry */
4566 {
4567 switch (rd) {
4568 case 0:
4569 if (!supervisor(dc))
4570 goto priv_insn;
4571 dc->npc = DYNAMIC_PC;
4572 dc->pc = DYNAMIC_PC;
4573 gen_helper_done(cpu_env);
4574 goto jmp_insn;
4575 case 1:
4576 if (!supervisor(dc))
4577 goto priv_insn;
4578 dc->npc = DYNAMIC_PC;
4579 dc->pc = DYNAMIC_PC;
4580 gen_helper_retry(cpu_env);
4581 goto jmp_insn;
4582 default:
4583 goto illegal_insn;
4584 }
4585 }
4586 break;
4587 #endif
4588 default:
4589 goto illegal_insn;
4590 }
4591 }
4592 break;
4593 }
4594 break;
4595 case 3: /* load/store instructions */
4596 {
4597 unsigned int xop = GET_FIELD(insn, 7, 12);
4598 /* ??? gen_address_mask prevents us from using a source
4599 register directly. Always generate a temporary. */
4600 TCGv cpu_addr = get_temp_tl(dc);
4601
4602 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4603 if (xop == 0x3c || xop == 0x3e) {
4604 /* V9 casa/casxa : no offset */
4605 } else if (IS_IMM) { /* immediate */
4606 simm = GET_FIELDs(insn, 19, 31);
4607 if (simm != 0) {
4608 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4609 }
4610 } else { /* register */
4611 rs2 = GET_FIELD(insn, 27, 31);
4612 if (rs2 != 0) {
4613 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4614 }
4615 }
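/* cpu_addr now holds the effective address. casa/casxa take no
offset because their rs2 is the comparison operand, loaded
separately below. */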
4616 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4617 (xop > 0x17 && xop <= 0x1d ) ||
4618 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4619 TCGv cpu_val = gen_dest_gpr(dc, rd);
4620
4621 switch (xop) {
4622 case 0x0: /* ld, V9 lduw, load unsigned word */
4623 gen_address_mask(dc, cpu_addr);
4624 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4625 break;
4626 case 0x1: /* ldub, load unsigned byte */
4627 gen_address_mask(dc, cpu_addr);
4628 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4629 break;
4630 case 0x2: /* lduh, load unsigned halfword */
4631 gen_address_mask(dc, cpu_addr);
4632 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4633 break;
4634 case 0x3: /* ldd, load double word */
4635 if (rd & 1)
4636 goto illegal_insn;
4637 else {
4638 TCGv_i32 r_const;
4639 TCGv_i64 t64;
4640
4641 save_state(dc);
4642 r_const = tcg_const_i32(7);
4643 /* XXX remove alignment check */
4644 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4645 tcg_temp_free_i32(r_const);
4646 gen_address_mask(dc, cpu_addr);
4647 t64 = tcg_temp_new_i64();
4648 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4649 tcg_gen_trunc_i64_tl(cpu_tmp0, t64);
4650 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
4651 gen_store_gpr(dc, rd + 1, cpu_tmp0);
4652 tcg_gen_shri_i64(t64, t64, 32);
4653 tcg_gen_trunc_i64_tl(cpu_val, t64);
4654 tcg_temp_free_i64(t64);
4655 tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
4656 }
4657 break;
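/* Note: for ldd above, the low half of the 64-bit load goes to
r[rd + 1] and the high half to r[rd], matching SPARC's big-endian
register-pair layout. */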
4658 case 0x9: /* ldsb, load signed byte */
4659 gen_address_mask(dc, cpu_addr);
4660 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4661 break;
4662 case 0xa: /* ldsh, load signed halfword */
4663 gen_address_mask(dc, cpu_addr);
4664 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4665 break;
4666 case 0xd: /* ldstub -- XXX: should be atomic */
4667 {
4668 TCGv r_const;
4669
4670 gen_address_mask(dc, cpu_addr);
4671 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx); /* zero-extends */
4672 r_const = tcg_const_tl(0xff);
4673 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4674 tcg_temp_free(r_const);
4675 }
4676 break;
4677 case 0x0f: /* swap, swap register with memory.
4678 XXX: should be atomic */
4679 CHECK_IU_FEATURE(dc, SWAP);
4680 cpu_src1 = gen_load_gpr(dc, rd);
4681 gen_address_mask(dc, cpu_addr);
4682 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4683 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4684 tcg_gen_mov_tl(cpu_val, cpu_tmp0);
4685 break;
4686 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4687 case 0x10: /* lda, V9 lduwa, load word alternate */
4688 #ifndef TARGET_SPARC64
4689 if (IS_IMM)
4690 goto illegal_insn;
4691 if (!supervisor(dc))
4692 goto priv_insn;
4693 #endif
4694 save_state(dc);
4695 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4696 break;
4697 case 0x11: /* lduba, load unsigned byte alternate */
4698 #ifndef TARGET_SPARC64
4699 if (IS_IMM)
4700 goto illegal_insn;
4701 if (!supervisor(dc))
4702 goto priv_insn;
4703 #endif
4704 save_state(dc);
4705 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4706 break;
4707 case 0x12: /* lduha, load unsigned halfword alternate */
4708 #ifndef TARGET_SPARC64
4709 if (IS_IMM)
4710 goto illegal_insn;
4711 if (!supervisor(dc))
4712 goto priv_insn;
4713 #endif
4714 save_state(dc);
4715 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4716 break;
4717 case 0x13: /* ldda, load double word alternate */
4718 #ifndef TARGET_SPARC64
4719 if (IS_IMM)
4720 goto illegal_insn;
4721 if (!supervisor(dc))
4722 goto priv_insn;
4723 #endif
4724 if (rd & 1)
4725 goto illegal_insn;
4726 save_state(dc);
4727 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4728 goto skip_move;
4729 case 0x19: /* ldsba, load signed byte alternate */
4730 #ifndef TARGET_SPARC64
4731 if (IS_IMM)
4732 goto illegal_insn;
4733 if (!supervisor(dc))
4734 goto priv_insn;
4735 #endif
4736 save_state(dc);
4737 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4738 break;
4739 case 0x1a: /* ldsha, load signed halfword alternate */
4740 #ifndef TARGET_SPARC64
4741 if (IS_IMM)
4742 goto illegal_insn;
4743 if (!supervisor(dc))
4744 goto priv_insn;
4745 #endif
4746 save_state(dc);
4747 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4748 break;
4749 case 0x1d: /* ldstuba -- XXX: should be atomic */
4750 #ifndef TARGET_SPARC64
4751 if (IS_IMM)
4752 goto illegal_insn;
4753 if (!supervisor(dc))
4754 goto priv_insn;
4755 #endif
4756 save_state(dc);
4757 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4758 break;
4759 case 0x1f: /* swapa, swap reg with alt. memory.
4760 XXX: should be atomic */
4761 CHECK_IU_FEATURE(dc, SWAP);
4762 #ifndef TARGET_SPARC64
4763 if (IS_IMM)
4764 goto illegal_insn;
4765 if (!supervisor(dc))
4766 goto priv_insn;
4767 #endif
4768 save_state(dc);
4769 cpu_src1 = gen_load_gpr(dc, rd);
4770 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4771 break;
4772
4773 #ifndef TARGET_SPARC64
4774 case 0x30: /* ldc */
4775 case 0x31: /* ldcsr */
4776 case 0x33: /* lddc */
4777 goto ncp_insn;
4778 #endif
4779 #endif
4780 #ifdef TARGET_SPARC64
4781 case 0x08: /* V9 ldsw */
4782 gen_address_mask(dc, cpu_addr);
4783 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4784 break;
4785 case 0x0b: /* V9 ldx */
4786 gen_address_mask(dc, cpu_addr);
4787 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4788 break;
4789 case 0x18: /* V9 ldswa */
4790 save_state(dc);
4791 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4792 break;
4793 case 0x1b: /* V9 ldxa */
4794 save_state(dc);
4795 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4796 break;
4797 case 0x2d: /* V9 prefetch, no effect */
4798 goto skip_move;
4799 case 0x30: /* V9 ldfa */
4800 if (gen_trap_ifnofpu(dc)) {
4801 goto jmp_insn;
4802 }
4803 save_state(dc);
4804 gen_ldf_asi(cpu_addr, insn, 4, rd);
4805 gen_update_fprs_dirty(rd);
4806 goto skip_move;
4807 case 0x33: /* V9 lddfa */
4808 if (gen_trap_ifnofpu(dc)) {
4809 goto jmp_insn;
4810 }
4811 save_state(dc);
4812 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4813 gen_update_fprs_dirty(DFPREG(rd));
4814 goto skip_move;
4815 case 0x3d: /* V9 prefetcha, no effect */
4816 goto skip_move;
4817 case 0x32: /* V9 ldqfa */
4818 CHECK_FPU_FEATURE(dc, FLOAT128);
4819 if (gen_trap_ifnofpu(dc)) {
4820 goto jmp_insn;
4821 }
4822 save_state(dc);
4823 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4824 gen_update_fprs_dirty(QFPREG(rd));
4825 goto skip_move;
4826 #endif
4827 default:
4828 goto illegal_insn;
4829 }
4830 gen_store_gpr(dc, rd, cpu_val);
4831 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4832 skip_move: ;
4833 #endif
4834 } else if (xop >= 0x20 && xop < 0x24) {
4835 if (gen_trap_ifnofpu(dc)) {
4836 goto jmp_insn;
4837 }
4838 save_state(dc);
4839 switch (xop) {
4840 case 0x20: /* ldf, load fpreg */
4841 gen_address_mask(dc, cpu_addr);
4842 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4843 cpu_dst_32 = gen_dest_fpr_F(dc);
4844 tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
4845 gen_store_fpr_F(dc, rd, cpu_dst_32);
4846 break;
4847 case 0x21: /* ldfsr, V9 ldxfsr */
4848 #ifdef TARGET_SPARC64
4849 gen_address_mask(dc, cpu_addr);
4850 if (rd == 1) {
4851 TCGv_i64 t64 = tcg_temp_new_i64();
4852 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4853 gen_helper_ldxfsr(cpu_env, t64);
4854 tcg_temp_free_i64(t64);
4855 break;
4856 }
4857 #endif
4858 {
4859 TCGv_i32 t32 = get_temp_i32(dc);
4860 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4861 tcg_gen_trunc_tl_i32(t32, cpu_tmp0);
4862 gen_helper_ldfsr(cpu_env, t32);
4863 }
4864 break;
4865 case 0x22: /* ldqf, load quad fpreg */
4866 {
4867 TCGv_i32 r_const;
4868
4869 CHECK_FPU_FEATURE(dc, FLOAT128);
4870 r_const = tcg_const_i32(dc->mem_idx);
4871 gen_address_mask(dc, cpu_addr);
4872 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4873 tcg_temp_free_i32(r_const);
4874 gen_op_store_QT0_fpr(QFPREG(rd));
4875 gen_update_fprs_dirty(QFPREG(rd));
4876 }
4877 break;
4878 case 0x23: /* lddf, load double fpreg */
4879 gen_address_mask(dc, cpu_addr);
4880 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4881 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4882 gen_store_fpr_D(dc, rd, cpu_dst_64);
4883 break;
4884 default:
4885 goto illegal_insn;
4886 }
4887 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4888 xop == 0xe || xop == 0x1e) {
4889 TCGv cpu_val = gen_load_gpr(dc, rd);
4890
4891 switch (xop) {
4892 case 0x4: /* st, store word */
4893 gen_address_mask(dc, cpu_addr);
4894 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4895 break;
4896 case 0x5: /* stb, store byte */
4897 gen_address_mask(dc, cpu_addr);
4898 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4899 break;
4900 case 0x6: /* sth, store halfword */
4901 gen_address_mask(dc, cpu_addr);
4902 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4903 break;
4904 case 0x7: /* std, store double word */
4905 if (rd & 1)
4906 goto illegal_insn;
4907 else {
4908 TCGv_i32 r_const;
4909 TCGv_i64 t64;
4910 TCGv lo;
4911
4912 save_state(dc);
4913 gen_address_mask(dc, cpu_addr);
4914 r_const = tcg_const_i32(7);
4915 /* XXX remove alignment check */
4916 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4917 tcg_temp_free_i32(r_const);
4918 lo = gen_load_gpr(dc, rd + 1);
4919
4920 t64 = tcg_temp_new_i64();
4921 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
4922 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
4923 tcg_temp_free_i64(t64);
4924 }
4925 break;
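/* Note: for std above, r[rd] supplies the most-significant word:
concat_tl_i64 puts 'lo' (r[rd + 1]) in the low half and cpu_val
(r[rd]) in the high half of the 64-bit store. */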
4926 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4927 case 0x14: /* sta, V9 stwa, store word alternate */
4928 #ifndef TARGET_SPARC64
4929 if (IS_IMM)
4930 goto illegal_insn;
4931 if (!supervisor(dc))
4932 goto priv_insn;
4933 #endif
4934 save_state(dc);
4935 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4936 dc->npc = DYNAMIC_PC;
4937 break;
4938 case 0x15: /* stba, store byte alternate */
4939 #ifndef TARGET_SPARC64
4940 if (IS_IMM)
4941 goto illegal_insn;
4942 if (!supervisor(dc))
4943 goto priv_insn;
4944 #endif
4945 save_state(dc);
4946 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4947 dc->npc = DYNAMIC_PC;
4948 break;
4949 case 0x16: /* stha, store halfword alternate */
4950 #ifndef TARGET_SPARC64
4951 if (IS_IMM)
4952 goto illegal_insn;
4953 if (!supervisor(dc))
4954 goto priv_insn;
4955 #endif
4956 save_state(dc);
4957 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4958 dc->npc = DYNAMIC_PC;
4959 break;
4960 case 0x17: /* stda, store double word alternate */
4961 #ifndef TARGET_SPARC64
4962 if (IS_IMM)
4963 goto illegal_insn;
4964 if (!supervisor(dc))
4965 goto priv_insn;
4966 #endif
4967 if (rd & 1)
4968 goto illegal_insn;
4969 else {
4970 save_state(dc);
4971 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4972 }
4973 break;
4974 #endif
4975 #ifdef TARGET_SPARC64
4976 case 0x0e: /* V9 stx */
4977 gen_address_mask(dc, cpu_addr);
4978 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4979 break;
4980 case 0x1e: /* V9 stxa */
4981 save_state(dc);
4982 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4983 dc->npc = DYNAMIC_PC;
4984 break;
4985 #endif
4986 default:
4987 goto illegal_insn;
4988 }
4989 } else if (xop > 0x23 && xop < 0x28) {
4990 if (gen_trap_ifnofpu(dc)) {
4991 goto jmp_insn;
4992 }
4993 save_state(dc);
4994 switch (xop) {
4995 case 0x24: /* stf, store fpreg */
4996 gen_address_mask(dc, cpu_addr);
4997 cpu_src1_32 = gen_load_fpr_F(dc, rd);
4998 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
4999 tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
5000 break;
5001 case 0x25: /* stfsr, V9 stxfsr */
5002 {
5003 TCGv t = get_temp_tl(dc);
5004
5005 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
5006 #ifdef TARGET_SPARC64
5007 gen_address_mask(dc, cpu_addr);
5008 if (rd == 1) {
5009 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
5010 break;
5011 }
5012 #endif
5013 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5014 }
5015 break;
5016 case 0x26:
5017 #ifdef TARGET_SPARC64
5018 /* V9 stqf, store quad fpreg */
5019 {
5020 TCGv_i32 r_const;
5021
5022 CHECK_FPU_FEATURE(dc, FLOAT128);
5023 gen_op_load_fpr_QT0(QFPREG(rd));
5024 r_const = tcg_const_i32(dc->mem_idx);
5025 gen_address_mask(dc, cpu_addr);
5026 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5027 tcg_temp_free_i32(r_const);
5028 }
5029 break;
5030 #else /* !TARGET_SPARC64 */
5031 /* stdfq, store floating point queue */
5032 #if defined(CONFIG_USER_ONLY)
5033 goto illegal_insn;
5034 #else
5035 if (!supervisor(dc))
5036 goto priv_insn;
5037 if (gen_trap_ifnofpu(dc)) {
5038 goto jmp_insn;
5039 }
5040 goto nfq_insn;
5041 #endif
5042 #endif
5043 case 0x27: /* stdf, store double fpreg */
5044 gen_address_mask(dc, cpu_addr);
5045 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5046 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5047 break;
5048 default:
5049 goto illegal_insn;
5050 }
5051 } else if (xop > 0x33 && xop < 0x3f) {
5052 save_state(dc);
5053 switch (xop) {
5054 #ifdef TARGET_SPARC64
5055 case 0x34: /* V9 stfa */
5056 if (gen_trap_ifnofpu(dc)) {
5057 goto jmp_insn;
5058 }
5059 gen_stf_asi(cpu_addr, insn, 4, rd);
5060 break;
5061 case 0x36: /* V9 stqfa */
5062 {
5063 TCGv_i32 r_const;
5064
5065 CHECK_FPU_FEATURE(dc, FLOAT128);
5066 if (gen_trap_ifnofpu(dc)) {
5067 goto jmp_insn;
5068 }
5069 r_const = tcg_const_i32(7);
5070 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5071 tcg_temp_free_i32(r_const);
5072 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5073 }
5074 break;
5075 case 0x37: /* V9 stdfa */
5076 if (gen_trap_ifnofpu(dc)) {
5077 goto jmp_insn;
5078 }
5079 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5080 break;
5081 case 0x3c: /* V9 casa */
5082 rs2 = GET_FIELD(insn, 27, 31);
5083 cpu_src2 = gen_load_gpr(dc, rs2);
5084 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5085 break;
5086 case 0x3e: /* V9 casxa */
5087 rs2 = GET_FIELD(insn, 27, 31);
5088 cpu_src2 = gen_load_gpr(dc, rs2);
5089 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5090 break;
5091 #else
5092 case 0x34: /* stc */
5093 case 0x35: /* stcsr */
5094 case 0x36: /* stdcq */
5095 case 0x37: /* stdc */
5096 goto ncp_insn;
5097 #endif
5098 default:
5099 goto illegal_insn;
5100 }
5101 } else {
5102 goto illegal_insn;
5103 }
5104 }
5105 break;
5106 }
5107 /* default case for non-jump instructions */
5108 if (dc->npc == DYNAMIC_PC) {
5109 dc->pc = DYNAMIC_PC;
5110 gen_op_next_insn();
5111 } else if (dc->npc == JUMP_PC) {
5112 /* we can do a static jump */
5113 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5114 dc->is_br = 1;
5115 } else {
5116 dc->pc = dc->npc;
5117 dc->npc = dc->npc + 4;
5118 }
5119 jmp_insn:
5120 goto egress;
5121 illegal_insn:
5122 {
5123 TCGv_i32 r_const;
5124
5125 save_state(dc);
5126 r_const = tcg_const_i32(TT_ILL_INSN);
5127 gen_helper_raise_exception(cpu_env, r_const);
5128 tcg_temp_free_i32(r_const);
5129 dc->is_br = 1;
5130 }
5131 goto egress;
5132 unimp_flush:
5133 {
5134 TCGv_i32 r_const;
5135
5136 save_state(dc);
5137 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5138 gen_helper_raise_exception(cpu_env, r_const);
5139 tcg_temp_free_i32(r_const);
5140 dc->is_br = 1;
5141 }
5142 goto egress;
5143 #if !defined(CONFIG_USER_ONLY)
5144 priv_insn:
5145 {
5146 TCGv_i32 r_const;
5147
5148 save_state(dc);
5149 r_const = tcg_const_i32(TT_PRIV_INSN);
5150 gen_helper_raise_exception(cpu_env, r_const);
5151 tcg_temp_free_i32(r_const);
5152 dc->is_br = 1;
5153 }
5154 goto egress;
5155 #endif
5156 nfpu_insn:
5157 save_state(dc);
5158 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5159 dc->is_br = 1;
5160 goto egress;
5161 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5162 nfq_insn:
5163 save_state(dc);
5164 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5165 dc->is_br = 1;
5166 goto egress;
5167 #endif
5168 #ifndef TARGET_SPARC64
5169 ncp_insn:
5170 {
5171 TCGv_i32 r_const;
5172
5173 save_state(dc);
5174 r_const = tcg_const_i32(TT_NCP_INSN);
5175 gen_helper_raise_exception(cpu_env, r_const);
5176 tcg_temp_free_i32(r_const);
5177 dc->is_br = 1;
5178 }
5179 goto egress;
5180 #endif
5181 egress:
5182 if (dc->n_t32 != 0) {
5183 int i;
5184 for (i = dc->n_t32 - 1; i >= 0; --i) {
5185 tcg_temp_free_i32(dc->t32[i]);
5186 }
5187 dc->n_t32 = 0;
5188 }
5189 if (dc->n_ttl != 0) {
5190 int i;
5191 for (i = dc->n_ttl - 1; i >= 0; --i) {
5192 tcg_temp_free(dc->ttl[i]);
5193 }
5194 dc->n_ttl = 0;
5195 }
5196 }
5197
5198 static inline void gen_intermediate_code_internal(TranslationBlock *tb,
5199 int spc, CPUSPARCState *env)
5200 {
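/* spc != 0 selects search-pc mode: instead of producing a runnable
TB we record pc/npc/icount per opcode into the gen_opc_* arrays so
that restore_state_to_opc() can recover the guest state. */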
5201 target_ulong pc_start, last_pc;
5202 uint16_t *gen_opc_end;
5203 DisasContext dc1, *dc = &dc1;
5204 CPUBreakpoint *bp;
5205 int j, lj = -1;
5206 int num_insns;
5207 int max_insns;
5208 unsigned int insn;
5209
5210 memset(dc, 0, sizeof(DisasContext));
5211 dc->tb = tb;
5212 pc_start = tb->pc;
5213 dc->pc = pc_start;
5214 last_pc = dc->pc;
5215 dc->npc = (target_ulong) tb->cs_base;
5216 dc->cc_op = CC_OP_DYNAMIC;
5217 dc->mem_idx = cpu_mmu_index(env);
5218 dc->def = env->def;
5219 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5220 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5221 dc->singlestep = (env->singlestep_enabled || singlestep);
5222 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
5223
5224 num_insns = 0;
5225 max_insns = tb->cflags & CF_COUNT_MASK;
5226 if (max_insns == 0)
5227 max_insns = CF_COUNT_MASK;
5228 gen_icount_start();
5229 do {
5230 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
5231 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5232 if (bp->pc == dc->pc) {
5233 if (dc->pc != pc_start)
5234 save_state(dc);
5235 gen_helper_debug(cpu_env);
5236 tcg_gen_exit_tb(0);
5237 dc->is_br = 1;
5238 goto exit_gen_loop;
5239 }
5240 }
5241 }
5242 if (spc) {
5243 qemu_log("Search PC...\n");
5244 j = gen_opc_ptr - gen_opc_buf;
5245 if (lj < j) {
5246 lj++;
5247 while (lj < j)
5248 gen_opc_instr_start[lj++] = 0;
5249 gen_opc_pc[lj] = dc->pc;
5250 gen_opc_npc[lj] = dc->npc;
5251 gen_opc_instr_start[lj] = 1;
5252 gen_opc_icount[lj] = num_insns;
5253 }
5254 }
5255 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
5256 gen_io_start();
5257 last_pc = dc->pc;
5258 insn = cpu_ldl_code(env, dc->pc);
5259
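/* cpu_tmp0 and cpu_dst are per-instruction scratch temporaries;
they are freed again as soon as disas_sparc_insn() returns. */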
5260 cpu_tmp0 = tcg_temp_new();
5261 cpu_dst = tcg_temp_new();
5262
5263 disas_sparc_insn(dc, insn);
5264 num_insns++;
5265
5266 tcg_temp_free(cpu_dst);
5267 tcg_temp_free(cpu_tmp0);
5268
5269 if (dc->is_br)
5270 break;
5271 /* if the next PC is different, we abort now */
5272 if (dc->pc != (last_pc + 4))
5273 break;
5274 /* if we reach a page boundary, we stop generation so that the
5275 PC of a TT_TFAULT exception is always in the right page */
5276 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5277 break;
5278 /* if single step mode, we generate only one instruction and
5279 generate an exception */
5280 if (dc->singlestep) {
5281 break;
5282 }
5283 } while ((gen_opc_ptr < gen_opc_end) &&
5284 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5285 num_insns < max_insns);
5286
5287 exit_gen_loop:
5288 if (tb->cflags & CF_LAST_IO) {
5289 gen_io_end();
5290 }
5291 if (!dc->is_br) {
5292 if (dc->pc != DYNAMIC_PC &&
5293 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5294 /* static PC and NPC: we can use direct chaining */
5295 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5296 } else {
5297 if (dc->pc != DYNAMIC_PC) {
5298 tcg_gen_movi_tl(cpu_pc, dc->pc);
5299 }
5300 save_npc(dc);
5301 tcg_gen_exit_tb(0);
5302 }
5303 }
5304 gen_icount_end(tb, num_insns);
5305 *gen_opc_ptr = INDEX_op_end;
5306 if (spc) {
5307 j = gen_opc_ptr - gen_opc_buf;
5308 lj++;
5309 while (lj <= j)
5310 gen_opc_instr_start[lj++] = 0;
5311 #if 0
5312 log_page_dump();
5313 #endif
5314 gen_opc_jump_pc[0] = dc->jump_pc[0];
5315 gen_opc_jump_pc[1] = dc->jump_pc[1];
5316 } else {
5317 tb->size = last_pc + 4 - pc_start;
5318 tb->icount = num_insns;
5319 }
5320 #ifdef DEBUG_DISAS
5321 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5322 qemu_log("--------------\n");
5323 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5324 log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
5325 qemu_log("\n");
5326 }
5327 #endif
5328 }
5329
5330 void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
5331 {
5332 gen_intermediate_code_internal(tb, 0, env);
5333 }
5334
5335 void gen_intermediate_code_pc(CPUSPARCState *env, TranslationBlock *tb)
5336 {
5337 gen_intermediate_code_internal(tb, 1, env);
5338 }
5339
5340 void gen_intermediate_code_init(CPUSPARCState *env)
5341 {
5342 unsigned int i;
5343 static int inited;
5344 static const char * const gregnames[8] = {
5345 NULL, // g0 not used
5346 "g1",
5347 "g2",
5348 "g3",
5349 "g4",
5350 "g5",
5351 "g6",
5352 "g7",
5353 };
5354 static const char * const fregnames[32] = {
5355 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5356 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5357 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5358 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5359 };
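/* Only even-numbered fp register names appear here: cpu_fpr[] models
the fp file as TARGET_DPREGS 64-bit doubles, each covering a pair
of 32-bit registers. */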
5360
5361 /* init various static tables */
5362 if (!inited) {
5363 inited = 1;
5364
5365 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5366 cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
5367 offsetof(CPUSPARCState, regwptr),
5368 "regwptr");
5369 #ifdef TARGET_SPARC64
5370 cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
5371 "xcc");
5372 cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
5373 "asi");
5374 cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
5375 "fprs");
5376 cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
5377 "gsr");
5378 cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
5379 offsetof(CPUSPARCState, tick_cmpr),
5380 "tick_cmpr");
5381 cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
5382 offsetof(CPUSPARCState, stick_cmpr),
5383 "stick_cmpr");
5384 cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
5385 offsetof(CPUSPARCState, hstick_cmpr),
5386 "hstick_cmpr");
5387 cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
5388 "hintp");
5389 cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
5390 "htba");
5391 cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
5392 "hver");
5393 cpu_ssr = tcg_global_mem_new(TCG_AREG0,
5394 offsetof(CPUSPARCState, ssr), "ssr");
5395 cpu_ver = tcg_global_mem_new(TCG_AREG0,
5396 offsetof(CPUSPARCState, version), "ver");
5397 cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
5398 offsetof(CPUSPARCState, softint),
5399 "softint");
5400 #else
5401 cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
5402 "wim");
5403 #endif
5404 cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
5405 "cond");
5406 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
5407 "cc_src");
5408 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
5409 offsetof(CPUSPARCState, cc_src2),
5410 "cc_src2");
5411 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
5412 "cc_dst");
5413 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
5414 "cc_op");
5415 cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
5416 "psr");
5417 cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
5418 "fsr");
5419 cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
5420 "pc");
5421 cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
5422 "npc");
5423 cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
5424 #ifndef CONFIG_USER_ONLY
5425 cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
5426 "tbr");
5427 #endif
5428 for (i = 1; i < 8; i++) {
5429 cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
5430 offsetof(CPUSPARCState, gregs[i]),
5431 gregnames[i]);
5432 }
5433 for (i = 0; i < TARGET_DPREGS; i++) {
5434 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
5435 offsetof(CPUSPARCState, fpr[i]),
5436 fregnames[i]);
5437 }
5438
5439 /* register helpers */
5440
5441 #define GEN_HELPER 2
5442 #include "helper.h"
5443 }
5444 }
5445
5446 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
5447 {
5448 target_ulong npc;
5449 env->pc = gen_opc_pc[pc_pos];
5450 npc = gen_opc_npc[pc_pos];
5451 if (npc == DYNAMIC_PC) {
5452 /* dynamic NPC: already stored */
5453 } else if (npc == JUMP_PC) {
5454 /* jump PC: use 'cond' and the jump targets of the translation */
5455 if (env->cond) {
5456 env->npc = gen_opc_jump_pc[0];
5457 } else {
5458 env->npc = gen_opc_jump_pc[1];
5459 }
5460 } else {
5461 env->npc = npc;
5462 }
5463 }