/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas.h"
#include "helper.h"
#include "tcg-op.h"

#define GEN_HELPER 1
#include "helper.h"

#define DEBUG_DISAS
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values,
                         jump_pc[0] or jump_pc[1], according to the
                         branch condition */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond, cpu_dst, cpu_addr, cpu_val;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0;
static TCGv_i32 cpu_tmp32;
static TCGv_i64 cpu_tmp64;
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];

#include "gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    int n_t32;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This macro uses non-native bit order (bit 0 is the MSB)
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b) sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b) sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))
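
/* Editorial note: in the big-endian numbering used by GET_FIELD, bit 0
   is the MSB, so GET_FIELD(insn, 3, 6) == (insn >> 25) & 0xf, i.e.
   instruction bits <28:25>.  GET_FIELD_SP uses the manuals'
   little-endian numbering: GET_FIELD_SP(insn, 25, 27) extracts
   bits <27:25>. */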

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
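
/* Editorial note: on SPARC V9 the double- and quad-precision register
   numbers above 31 are encoded in the instruction with their LSB set
   (e.g. %d32 encodes as 0x01), so DFPREG/QFPREG fold bit 0 back into
   bit 5 to recover the architectural register number. */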

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
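
/* Editorial note: sign_extend() treats bit (len - 1) of x as the sign
   bit, e.g. sign_extend(0x1fff, 13) == -1 while
   sign_extend(0x0fff, 13) == 0xfff. */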

#define IS_IMM (insn & (1<<13))

static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = tcg_temp_new_i32();
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        dc->t32[dc->n_t32++] = ret;
        assert(dc->n_t32 <= ARRAY_SIZE(dc->t32));

        return ret;
    }
#endif
}
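
/* Editorial note: each TCGv_i64 in cpu_fpr[] packs a pair of
   single-precision registers: the even-numbered single lives in the
   high 32 bits and the odd-numbered one in the low 32 bits, which is
   why the even case above needs a shift while the odd case can alias
   the low half directly. */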

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(void)
{
    return cpu_tmp32;
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(void)
{
    return cpu_tmp64;
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

static inline void gen_movl_reg_TN(int reg, TCGv tn)
{
    if (reg == 0)
        tcg_gen_movi_tl(tn, 0);
    else if (reg < 8)
        tcg_gen_mov_tl(tn, cpu_gregs[reg]);
    else {
        tcg_gen_ld_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
    }
}

static inline void gen_movl_TN_reg(int reg, TCGv tn)
{
    if (reg == 0)
        return;
    else if (reg < 8)
        tcg_gen_mov_tl(cpu_gregs[reg], tn);
    else {
        tcg_gen_st_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
    }
}

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
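
/* Editorial note: tcg_gen_exit_tb() with (tb | tb_num) lets the TCG
   runtime patch the jump in this translation block so later executions
   chain directly to the target TB; exit_tb(0) always returns to the
   main loop, which is why cross-page jumps are slower. */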

// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
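
/* Editorial note: for an unsigned 32-bit addition dst = src1 + src2, a
   carry out occurred iff the truncated result wrapped, i.e.
   (uint32_t)dst < (uint32_t)src1 -- exactly the LTU setcond on
   cc_dst/cc_src above.  Likewise for gen_sub32_carry32 below, where the
   borrow condition is src1 < src2. */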

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
        {
            /* For 32-bit hosts, we can re-use the host's hardware carry
               generation by using an ADD2 opcode.  We discard the low
               part of the output.  Ideally we'd combine this operation
               with the add that generated the carry in the first place.  */
            TCGv dst_low = tcg_temp_new();
            tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
                            cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(dst_low);
            goto add_done;
        }
#endif
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
 add_done:
#endif
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2,
                                  DisasContext *dc)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(cpu_cc_dst, src1);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
        {
            /* For 32-bit hosts, we can re-use the host's hardware carry
               generation by using a SUB2 opcode.  We discard the low
               part of the output.  Ideally we'd combine this operation
               with the subtraction that generated the carry in the
               first place.  */
            TCGv dst_low = tcg_temp_new();
            tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
                            cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(dst_low);
            goto sub_done;
        }
#endif
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
 sub_done:
#endif
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero;

    r_temp = tcg_temp_new();

    /* old op:
       if (!(env->y & 1))
           T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
    tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
    tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(cpu_tmp0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
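
/* Editorial note: MULScc performs one step of the V8 iterative
   multiply: the LSB of %y selects whether rs2 is added at all, rs1 is
   shifted right with N ^ V from the previous step as the new MSB while
   its old LSB drops into the top of %y, and the condition codes are
   set from the final add. */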

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2,
                                   int sign_ext)
{
    TCGv_i32 r_src1, r_src2;
    TCGv_i64 r_temp, r_temp2;

    r_src1 = tcg_temp_new_i32();
    r_src2 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(r_src1, src1);
    tcg_gen_trunc_tl_i32(r_src2, src2);

    r_temp = tcg_temp_new_i64();
    r_temp2 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext_i32_i64(r_temp, r_src2);
        tcg_gen_ext_i32_i64(r_temp2, r_src1);
    } else {
        tcg_gen_extu_i32_i64(r_temp, r_src2);
        tcg_gen_extu_i32_i64(r_temp2, r_src1);
    }

    tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);

    tcg_gen_shri_i64(r_temp, r_temp2, 32);
    tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
    tcg_temp_free_i64(r_temp);
    tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);

    tcg_gen_trunc_i64_tl(dst, r_temp2);

    tcg_temp_free_i64(r_temp2);

    tcg_temp_free_i32(r_src1);
    tcg_temp_free_i32(r_src2);
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(cpu_tmp0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    gen_mov_reg_Z(cpu_tmp0, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(cpu_tmp0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(cpu_tmp0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(cpu_tmp0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    gen_mov_reg_Z(cpu_tmp0, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(cpu_tmp0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(cpu_tmp0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
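
/* Editorial note: gen_branch_a() implements an annulling conditional
   branch: when the condition holds, execution continues at the delay
   slot pc2 with npc = pc1 (the branch target); when it fails, the
   delay slot is annulled and execution resumes at pc2 + 4. */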

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
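
/* Editorial note: this materializes the JUMP_PC state as
   npc = cpu_cond ? jump_pc[0] : jump_pc[1], i.e. the branch target
   when the condition held and the fall-through address otherwise. */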

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    /* flush pending conditional evaluations before exposing cpu state */
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };
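
    /* Editorial note: after SUBcc, cc_dst = cc_src - cc_src2, so most
       icc conditions reduce to a direct signed/unsigned comparison of
       the original operands, e.g. "le" becomes cc_src <= cc_src2
       (TCG_COND_LE).  neg/pos/overflow still need the computed flags. */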

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };
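
    /* Editorial note: after a logic op, C and V are known to be zero
       and cc_dst holds the result, so every condition collapses to a
       comparison of cc_dst against zero, as annotated above. */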

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
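
/* Editorial note: the table above holds the *negation* of each
   register condition (hence "Inverted logic"); gen_compare_reg() below
   applies tcg_invert_cond() to recover the branch-taken condition,
   e.g. entry 1 (BRZ) is stored as NE and inverted back to EQ. */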

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif

static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F();

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F();

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F();

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F();

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D();

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D();

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D();

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D();

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D();

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D();

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D();

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D();

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D();

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F();

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F();

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D();

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}
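
/* Editorial note: with the immediate bit set the ASI comes from the
   %asi register at runtime; otherwise it is the 8-bit constant in
   instruction bits <12:5>, i.e. GET_FIELD(insn, 19, 26) in the
   big-endian numbering used here. */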

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, dst, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
}

static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
{
    TCGv_i32 r_asi, r_size;

    gen_movl_reg_TN(rd + 1, cpu_tmp0);
    tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_cas_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
                               int rd)
{
    TCGv r_val1;
    TCGv_i32 r_asi;

    r_val1 = tcg_temp_new();
    gen_movl_reg_TN(rd, r_val1);
    r_asi = gen_get_asi(insn, addr);
    gen_helper_cas_asi(dst, cpu_env, addr, r_val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free(r_val1);
}

static inline void gen_casx_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
                                int rd)
{
    TCGv_i32 r_asi;

    gen_movl_reg_TN(rd, cpu_tmp64);
    r_asi = gen_get_asi(insn, addr);
    gen_helper_casx_asi(dst, cpu_env, addr, cpu_tmp64, val2, r_asi);
    tcg_temp_free_i32(r_asi);
}

#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free(r_sign);
    tcg_temp_free(r_size);
    tcg_temp_free(r_asi);
    tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    tcg_gen_extu_tl_i64(cpu_tmp64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
    tcg_temp_free(r_size);
    tcg_temp_free(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, dst);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free(r_size);
    tcg_temp_free(r_asi);
    tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
}

static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free(r_sign);
    tcg_temp_free(r_size);
    tcg_temp_free(r_asi);
    tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
    gen_movl_TN_reg(rd + 1, cpu_tmp0);
    tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
    tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
    gen_movl_TN_reg(rd, hi);
}

static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
{
    TCGv_i32 r_asi, r_size;

    gen_movl_reg_TN(rd + 1, cpu_tmp0);
    tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
    tcg_temp_free(r_size);
    tcg_temp_free(r_asi);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif

static inline TCGv get_src1(unsigned int insn, TCGv def)
{
    TCGv r_rs1 = def;
    unsigned int rs1;

    rs1 = GET_FIELD(insn, 13, 17);
    if (rs1 == 0) {
        tcg_gen_movi_tl(def, 0);
    } else if (rs1 < 8) {
        r_rs1 = cpu_gregs[rs1];
    } else {
        tcg_gen_ld_tl(def, cpu_regwptr, (rs1 - 8) * sizeof(target_ulong));
    }
    return r_rs1;
}
2218
2219 static inline TCGv get_src2(unsigned int insn, TCGv def)
2220 {
2221 TCGv r_rs2 = def;
2222
2223 if (IS_IMM) { /* immediate */
2224 target_long simm = GET_FIELDs(insn, 19, 31);
2225 tcg_gen_movi_tl(def, simm);
2226 } else { /* register */
2227 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2228 if (rs2 == 0) {
2229 tcg_gen_movi_tl(def, 0);
2230 } else if (rs2 < 8) {
2231 r_rs2 = cpu_gregs[rs2];
2232 } else {
2233 tcg_gen_ld_tl(def, cpu_regwptr, (rs2 - 8) * sizeof(target_ulong));
2234 }
2235 }
2236 return r_rs2;
2237 }
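/* Both helpers avoid copies where they can: %g1..%g7 live in TCG
   globals, so cpu_gregs[rs] is returned directly; windowed registers
   are loaded through cpu_regwptr into the caller-supplied temporary;
   %g0 and immediates are materialized into the temporary. For
   example, for "or %g3, 5, %o0", get_src1() simply returns
   cpu_gregs[3] with no move emitted. */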
2238
2239 #ifdef TARGET_SPARC64
2240 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2241 {
2242 TCGv_i32 c32, zero, dst, s1, s2;
2243
2244 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2245 or fold the comparison down to 32 bits and use movcond_i32. Choose
2246 the latter. */
2247 c32 = tcg_temp_new_i32();
2248 if (cmp->is_bool) {
2249 tcg_gen_trunc_i64_i32(c32, cmp->c1);
2250 } else {
2251 TCGv_i64 c64 = tcg_temp_new_i64();
2252 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2253 tcg_gen_trunc_i64_i32(c32, c64);
2254 tcg_temp_free_i64(c64);
2255 }
2256
2257 s1 = gen_load_fpr_F(dc, rs);
2258 s2 = gen_load_fpr_F(dc, rd);
2259 dst = gen_dest_fpr_F();
2260 zero = tcg_const_i32(0);
2261
2262 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2263
2264 tcg_temp_free_i32(c32);
2265 tcg_temp_free_i32(zero);
2266 gen_store_fpr_F(dc, rd, dst);
2267 }
2268
2269 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2270 {
2271 TCGv_i64 dst = gen_dest_fpr_D();
2272 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2273 gen_load_fpr_D(dc, rs),
2274 gen_load_fpr_D(dc, rd));
2275 gen_store_fpr_D(dc, rd, dst);
2276 }
2277
2278 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2279 {
2280 int qd = QFPREG(rd);
2281 int qs = QFPREG(rs);
2282
2283 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2284 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2285 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2286 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2287
2288 gen_update_fprs_dirty(qd);
2289 }
2290
2291 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2292 {
2293 TCGv_i32 r_tl = tcg_temp_new_i32();
2294
2295 /* load env->tl into r_tl */
2296 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2297
2298 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2299 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2300
2301 /* calculate offset to current trap state from env->ts, reuse r_tl */
2302 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2303 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2304
2305 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2306 {
2307 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2308 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2309 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2310 tcg_temp_free_ptr(r_tl_tmp);
2311 }
2312
2313 tcg_temp_free_i32(r_tl);
2314 }
2315
2316 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2317 int width, bool cc, bool left)
2318 {
2319 TCGv lo1, lo2, t1, t2;
2320 uint64_t amask, tabl, tabr;
2321 int shift, imask, omask;
2322
2323 if (cc) {
2324 tcg_gen_mov_tl(cpu_cc_src, s1);
2325 tcg_gen_mov_tl(cpu_cc_src2, s2);
2326 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2327 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2328 dc->cc_op = CC_OP_SUB;
2329 }
2330
2331 /* Theory of operation: there are two tables, left and right (not to
2332 be confused with the left and right versions of the opcode). These
2333 are indexed by the low 3 bits of the inputs. To make things "easy",
2334 these tables are loaded into two constants, TABL and TABR below.
2335 The operation index = (input & imask) << shift calculates the index
2336 into the constant, while val = (table >> index) & omask calculates
2337 the value we're looking for. */
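/* Illustrative example: for big-endian edge8 (left == 0) with
   (s1 & 7) == 3, index = 3 << 3 = 24 and (tabl >> 24) & 0xff = 0x1f,
   a left-edge mask selecting bytes 3..7 (the most significant mask
   bit names byte 0). */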
2338 switch (width) {
2339 case 8:
2340 imask = 0x7;
2341 shift = 3;
2342 omask = 0xff;
2343 if (left) {
2344 tabl = 0x80c0e0f0f8fcfeffULL;
2345 tabr = 0xff7f3f1f0f070301ULL;
2346 } else {
2347 tabl = 0x0103070f1f3f7fffULL;
2348 tabr = 0xfffefcf8f0e0c080ULL;
2349 }
2350 break;
2351 case 16:
2352 imask = 0x6;
2353 shift = 1;
2354 omask = 0xf;
2355 if (left) {
2356 tabl = 0x8cef;
2357 tabr = 0xf731;
2358 } else {
2359 tabl = 0x137f;
2360 tabr = 0xfec8;
2361 }
2362 break;
2363 case 32:
2364 imask = 0x4;
2365 shift = 0;
2366 omask = 0x3;
2367 if (left) {
2368 tabl = (2 << 2) | 3;
2369 tabr = (3 << 2) | 1;
2370 } else {
2371 tabl = (1 << 2) | 3;
2372 tabr = (3 << 2) | 2;
2373 }
2374 break;
2375 default:
2376 abort();
2377 }
2378
2379 lo1 = tcg_temp_new();
2380 lo2 = tcg_temp_new();
2381 tcg_gen_andi_tl(lo1, s1, imask);
2382 tcg_gen_andi_tl(lo2, s2, imask);
2383 tcg_gen_shli_tl(lo1, lo1, shift);
2384 tcg_gen_shli_tl(lo2, lo2, shift);
2385
2386 t1 = tcg_const_tl(tabl);
2387 t2 = tcg_const_tl(tabr);
2388 tcg_gen_shr_tl(lo1, t1, lo1);
2389 tcg_gen_shr_tl(lo2, t2, lo2);
2390 tcg_gen_andi_tl(dst, lo1, omask);
2391 tcg_gen_andi_tl(lo2, lo2, omask);
2392
2393 amask = -8;
2394 if (AM_CHECK(dc)) {
2395 amask &= 0xffffffffULL;
2396 }
2397 tcg_gen_andi_tl(s1, s1, amask);
2398 tcg_gen_andi_tl(s2, s2, amask);
2399
2400 /* We want to compute
2401 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2402 We've already done dst = lo1, so this reduces to
2403 dst &= (s1 == s2 ? -1 : lo2)
2404 Which we perform by
2405 lo2 |= -(s1 == s2)
2406 dst &= lo2
2407 */
2408 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2409 tcg_gen_neg_tl(t1, t1);
2410 tcg_gen_or_tl(lo2, lo2, t1);
2411 tcg_gen_and_tl(dst, dst, lo2);
2412
2413 tcg_temp_free(lo1);
2414 tcg_temp_free(lo2);
2415 tcg_temp_free(t1);
2416 tcg_temp_free(t2);
2417 }
2418
2419 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2420 {
2421 TCGv tmp = tcg_temp_new();
2422
2423 tcg_gen_add_tl(tmp, s1, s2);
2424 tcg_gen_andi_tl(dst, tmp, -8);
2425 if (left) {
2426 tcg_gen_neg_tl(tmp, tmp);
2427 }
2428 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2429
2430 tcg_temp_free(tmp);
2431 }
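/* Example: if s1 + s2 = 0x1003, dst = 0x1000 and GSR.align is set
   to 3 for alignaddr, or to (-0x1003 & 7) = 5 for alignaddrl. */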
2432
2433 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2434 {
2435 TCGv t1, t2, shift;
2436
2437 t1 = tcg_temp_new();
2438 t2 = tcg_temp_new();
2439 shift = tcg_temp_new();
2440
2441 tcg_gen_andi_tl(shift, gsr, 7);
2442 tcg_gen_shli_tl(shift, shift, 3);
2443 tcg_gen_shl_tl(t1, s1, shift);
2444
2445 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2446 shift of (up to 63) followed by a constant shift of 1. */
2447 tcg_gen_xori_tl(shift, shift, 63);
2448 tcg_gen_shr_tl(t2, s2, shift);
2449 tcg_gen_shri_tl(t2, t2, 1);
2450
2451 tcg_gen_or_tl(dst, t1, t2);
2452
2453 tcg_temp_free(t1);
2454 tcg_temp_free(t2);
2455 tcg_temp_free(shift);
2456 }
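/* Net effect: dst = (s1 << (8 * GSR.align)) | (s2 >> (64 - 8 * GSR.align)).
   The xori with 63 works because shift is a multiple of 8 (bits 3..5
   only), so shift ^ 63 == 63 - shift; the extra constant shift by 1
   makes the GSR.align == 0 case yield 0 for the s2 half instead of
   an undefined shift by 64. */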
2457 #endif
2458
2459 #define CHECK_IU_FEATURE(dc, FEATURE) \
2460 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2461 goto illegal_insn;
2462 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2463 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2464 goto nfpu_insn;
2465
2466 /* before an instruction, dc->pc must be static */
2467 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2468 {
2469 unsigned int opc, rs1, rs2, rd;
2470 TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
2471 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2472 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2473 target_long simm;
2474
2475 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2476 tcg_gen_debug_insn_start(dc->pc);
2477 }
2478
2479 opc = GET_FIELD(insn, 0, 1);
2480
2481 rd = GET_FIELD(insn, 2, 6);
2482
2483 cpu_tmp1 = cpu_src1 = tcg_temp_new();
2484 cpu_tmp2 = cpu_src2 = tcg_temp_new();
2485
2486 switch (opc) {
2487 case 0: /* branches/sethi */
2488 {
2489 unsigned int xop = GET_FIELD(insn, 7, 9);
2490 int32_t target;
2491 switch (xop) {
2492 #ifdef TARGET_SPARC64
2493 case 0x1: /* V9 BPcc */
2494 {
2495 int cc;
2496
2497 target = GET_FIELD_SP(insn, 0, 18);
2498 target = sign_extend(target, 19);
2499 target <<= 2;
2500 cc = GET_FIELD_SP(insn, 20, 21);
2501 if (cc == 0)
2502 do_branch(dc, target, insn, 0);
2503 else if (cc == 2)
2504 do_branch(dc, target, insn, 1);
2505 else
2506 goto illegal_insn;
2507 goto jmp_insn;
2508 }
2509 case 0x3: /* V9 BPr */
2510 {
2511 target = GET_FIELD_SP(insn, 0, 13) |
2512 (GET_FIELD_SP(insn, 20, 21) << 14);
2513 target = sign_extend(target, 16);
2514 target <<= 2;
2515 cpu_src1 = get_src1(insn, cpu_src1);
2516 do_branch_reg(dc, target, insn, cpu_src1);
2517 goto jmp_insn;
2518 }
2519 case 0x5: /* V9 FBPcc */
2520 {
2521 int cc = GET_FIELD_SP(insn, 20, 21);
2522 if (gen_trap_ifnofpu(dc)) {
2523 goto jmp_insn;
2524 }
2525 target = GET_FIELD_SP(insn, 0, 18);
2526 target = sign_extend(target, 19);
2527 target <<= 2;
2528 do_fbranch(dc, target, insn, cc);
2529 goto jmp_insn;
2530 }
2531 #else
2532 case 0x7: /* CBN+x */
2533 {
2534 goto ncp_insn;
2535 }
2536 #endif
2537 case 0x2: /* BN+x */
2538 {
2539 target = GET_FIELD(insn, 10, 31);
2540 target = sign_extend(target, 22);
2541 target <<= 2;
2542 do_branch(dc, target, insn, 0);
2543 goto jmp_insn;
2544 }
2545 case 0x6: /* FBN+x */
2546 {
2547 if (gen_trap_ifnofpu(dc)) {
2548 goto jmp_insn;
2549 }
2550 target = GET_FIELD(insn, 10, 31);
2551 target = sign_extend(target, 22);
2552 target <<= 2;
2553 do_fbranch(dc, target, insn, 0);
2554 goto jmp_insn;
2555 }
2556 case 0x4: /* SETHI */
2557 if (rd) { // nop
2558 uint32_t value = GET_FIELD(insn, 10, 31);
2559 TCGv r_const;
2560
2561 r_const = tcg_const_tl(value << 10);
2562 gen_movl_TN_reg(rd, r_const);
2563 tcg_temp_free(r_const);
2564 }
2565 break;
2566 case 0x0: /* UNIMPL */
2567 default:
2568 goto illegal_insn;
2569 }
2570 break;
2571 }
2572 break;
2573 case 1: /*CALL*/
2574 {
2575 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2576 TCGv r_const;
2577
2578 r_const = tcg_const_tl(dc->pc);
2579 gen_movl_TN_reg(15, r_const);
2580 tcg_temp_free(r_const);
2581 target += dc->pc;
2582 gen_mov_pc_npc(dc);
2583 #ifdef TARGET_SPARC64
2584 if (unlikely(AM_CHECK(dc))) {
2585 target &= 0xffffffffULL;
2586 }
2587 #endif
2588 dc->npc = target;
2589 }
2590 goto jmp_insn;
2591 case 2: /* FPU & Logical Operations */
2592 {
2593 unsigned int xop = GET_FIELD(insn, 7, 12);
2594 if (xop == 0x3a) { /* generate trap */
2595 int cond = GET_FIELD(insn, 3, 6);
2596 TCGv_i32 trap;
2597 int l1 = -1, mask;
2598
2599 if (cond == 0) {
2600 /* Trap never. */
2601 break;
2602 }
2603
2604 save_state(dc);
2605
2606 if (cond != 8) {
2607 /* Conditional trap. */
2608 DisasCompare cmp;
2609 #ifdef TARGET_SPARC64
2610 /* V9 icc/xcc */
2611 int cc = GET_FIELD_SP(insn, 11, 12);
2612 if (cc == 0) {
2613 gen_compare(&cmp, 0, cond, dc);
2614 } else if (cc == 2) {
2615 gen_compare(&cmp, 1, cond, dc);
2616 } else {
2617 goto illegal_insn;
2618 }
2619 #else
2620 gen_compare(&cmp, 0, cond, dc);
2621 #endif
2622 l1 = gen_new_label();
2623 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2624 cmp.c1, cmp.c2, l1);
2625 free_compare(&cmp);
2626 }
2627
2628 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2629 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2630
2631 /* Don't use the normal temporaries, as they may well have
2632 gone out of scope with the branch above. While we're
2633 doing that we might as well pre-truncate to 32-bit. */
2634 trap = tcg_temp_new_i32();
2635
2636 rs1 = GET_FIELD_SP(insn, 14, 18);
2637 if (IS_IMM) {
2638 rs2 = GET_FIELD_SP(insn, 0, 6);
2639 if (rs1 == 0) {
2640 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2641 /* Signal that the trap value is fully constant. */
2642 mask = 0;
2643 } else {
2644 TCGv t1 = tcg_temp_new();
2645 gen_movl_reg_TN(rs1, t1);
2646 tcg_gen_trunc_tl_i32(trap, t1);
2647 tcg_temp_free(t1);
2648 tcg_gen_addi_i32(trap, trap, rs2);
2649 }
2650 } else {
2651 TCGv t1 = tcg_temp_new();
2652 TCGv t2 = tcg_temp_new();
2653 rs2 = GET_FIELD_SP(insn, 0, 4);
2654 gen_movl_reg_TN(rs1, t1);
2655 gen_movl_reg_TN(rs2, t2);
2656 tcg_gen_add_tl(t1, t1, t2);
2657 tcg_gen_trunc_tl_i32(trap, t1);
2658 tcg_temp_free(t1);
2659 tcg_temp_free(t2);
2660 }
2661 if (mask != 0) {
2662 tcg_gen_andi_i32(trap, trap, mask);
2663 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2664 }
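/* On every path the trap temp now holds TT_TRAP + (trap# & mask):
   software trap numbers are 7 bits on V8 (V8_TRAP_MASK), or 8 bits
   for a hypervisor trap on UA2005 (UA2005_HTRAP_MASK). */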
2665
2666 gen_helper_raise_exception(cpu_env, trap);
2667 tcg_temp_free_i32(trap);
2668
2669 if (cond == 8) {
2670 /* An unconditional trap ends the TB. */
2671 dc->is_br = 1;
2672 goto jmp_insn;
2673 } else {
2674 /* A conditional trap falls through to the next insn. */
2675 gen_set_label(l1);
2676 break;
2677 }
2678 } else if (xop == 0x28) {
2679 rs1 = GET_FIELD(insn, 13, 17);
2680 switch(rs1) {
2681 case 0: /* rdy */
2682 #ifndef TARGET_SPARC64
2683 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2684 manual, rdy on the microSPARC
2685 II */
2686 case 0x0f: /* stbar in the SPARCv8 manual,
2687 rdy on the microSPARC II */
2688 case 0x10 ... 0x1f: /* implementation-dependent in the
2689 SPARCv8 manual, rdy on the
2690 microSPARC II */
2691 /* Read Asr17 */
2692 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2693 TCGv r_const;
2694
2695 /* Read Asr17 for a Leon3 monoprocessor */
2696 r_const = tcg_const_tl((1 << 8)
2697 | (dc->def->nwindows - 1));
2698 gen_movl_TN_reg(rd, r_const);
2699 tcg_temp_free(r_const);
2700 break;
2701 }
2702 #endif
2703 gen_movl_TN_reg(rd, cpu_y);
2704 break;
2705 #ifdef TARGET_SPARC64
2706 case 0x2: /* V9 rdccr */
2707 gen_helper_compute_psr(cpu_env);
2708 gen_helper_rdccr(cpu_dst, cpu_env);
2709 gen_movl_TN_reg(rd, cpu_dst);
2710 break;
2711 case 0x3: /* V9 rdasi */
2712 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2713 gen_movl_TN_reg(rd, cpu_dst);
2714 break;
2715 case 0x4: /* V9 rdtick */
2716 {
2717 TCGv_ptr r_tickptr;
2718
2719 r_tickptr = tcg_temp_new_ptr();
2720 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2721 offsetof(CPUSPARCState, tick));
2722 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2723 tcg_temp_free_ptr(r_tickptr);
2724 gen_movl_TN_reg(rd, cpu_dst);
2725 }
2726 break;
2727 case 0x5: /* V9 rdpc */
2728 {
2729 TCGv r_const;
2730
2731 if (unlikely(AM_CHECK(dc))) {
2732 r_const = tcg_const_tl(dc->pc & 0xffffffffULL);
2733 } else {
2734 r_const = tcg_const_tl(dc->pc);
2735 }
2736 gen_movl_TN_reg(rd, r_const);
2737 tcg_temp_free(r_const);
2738 }
2739 break;
2740 case 0x6: /* V9 rdfprs */
2741 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2742 gen_movl_TN_reg(rd, cpu_dst);
2743 break;
2744 case 0xf: /* V9 membar */
2745 break; /* no effect */
2746 case 0x13: /* Graphics Status */
2747 if (gen_trap_ifnofpu(dc)) {
2748 goto jmp_insn;
2749 }
2750 gen_movl_TN_reg(rd, cpu_gsr);
2751 break;
2752 case 0x16: /* Softint */
2753 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2754 gen_movl_TN_reg(rd, cpu_dst);
2755 break;
2756 case 0x17: /* Tick compare */
2757 gen_movl_TN_reg(rd, cpu_tick_cmpr);
2758 break;
2759 case 0x18: /* System tick */
2760 {
2761 TCGv_ptr r_tickptr;
2762
2763 r_tickptr = tcg_temp_new_ptr();
2764 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2765 offsetof(CPUSPARCState, stick));
2766 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2767 tcg_temp_free_ptr(r_tickptr);
2768 gen_movl_TN_reg(rd, cpu_dst);
2769 }
2770 break;
2771 case 0x19: /* System tick compare */
2772 gen_movl_TN_reg(rd, cpu_stick_cmpr);
2773 break;
2774 case 0x10: /* Performance Control */
2775 case 0x11: /* Performance Instrumentation Counter */
2776 case 0x12: /* Dispatch Control */
2777 case 0x14: /* Softint set, WO */
2778 case 0x15: /* Softint clear, WO */
2779 #endif
2780 default:
2781 goto illegal_insn;
2782 }
2783 #if !defined(CONFIG_USER_ONLY)
2784 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2785 #ifndef TARGET_SPARC64
2786 if (!supervisor(dc))
2787 goto priv_insn;
2788 gen_helper_compute_psr(cpu_env);
2789 dc->cc_op = CC_OP_FLAGS;
2790 gen_helper_rdpsr(cpu_dst, cpu_env);
2791 #else
2792 CHECK_IU_FEATURE(dc, HYPV);
2793 if (!hypervisor(dc))
2794 goto priv_insn;
2795 rs1 = GET_FIELD(insn, 13, 17);
2796 switch (rs1) {
2797 case 0: // hpstate
2798 // gen_op_rdhpstate();
2799 break;
2800 case 1: // htstate
2801 // gen_op_rdhtstate();
2802 break;
2803 case 3: // hintp
2804 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2805 break;
2806 case 5: // htba
2807 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2808 break;
2809 case 6: // hver
2810 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2811 break;
2812 case 31: // hstick_cmpr
2813 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2814 break;
2815 default:
2816 goto illegal_insn;
2817 }
2818 #endif
2819 gen_movl_TN_reg(rd, cpu_dst);
2820 break;
2821 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2822 if (!supervisor(dc))
2823 goto priv_insn;
2824 #ifdef TARGET_SPARC64
2825 rs1 = GET_FIELD(insn, 13, 17);
2826 switch (rs1) {
2827 case 0: // tpc
2828 {
2829 TCGv_ptr r_tsptr;
2830
2831 r_tsptr = tcg_temp_new_ptr();
2832 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2833 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2834 offsetof(trap_state, tpc));
2835 tcg_temp_free_ptr(r_tsptr);
2836 }
2837 break;
2838 case 1: // tnpc
2839 {
2840 TCGv_ptr r_tsptr;
2841
2842 r_tsptr = tcg_temp_new_ptr();
2843 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2844 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2845 offsetof(trap_state, tnpc));
2846 tcg_temp_free_ptr(r_tsptr);
2847 }
2848 break;
2849 case 2: // tstate
2850 {
2851 TCGv_ptr r_tsptr;
2852
2853 r_tsptr = tcg_temp_new_ptr();
2854 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2855 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2856 offsetof(trap_state, tstate));
2857 tcg_temp_free_ptr(r_tsptr);
2858 }
2859 break;
2860 case 3: // tt
2861 {
2862 TCGv_ptr r_tsptr;
2863
2864 r_tsptr = tcg_temp_new_ptr();
2865 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2866 tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
2867 offsetof(trap_state, tt));
2868 tcg_temp_free_ptr(r_tsptr);
2869 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2870 }
2871 break;
2872 case 4: // tick
2873 {
2874 TCGv_ptr r_tickptr;
2875
2876 r_tickptr = tcg_temp_new_ptr();
2877 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2878 offsetof(CPUSPARCState, tick));
2879 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2880 gen_movl_TN_reg(rd, cpu_tmp0);
2881 tcg_temp_free_ptr(r_tickptr);
2882 }
2883 break;
2884 case 5: // tba
2885 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2886 break;
2887 case 6: // pstate
2888 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2889 offsetof(CPUSPARCState, pstate));
2890 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2891 break;
2892 case 7: // tl
2893 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2894 offsetof(CPUSPARCState, tl));
2895 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2896 break;
2897 case 8: // pil
2898 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2899 offsetof(CPUSPARCState, psrpil));
2900 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2901 break;
2902 case 9: // cwp
2903 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2904 break;
2905 case 10: // cansave
2906 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2907 offsetof(CPUSPARCState, cansave));
2908 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2909 break;
2910 case 11: // canrestore
2911 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2912 offsetof(CPUSPARCState, canrestore));
2913 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2914 break;
2915 case 12: // cleanwin
2916 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2917 offsetof(CPUSPARCState, cleanwin));
2918 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2919 break;
2920 case 13: // otherwin
2921 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2922 offsetof(CPUSPARCState, otherwin));
2923 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2924 break;
2925 case 14: // wstate
2926 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2927 offsetof(CPUSPARCState, wstate));
2928 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2929 break;
2930 case 16: // UA2005 gl
2931 CHECK_IU_FEATURE(dc, GL);
2932 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2933 offsetof(CPUSPARCState, gl));
2934 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2935 break;
2936 case 26: // UA2005 strand status
2937 CHECK_IU_FEATURE(dc, HYPV);
2938 if (!hypervisor(dc))
2939 goto priv_insn;
2940 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2941 break;
2942 case 31: // ver
2943 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2944 break;
2945 case 15: // fq
2946 default:
2947 goto illegal_insn;
2948 }
2949 #else
2950 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2951 #endif
2952 gen_movl_TN_reg(rd, cpu_tmp0);
2953 break;
2954 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2955 #ifdef TARGET_SPARC64
2956 save_state(dc);
2957 gen_helper_flushw(cpu_env);
2958 #else
2959 if (!supervisor(dc))
2960 goto priv_insn;
2961 gen_movl_TN_reg(rd, cpu_tbr);
2962 #endif
2963 break;
2964 #endif
2965 } else if (xop == 0x34) { /* FPop1: FPU arithmetic & conversions */
2966 if (gen_trap_ifnofpu(dc)) {
2967 goto jmp_insn;
2968 }
2969 gen_op_clear_ieee_excp_and_FTT();
2970 rs1 = GET_FIELD(insn, 13, 17);
2971 rs2 = GET_FIELD(insn, 27, 31);
2972 xop = GET_FIELD(insn, 18, 26);
2973 save_state(dc);
2974 switch (xop) {
2975 case 0x1: /* fmovs */
2976 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2977 gen_store_fpr_F(dc, rd, cpu_src1_32);
2978 break;
2979 case 0x5: /* fnegs */
2980 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2981 break;
2982 case 0x9: /* fabss */
2983 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2984 break;
2985 case 0x29: /* fsqrts */
2986 CHECK_FPU_FEATURE(dc, FSQRT);
2987 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2988 break;
2989 case 0x2a: /* fsqrtd */
2990 CHECK_FPU_FEATURE(dc, FSQRT);
2991 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2992 break;
2993 case 0x2b: /* fsqrtq */
2994 CHECK_FPU_FEATURE(dc, FLOAT128);
2995 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2996 break;
2997 case 0x41: /* fadds */
2998 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2999 break;
3000 case 0x42: /* faddd */
3001 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3002 break;
3003 case 0x43: /* faddq */
3004 CHECK_FPU_FEATURE(dc, FLOAT128);
3005 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3006 break;
3007 case 0x45: /* fsubs */
3008 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3009 break;
3010 case 0x46: /* fsubd */
3011 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3012 break;
3013 case 0x47: /* fsubq */
3014 CHECK_FPU_FEATURE(dc, FLOAT128);
3015 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3016 break;
3017 case 0x49: /* fmuls */
3018 CHECK_FPU_FEATURE(dc, FMUL);
3019 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3020 break;
3021 case 0x4a: /* fmuld */
3022 CHECK_FPU_FEATURE(dc, FMUL);
3023 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3024 break;
3025 case 0x4b: /* fmulq */
3026 CHECK_FPU_FEATURE(dc, FLOAT128);
3027 CHECK_FPU_FEATURE(dc, FMUL);
3028 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3029 break;
3030 case 0x4d: /* fdivs */
3031 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3032 break;
3033 case 0x4e: /* fdivd */
3034 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3035 break;
3036 case 0x4f: /* fdivq */
3037 CHECK_FPU_FEATURE(dc, FLOAT128);
3038 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3039 break;
3040 case 0x69: /* fsmuld */
3041 CHECK_FPU_FEATURE(dc, FSMULD);
3042 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3043 break;
3044 case 0x6e: /* fdmulq */
3045 CHECK_FPU_FEATURE(dc, FLOAT128);
3046 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3047 break;
3048 case 0xc4: /* fitos */
3049 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3050 break;
3051 case 0xc6: /* fdtos */
3052 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3053 break;
3054 case 0xc7: /* fqtos */
3055 CHECK_FPU_FEATURE(dc, FLOAT128);
3056 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3057 break;
3058 case 0xc8: /* fitod */
3059 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3060 break;
3061 case 0xc9: /* fstod */
3062 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3063 break;
3064 case 0xcb: /* fqtod */
3065 CHECK_FPU_FEATURE(dc, FLOAT128);
3066 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3067 break;
3068 case 0xcc: /* fitoq */
3069 CHECK_FPU_FEATURE(dc, FLOAT128);
3070 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3071 break;
3072 case 0xcd: /* fstoq */
3073 CHECK_FPU_FEATURE(dc, FLOAT128);
3074 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3075 break;
3076 case 0xce: /* fdtoq */
3077 CHECK_FPU_FEATURE(dc, FLOAT128);
3078 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3079 break;
3080 case 0xd1: /* fstoi */
3081 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3082 break;
3083 case 0xd2: /* fdtoi */
3084 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3085 break;
3086 case 0xd3: /* fqtoi */
3087 CHECK_FPU_FEATURE(dc, FLOAT128);
3088 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3089 break;
3090 #ifdef TARGET_SPARC64
3091 case 0x2: /* V9 fmovd */
3092 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3093 gen_store_fpr_D(dc, rd, cpu_src1_64);
3094 break;
3095 case 0x3: /* V9 fmovq */
3096 CHECK_FPU_FEATURE(dc, FLOAT128);
3097 gen_move_Q(rd, rs2);
3098 break;
3099 case 0x6: /* V9 fnegd */
3100 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3101 break;
3102 case 0x7: /* V9 fnegq */
3103 CHECK_FPU_FEATURE(dc, FLOAT128);
3104 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3105 break;
3106 case 0xa: /* V9 fabsd */
3107 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3108 break;
3109 case 0xb: /* V9 fabsq */
3110 CHECK_FPU_FEATURE(dc, FLOAT128);
3111 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3112 break;
3113 case 0x81: /* V9 fstox */
3114 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3115 break;
3116 case 0x82: /* V9 fdtox */
3117 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3118 break;
3119 case 0x83: /* V9 fqtox */
3120 CHECK_FPU_FEATURE(dc, FLOAT128);
3121 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3122 break;
3123 case 0x84: /* V9 fxtos */
3124 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3125 break;
3126 case 0x88: /* V9 fxtod */
3127 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3128 break;
3129 case 0x8c: /* V9 fxtoq */
3130 CHECK_FPU_FEATURE(dc, FLOAT128);
3131 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3132 break;
3133 #endif
3134 default:
3135 goto illegal_insn;
3136 }
3137 } else if (xop == 0x35) { /* FPop2: FPU compares & conditional moves */
3138 #ifdef TARGET_SPARC64
3139 int cond;
3140 #endif
3141 if (gen_trap_ifnofpu(dc)) {
3142 goto jmp_insn;
3143 }
3144 gen_op_clear_ieee_excp_and_FTT();
3145 rs1 = GET_FIELD(insn, 13, 17);
3146 rs2 = GET_FIELD(insn, 27, 31);
3147 xop = GET_FIELD(insn, 18, 26);
3148 save_state(dc);
3149
3150 #ifdef TARGET_SPARC64
3151 #define FMOVR(sz) \
3152 do { \
3153 DisasCompare cmp; \
3154 cond = GET_FIELD_SP(insn, 14, 17); \
3155 cpu_src1 = get_src1(insn, cpu_src1); \
3156 gen_compare_reg(&cmp, cond, cpu_src1); \
3157 gen_fmov##sz(dc, &cmp, rd, rs2); \
3158 free_compare(&cmp); \
3159 } while (0)
3160
3161 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3162 FMOVR(s);
3163 break;
3164 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3165 FMOVR(d);
3166 break;
3167 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3168 CHECK_FPU_FEATURE(dc, FLOAT128);
3169 FMOVR(q);
3170 break;
3171 }
3172 #undef FMOVR
3173 #endif
3174 switch (xop) {
3175 #ifdef TARGET_SPARC64
3176 #define FMOVCC(fcc, sz) \
3177 do { \
3178 DisasCompare cmp; \
3179 cond = GET_FIELD_SP(insn, 14, 17); \
3180 gen_fcompare(&cmp, fcc, cond); \
3181 gen_fmov##sz(dc, &cmp, rd, rs2); \
3182 free_compare(&cmp); \
3183 } while (0)
3184
3185 case 0x001: /* V9 fmovscc %fcc0 */
3186 FMOVCC(0, s);
3187 break;
3188 case 0x002: /* V9 fmovdcc %fcc0 */
3189 FMOVCC(0, d);
3190 break;
3191 case 0x003: /* V9 fmovqcc %fcc0 */
3192 CHECK_FPU_FEATURE(dc, FLOAT128);
3193 FMOVCC(0, q);
3194 break;
3195 case 0x041: /* V9 fmovscc %fcc1 */
3196 FMOVCC(1, s);
3197 break;
3198 case 0x042: /* V9 fmovdcc %fcc1 */
3199 FMOVCC(1, d);
3200 break;
3201 case 0x043: /* V9 fmovqcc %fcc1 */
3202 CHECK_FPU_FEATURE(dc, FLOAT128);
3203 FMOVCC(1, q);
3204 break;
3205 case 0x081: /* V9 fmovscc %fcc2 */
3206 FMOVCC(2, s);
3207 break;
3208 case 0x082: /* V9 fmovdcc %fcc2 */
3209 FMOVCC(2, d);
3210 break;
3211 case 0x083: /* V9 fmovqcc %fcc2 */
3212 CHECK_FPU_FEATURE(dc, FLOAT128);
3213 FMOVCC(2, q);
3214 break;
3215 case 0x0c1: /* V9 fmovscc %fcc3 */
3216 FMOVCC(3, s);
3217 break;
3218 case 0x0c2: /* V9 fmovdcc %fcc3 */
3219 FMOVCC(3, d);
3220 break;
3221 case 0x0c3: /* V9 fmovqcc %fcc3 */
3222 CHECK_FPU_FEATURE(dc, FLOAT128);
3223 FMOVCC(3, q);
3224 break;
3225 #undef FMOVCC
3226 #define FMOVCC(xcc, sz) \
3227 do { \
3228 DisasCompare cmp; \
3229 cond = GET_FIELD_SP(insn, 14, 17); \
3230 gen_compare(&cmp, xcc, cond, dc); \
3231 gen_fmov##sz(dc, &cmp, rd, rs2); \
3232 free_compare(&cmp); \
3233 } while (0)
3234
3235 case 0x101: /* V9 fmovscc %icc */
3236 FMOVCC(0, s);
3237 break;
3238 case 0x102: /* V9 fmovdcc %icc */
3239 FMOVCC(0, d);
3240 break;
3241 case 0x103: /* V9 fmovqcc %icc */
3242 CHECK_FPU_FEATURE(dc, FLOAT128);
3243 FMOVCC(0, q);
3244 break;
3245 case 0x181: /* V9 fmovscc %xcc */
3246 FMOVCC(1, s);
3247 break;
3248 case 0x182: /* V9 fmovdcc %xcc */
3249 FMOVCC(1, d);
3250 break;
3251 case 0x183: /* V9 fmovqcc %xcc */
3252 CHECK_FPU_FEATURE(dc, FLOAT128);
3253 FMOVCC(1, q);
3254 break;
3255 #undef FMOVCC
3256 #endif
3257 case 0x51: /* fcmps, V9 %fcc */
3258 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3259 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3260 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3261 break;
3262 case 0x52: /* fcmpd, V9 %fcc */
3263 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3264 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3265 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3266 break;
3267 case 0x53: /* fcmpq, V9 %fcc */
3268 CHECK_FPU_FEATURE(dc, FLOAT128);
3269 gen_op_load_fpr_QT0(QFPREG(rs1));
3270 gen_op_load_fpr_QT1(QFPREG(rs2));
3271 gen_op_fcmpq(rd & 3);
3272 break;
3273 case 0x55: /* fcmpes, V9 %fcc */
3274 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3275 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3276 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3277 break;
3278 case 0x56: /* fcmped, V9 %fcc */
3279 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3280 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3281 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3282 break;
3283 case 0x57: /* fcmpeq, V9 %fcc */
3284 CHECK_FPU_FEATURE(dc, FLOAT128);
3285 gen_op_load_fpr_QT0(QFPREG(rs1));
3286 gen_op_load_fpr_QT1(QFPREG(rs2));
3287 gen_op_fcmpeq(rd & 3);
3288 break;
3289 default:
3290 goto illegal_insn;
3291 }
3292 } else if (xop == 0x2) {
3293 // clr/mov shortcut
3294
3295 rs1 = GET_FIELD(insn, 13, 17);
3296 if (rs1 == 0) {
3297 // or %g0, x, y -> y = x
3298 if (IS_IMM) { /* immediate */
3299 TCGv r_const;
3300
3301 simm = GET_FIELDs(insn, 19, 31);
3302 r_const = tcg_const_tl(simm);
3303 gen_movl_TN_reg(rd, r_const);
3304 tcg_temp_free(r_const);
3305 } else { /* register */
3306 rs2 = GET_FIELD(insn, 27, 31);
3307 gen_movl_reg_TN(rs2, cpu_dst);
3308 gen_movl_TN_reg(rd, cpu_dst);
3309 }
3310 } else {
3311 cpu_src1 = get_src1(insn, cpu_src1);
3312 if (IS_IMM) { /* immediate */
3313 simm = GET_FIELDs(insn, 19, 31);
3314 tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
3315 gen_movl_TN_reg(rd, cpu_dst);
3316 } else { /* register */
3317 // or x, %g0, y -> y = x
3318 rs2 = GET_FIELD(insn, 27, 31);
3319 if (rs2 != 0) {
3320 gen_movl_reg_TN(rs2, cpu_src2);
3321 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3322 gen_movl_TN_reg(rd, cpu_dst);
3323 } else
3324 gen_movl_TN_reg(rd, cpu_src1);
3325 }
3326 }
3327 #ifdef TARGET_SPARC64
3328 } else if (xop == 0x25) { /* sll, V9 sllx */
3329 cpu_src1 = get_src1(insn, cpu_src1);
3330 if (IS_IMM) { /* immediate */
3331 simm = GET_FIELDs(insn, 20, 31);
3332 if (insn & (1 << 12)) {
3333 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3334 } else {
3335 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3336 }
3337 } else { /* register */
3338 rs2 = GET_FIELD(insn, 27, 31);
3339 gen_movl_reg_TN(rs2, cpu_src2);
3340 if (insn & (1 << 12)) {
3341 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3342 } else {
3343 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3344 }
3345 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3346 }
3347 gen_movl_TN_reg(rd, cpu_dst);
3348 } else if (xop == 0x26) { /* srl, V9 srlx */
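/* For 32-bit srl (and sra below), the low 32 bits of the 64-bit
   source are isolated (zero-extended here, sign-extended for sra)
   before shifting, so V8 semantics are preserved on a V9 register. */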
3349 cpu_src1 = get_src1(insn, cpu_src1);
3350 if (IS_IMM) { /* immediate */
3351 simm = GET_FIELDs(insn, 20, 31);
3352 if (insn & (1 << 12)) {
3353 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3354 } else {
3355 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3356 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3357 }
3358 } else { /* register */
3359 rs2 = GET_FIELD(insn, 27, 31);
3360 gen_movl_reg_TN(rs2, cpu_src2);
3361 if (insn & (1 << 12)) {
3362 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3363 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3364 } else {
3365 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3366 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3367 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3368 }
3369 }
3370 gen_movl_TN_reg(rd, cpu_dst);
3371 } else if (xop == 0x27) { /* sra, V9 srax */
3372 cpu_src1 = get_src1(insn, cpu_src1);
3373 if (IS_IMM) { /* immediate */
3374 simm = GET_FIELDs(insn, 20, 31);
3375 if (insn & (1 << 12)) {
3376 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3377 } else {
3378 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3379 tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
3380 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3381 }
3382 } else { /* register */
3383 rs2 = GET_FIELD(insn, 27, 31);
3384 gen_movl_reg_TN(rs2, cpu_src2);
3385 if (insn & (1 << 12)) {
3386 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3387 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3388 } else {
3389 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3390 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3391 tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
3392 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3393 }
3394 }
3395 gen_movl_TN_reg(rd, cpu_dst);
3396 #endif
3397 } else if (xop < 0x36) {
3398 if (xop < 0x20) {
3399 cpu_src1 = get_src1(insn, cpu_src1);
3400 cpu_src2 = get_src2(insn, cpu_src2);
3401 switch (xop & ~0x10) {
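/* Bit 4 of xop (0x10) selects the condition-code variant (addcc,
   andcc, ...): the switch masks it out, and each case tests
   (xop & 0x10) to decide whether to update icc/xcc. */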
3402 case 0x0: /* add */
3403 if (IS_IMM) {
3404 simm = GET_FIELDs(insn, 19, 31);
3405 if (xop & 0x10) {
3406 gen_op_addi_cc(cpu_dst, cpu_src1, simm);
3407 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3408 dc->cc_op = CC_OP_ADD;
3409 } else {
3410 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
3411 }
3412 } else {
3413 if (xop & 0x10) {
3414 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3415 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3416 dc->cc_op = CC_OP_ADD;
3417 } else {
3418 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3419 }
3420 }
3421 break;
3422 case 0x1: /* and */
3423 if (IS_IMM) {
3424 simm = GET_FIELDs(insn, 19, 31);
3425 tcg_gen_andi_tl(cpu_dst, cpu_src1, simm);
3426 } else {
3427 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3428 }
3429 if (xop & 0x10) {
3430 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3431 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3432 dc->cc_op = CC_OP_LOGIC;
3433 }
3434 break;
3435 case 0x2: /* or */
3436 if (IS_IMM) {
3437 simm = GET_FIELDs(insn, 19, 31);
3438 tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
3439 } else {
3440 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3441 }
3442 if (xop & 0x10) {
3443 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3444 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3445 dc->cc_op = CC_OP_LOGIC;
3446 }
3447 break;
3448 case 0x3: /* xor */
3449 if (IS_IMM) {
3450 simm = GET_FIELDs(insn, 19, 31);
3451 tcg_gen_xori_tl(cpu_dst, cpu_src1, simm);
3452 } else {
3453 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3454 }
3455 if (xop & 0x10) {
3456 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3457 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3458 dc->cc_op = CC_OP_LOGIC;
3459 }
3460 break;
3461 case 0x4: /* sub */
3462 if (IS_IMM) {
3463 simm = GET_FIELDs(insn, 19, 31);
3464 if (xop & 0x10) {
3465 gen_op_subi_cc(cpu_dst, cpu_src1, simm, dc);
3466 } else {
3467 tcg_gen_subi_tl(cpu_dst, cpu_src1, simm);
3468 }
3469 } else {
3470 if (xop & 0x10) {
3471 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3472 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3473 dc->cc_op = CC_OP_SUB;
3474 } else {
3475 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3476 }
3477 }
3478 break;
3479 case 0x5: /* andn */
3480 if (IS_IMM) {
3481 simm = GET_FIELDs(insn, 19, 31);
3482 tcg_gen_andi_tl(cpu_dst, cpu_src1, ~simm);
3483 } else {
3484 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3485 }
3486 if (xop & 0x10) {
3487 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3488 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3489 dc->cc_op = CC_OP_LOGIC;
3490 }
3491 break;
3492 case 0x6: /* orn */
3493 if (IS_IMM) {
3494 simm = GET_FIELDs(insn, 19, 31);
3495 tcg_gen_ori_tl(cpu_dst, cpu_src1, ~simm);
3496 } else {
3497 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3498 }
3499 if (xop & 0x10) {
3500 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3501 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3502 dc->cc_op = CC_OP_LOGIC;
3503 }
3504 break;
3505 case 0x7: /* xorn */
3506 if (IS_IMM) {
3507 simm = GET_FIELDs(insn, 19, 31);
3508 tcg_gen_xori_tl(cpu_dst, cpu_src1, ~simm);
3509 } else {
3510 tcg_gen_not_tl(cpu_tmp0, cpu_src2);
3511 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_tmp0);
3512 }
3513 if (xop & 0x10) {
3514 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3515 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3516 dc->cc_op = CC_OP_LOGIC;
3517 }
3518 break;
3519 case 0x8: /* addx, V9 addc */
3520 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3521 (xop & 0x10));
3522 break;
3523 #ifdef TARGET_SPARC64
3524 case 0x9: /* V9 mulx */
3525 if (IS_IMM) {
3526 simm = GET_FIELDs(insn, 19, 31);
3527 tcg_gen_muli_i64(cpu_dst, cpu_src1, simm);
3528 } else {
3529 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3530 }
3531 break;
3532 #endif
3533 case 0xa: /* umul */
3534 CHECK_IU_FEATURE(dc, MUL);
3535 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3536 if (xop & 0x10) {
3537 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3538 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3539 dc->cc_op = CC_OP_LOGIC;
3540 }
3541 break;
3542 case 0xb: /* smul */
3543 CHECK_IU_FEATURE(dc, MUL);
3544 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3545 if (xop & 0x10) {
3546 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3547 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3548 dc->cc_op = CC_OP_LOGIC;
3549 }
3550 break;
3551 case 0xc: /* subx, V9 subc */
3552 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3553 (xop & 0x10));
3554 break;
3555 #ifdef TARGET_SPARC64
3556 case 0xd: /* V9 udivx */
3557 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3558 break;
3559 #endif
3560 case 0xe: /* udiv */
3561 CHECK_IU_FEATURE(dc, DIV);
3562 if (xop & 0x10) {
3563 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3564 cpu_src2);
3565 dc->cc_op = CC_OP_DIV;
3566 } else {
3567 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3568 cpu_src2);
3569 }
3570 break;
3571 case 0xf: /* sdiv */
3572 CHECK_IU_FEATURE(dc, DIV);
3573 if (xop & 0x10) {
3574 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3575 cpu_src2);
3576 dc->cc_op = CC_OP_DIV;
3577 } else {
3578 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3579 cpu_src2);
3580 }
3581 break;
3582 default:
3583 goto illegal_insn;
3584 }
3585 gen_movl_TN_reg(rd, cpu_dst);
3586 } else {
3587 cpu_src1 = get_src1(insn, cpu_src1);
3588 cpu_src2 = get_src2(insn, cpu_src2);
3589 switch (xop) {
3590 case 0x20: /* taddcc */
3591 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3592 gen_movl_TN_reg(rd, cpu_dst);
3593 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3594 dc->cc_op = CC_OP_TADD;
3595 break;
3596 case 0x21: /* tsubcc */
3597 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3598 gen_movl_TN_reg(rd, cpu_dst);
3599 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3600 dc->cc_op = CC_OP_TSUB;
3601 break;
3602 case 0x22: /* taddcctv */
3603 gen_helper_taddcctv(cpu_dst, cpu_env,
3604 cpu_src1, cpu_src2);
3605 gen_movl_TN_reg(rd, cpu_dst);
3606 dc->cc_op = CC_OP_TADDTV;
3607 break;
3608 case 0x23: /* tsubcctv */
3609 gen_helper_tsubcctv(cpu_dst, cpu_env,
3610 cpu_src1, cpu_src2);
3611 gen_movl_TN_reg(rd, cpu_dst);
3612 dc->cc_op = CC_OP_TSUBTV;
3613 break;
3614 case 0x24: /* mulscc */
3615 gen_helper_compute_psr(cpu_env);
3616 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3617 gen_movl_TN_reg(rd, cpu_dst);
3618 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3619 dc->cc_op = CC_OP_ADD;
3620 break;
3621 #ifndef TARGET_SPARC64
3622 case 0x25: /* sll */
3623 if (IS_IMM) { /* immediate */
3624 simm = GET_FIELDs(insn, 20, 31);
3625 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3626 } else { /* register */
3627 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3628 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3629 }
3630 gen_movl_TN_reg(rd, cpu_dst);
3631 break;
3632 case 0x26: /* srl */
3633 if (IS_IMM) { /* immediate */
3634 simm = GET_FIELDs(insn, 20, 31);
3635 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3636 } else { /* register */
3637 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3638 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3639 }
3640 gen_movl_TN_reg(rd, cpu_dst);
3641 break;
3642 case 0x27: /* sra */
3643 if (IS_IMM) { /* immediate */
3644 simm = GET_FIELDs(insn, 20, 31);
3645 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3646 } else { /* register */
3647 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3648 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3649 }
3650 gen_movl_TN_reg(rd, cpu_dst);
3651 break;
3652 #endif
3653 case 0x30:
3654 {
3655 switch(rd) {
3656 case 0: /* wry */
3657 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3658 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3659 break;
3660 #ifndef TARGET_SPARC64
3661 case 0x01 ... 0x0f: /* undefined in the
3662 SPARCv8 manual, nop
3663 on the microSPARC
3664 II */
3665 case 0x10 ... 0x1f: /* implementation-dependent
3666 in the SPARCv8
3667 manual, nop on the
3668 microSPARC II */
3669 break;
3670 #else
3671 case 0x2: /* V9 wrccr */
3672 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3673 gen_helper_wrccr(cpu_env, cpu_dst);
3674 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3675 dc->cc_op = CC_OP_FLAGS;
3676 break;
3677 case 0x3: /* V9 wrasi */
3678 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3679 tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
3680 tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
3681 break;
3682 case 0x6: /* V9 wrfprs */
3683 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3684 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
3685 save_state(dc);
3686 gen_op_next_insn();
3687 tcg_gen_exit_tb(0);
3688 dc->is_br = 1;
3689 break;
3690 case 0xf: /* V9 sir, nop if user */
3691 #if !defined(CONFIG_USER_ONLY)
3692 if (supervisor(dc)) {
3693 ; // XXX
3694 }
3695 #endif
3696 break;
3697 case 0x13: /* Graphics Status */
3698 if (gen_trap_ifnofpu(dc)) {
3699 goto jmp_insn;
3700 }
3701 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3702 break;
3703 case 0x14: /* Softint set */
3704 if (!supervisor(dc))
3705 goto illegal_insn;
3706 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3707 gen_helper_set_softint(cpu_env, cpu_tmp64);
3708 break;
3709 case 0x15: /* Softint clear */
3710 if (!supervisor(dc))
3711 goto illegal_insn;
3712 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3713 gen_helper_clear_softint(cpu_env, cpu_tmp64);
3714 break;
3715 case 0x16: /* Softint write */
3716 if (!supervisor(dc))
3717 goto illegal_insn;
3718 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3719 gen_helper_write_softint(cpu_env, cpu_tmp64);
3720 break;
3721 case 0x17: /* Tick compare */
3722 #if !defined(CONFIG_USER_ONLY)
3723 if (!supervisor(dc))
3724 goto illegal_insn;
3725 #endif
3726 {
3727 TCGv_ptr r_tickptr;
3728
3729 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3730 cpu_src2);
3731 r_tickptr = tcg_temp_new_ptr();
3732 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3733 offsetof(CPUSPARCState, tick));
3734 gen_helper_tick_set_limit(r_tickptr,
3735 cpu_tick_cmpr);
3736 tcg_temp_free_ptr(r_tickptr);
3737 }
3738 break;
3739 case 0x18: /* System tick */
3740 #if !defined(CONFIG_USER_ONLY)
3741 if (!supervisor(dc))
3742 goto illegal_insn;
3743 #endif
3744 {
3745 TCGv_ptr r_tickptr;
3746
3747 tcg_gen_xor_tl(cpu_dst, cpu_src1,
3748 cpu_src2);
3749 r_tickptr = tcg_temp_new_ptr();
3750 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3751 offsetof(CPUSPARCState, stick));
3752 gen_helper_tick_set_count(r_tickptr,
3753 cpu_dst);
3754 tcg_temp_free_ptr(r_tickptr);
3755 }
3756 break;
3757 case 0x19: /* System tick compare */
3758 #if !defined(CONFIG_USER_ONLY)
3759 if (!supervisor(dc))
3760 goto illegal_insn;
3761 #endif
3762 {
3763 TCGv_ptr r_tickptr;
3764
3765 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3766 cpu_src2);
3767 r_tickptr = tcg_temp_new_ptr();
3768 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3769 offsetof(CPUSPARCState, stick));
3770 gen_helper_tick_set_limit(r_tickptr,
3771 cpu_stick_cmpr);
3772 tcg_temp_free_ptr(r_tickptr);
3773 }
3774 break;
3775
3776 case 0x10: /* Performance Control */
3777 case 0x11: /* Performance Instrumentation
3778 Counter */
3779 case 0x12: /* Dispatch Control */
3780 #endif
3781 default:
3782 goto illegal_insn;
3783 }
3784 }
3785 break;
3786 #if !defined(CONFIG_USER_ONLY)
3787 case 0x31: /* wrpsr, V9 saved, restored */
3788 {
3789 if (!supervisor(dc))
3790 goto priv_insn;
3791 #ifdef TARGET_SPARC64
3792 switch (rd) {
3793 case 0:
3794 gen_helper_saved(cpu_env);
3795 break;
3796 case 1:
3797 gen_helper_restored(cpu_env);
3798 break;
3799 case 2: /* UA2005 allclean */
3800 case 3: /* UA2005 otherw */
3801 case 4: /* UA2005 normalw */
3802 case 5: /* UA2005 invalw */
3803 // XXX
3804 default:
3805 goto illegal_insn;
3806 }
3807 #else
3808 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3809 gen_helper_wrpsr(cpu_env, cpu_dst);
3810 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3811 dc->cc_op = CC_OP_FLAGS;
3812 save_state(dc);
3813 gen_op_next_insn();
3814 tcg_gen_exit_tb(0);
3815 dc->is_br = 1;
3816 #endif
3817 }
3818 break;
3819 case 0x32: /* wrwim, V9 wrpr */
3820 {
3821 if (!supervisor(dc))
3822 goto priv_insn;
3823 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3824 #ifdef TARGET_SPARC64
3825 switch (rd) {
3826 case 0: // tpc
3827 {
3828 TCGv_ptr r_tsptr;
3829
3830 r_tsptr = tcg_temp_new_ptr();
3831 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3832 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3833 offsetof(trap_state, tpc));
3834 tcg_temp_free_ptr(r_tsptr);
3835 }
3836 break;
3837 case 1: // tnpc
3838 {
3839 TCGv_ptr r_tsptr;
3840
3841 r_tsptr = tcg_temp_new_ptr();
3842 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3843 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3844 offsetof(trap_state, tnpc));
3845 tcg_temp_free_ptr(r_tsptr);
3846 }
3847 break;
3848 case 2: // tstate
3849 {
3850 TCGv_ptr r_tsptr;
3851
3852 r_tsptr = tcg_temp_new_ptr();
3853 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3854 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3855 offsetof(trap_state,
3856 tstate));
3857 tcg_temp_free_ptr(r_tsptr);
3858 }
3859 break;
3860 case 3: // tt
3861 {
3862 TCGv_ptr r_tsptr;
3863
3864 r_tsptr = tcg_temp_new_ptr();
3865 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3866 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3867 tcg_gen_st_i32(cpu_tmp32, r_tsptr,
3868 offsetof(trap_state, tt));
3869 tcg_temp_free_ptr(r_tsptr);
3870 }
3871 break;
3872 case 4: // tick
3873 {
3874 TCGv_ptr r_tickptr;
3875
3876 r_tickptr = tcg_temp_new_ptr();
3877 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3878 offsetof(CPUSPARCState, tick));
3879 gen_helper_tick_set_count(r_tickptr,
3880 cpu_tmp0);
3881 tcg_temp_free_ptr(r_tickptr);
3882 }
3883 break;
3884 case 5: // tba
3885 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3886 break;
3887 case 6: // pstate
3888 save_state(dc);
3889 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3890 dc->npc = DYNAMIC_PC;
3891 break;
3892 case 7: // tl
3893 save_state(dc);
3894 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3895 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3896 offsetof(CPUSPARCState, tl));
3897 dc->npc = DYNAMIC_PC;
3898 break;
3899 case 8: // pil
3900 gen_helper_wrpil(cpu_env, cpu_tmp0);
3901 break;
3902 case 9: // cwp
3903 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3904 break;
3905 case 10: // cansave
3906 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3907 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3908 offsetof(CPUSPARCState,
3909 cansave));
3910 break;
3911 case 11: // canrestore
3912 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3913 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3914 offsetof(CPUSPARCState,
3915 canrestore));
3916 break;
3917 case 12: // cleanwin
3918 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3919 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3920 offsetof(CPUSPARCState,
3921 cleanwin));
3922 break;
3923 case 13: // otherwin
3924 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3925 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3926 offsetof(CPUSPARCState,
3927 otherwin));
3928 break;
3929 case 14: // wstate
3930 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3931 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3932 offsetof(CPUSPARCState,
3933 wstate));
3934 break;
3935 case 16: // UA2005 gl
3936 CHECK_IU_FEATURE(dc, GL);
3937 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3938 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3939 offsetof(CPUSPARCState, gl));
3940 break;
3941 case 26: // UA2005 strand status
3942 CHECK_IU_FEATURE(dc, HYPV);
3943 if (!hypervisor(dc))
3944 goto priv_insn;
3945 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3946 break;
3947 default:
3948 goto illegal_insn;
3949 }
3950 #else
3951 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3952 if (dc->def->nwindows != 32)
3953 tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
3954 (1 << dc->def->nwindows) - 1);
3955 tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
3956 #endif
3957 }
3958 break;
3959 case 0x33: /* wrtbr, UA2005 wrhpr */
3960 {
3961 #ifndef TARGET_SPARC64
3962 if (!supervisor(dc))
3963 goto priv_insn;
3964 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3965 #else
3966 CHECK_IU_FEATURE(dc, HYPV);
3967 if (!hypervisor(dc))
3968 goto priv_insn;
3969 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3970 switch (rd) {
3971 case 0: // hpstate
3972 // XXX gen_op_wrhpstate();
3973 save_state(dc);
3974 gen_op_next_insn();
3975 tcg_gen_exit_tb(0);
3976 dc->is_br = 1;
3977 break;
3978 case 1: // htstate
3979 // XXX gen_op_wrhtstate();
3980 break;
3981 case 3: // hintp
3982 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3983 break;
3984 case 5: // htba
3985 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3986 break;
3987 case 31: // hstick_cmpr
3988 {
3989 TCGv_ptr r_tickptr;
3990
3991 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3992 r_tickptr = tcg_temp_new_ptr();
3993 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3994 offsetof(CPUSPARCState, hstick));
3995 gen_helper_tick_set_limit(r_tickptr,
3996 cpu_hstick_cmpr);
3997 tcg_temp_free_ptr(r_tickptr);
3998 }
3999 break;
4000 case 6: // hver readonly
4001 default:
4002 goto illegal_insn;
4003 }
4004 #endif
4005 }
4006 break;
4007 #endif
4008 #ifdef TARGET_SPARC64
4009 case 0x2c: /* V9 movcc */
4010 {
4011 int cc = GET_FIELD_SP(insn, 11, 12);
4012 int cond = GET_FIELD_SP(insn, 14, 17);
4013 DisasCompare cmp;
4014
4015 if (insn & (1 << 18)) {
4016 if (cc == 0) {
4017 gen_compare(&cmp, 0, cond, dc);
4018 } else if (cc == 2) {
4019 gen_compare(&cmp, 1, cond, dc);
4020 } else {
4021 goto illegal_insn;
4022 }
4023 } else {
4024 gen_fcompare(&cmp, cc, cond);
4025 }
4026
4027 /* The get_src2 above loaded the normal 13-bit
4028 immediate field, not the 11-bit field we have
4029 in movcc. But it did handle the reg case. */
4030 if (IS_IMM) {
4031 simm = GET_FIELD_SPs(insn, 0, 10);
4032 tcg_gen_movi_tl(cpu_src2, simm);
4033 }
4034
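/* Load the current rd value first so movcond can preserve it when
   the condition is false: rd = cond ? src2 : rd. */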
4035 gen_movl_reg_TN(rd, cpu_dst);
4036 tcg_gen_movcond_tl(cmp.cond, cpu_dst,
4037 cmp.c1, cmp.c2,
4038 cpu_src2, cpu_dst);
4039 free_compare(&cmp);
4040 gen_movl_TN_reg(rd, cpu_dst);
4041 break;
4042 }
4043 case 0x2d: /* V9 sdivx */
4044 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4045 gen_movl_TN_reg(rd, cpu_dst);
4046 break;
4047 case 0x2e: /* V9 popc */
4048 {
4049 cpu_src2 = get_src2(insn, cpu_src2);
4050 gen_helper_popc(cpu_dst, cpu_src2);
4051 gen_movl_TN_reg(rd, cpu_dst);
break;
4052 }
4053 case 0x2f: /* V9 movr */
4054 {
4055 int cond = GET_FIELD_SP(insn, 10, 12);
4056 DisasCompare cmp;
4057
4058 gen_compare_reg(&cmp, cond, cpu_src1);
4059
4060 /* The get_src2 above loaded the normal 13-bit
4061 immediate field, not the 10-bit field we have
4062 in movr. But it did handle the reg case. */
4063 if (IS_IMM) {
4064 simm = GET_FIELD_SPs(insn, 0, 9);
4065 tcg_gen_movi_tl(cpu_src2, simm);
4066 }
4067
4068 gen_movl_reg_TN(rd, cpu_dst);
4069 tcg_gen_movcond_tl(cmp.cond, cpu_dst,
4070 cmp.c1, cmp.c2,
4071 cpu_src2, cpu_dst);
4072 free_compare(&cmp);
4073 gen_movl_TN_reg(rd, cpu_dst);
4074 break;
4075 }
4076 #endif
4077 default:
4078 goto illegal_insn;
4079 }
4080 }
4081 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4082 #ifdef TARGET_SPARC64
4083 int opf = GET_FIELD_SP(insn, 5, 13);
4084 rs1 = GET_FIELD(insn, 13, 17);
4085 rs2 = GET_FIELD(insn, 27, 31);
4086 if (gen_trap_ifnofpu(dc)) {
4087 goto jmp_insn;
4088 }
4089
4090 switch (opf) {
4091 case 0x000: /* VIS I edge8cc */
4092 CHECK_FPU_FEATURE(dc, VIS1);
4093 gen_movl_reg_TN(rs1, cpu_src1);
4094 gen_movl_reg_TN(rs2, cpu_src2);
4095 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4096 gen_movl_TN_reg(rd, cpu_dst);
4097 break;
4098 case 0x001: /* VIS II edge8n */
4099 CHECK_FPU_FEATURE(dc, VIS2);
4100 gen_movl_reg_TN(rs1, cpu_src1);
4101 gen_movl_reg_TN(rs2, cpu_src2);
4102 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4103 gen_movl_TN_reg(rd, cpu_dst);
4104 break;
4105 case 0x002: /* VIS I edge8lcc */
4106 CHECK_FPU_FEATURE(dc, VIS1);
4107 gen_movl_reg_TN(rs1, cpu_src1);
4108 gen_movl_reg_TN(rs2, cpu_src2);
4109 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4110 gen_movl_TN_reg(rd, cpu_dst);
4111 break;
4112 case 0x003: /* VIS II edge8ln */
4113 CHECK_FPU_FEATURE(dc, VIS2);
4114 gen_movl_reg_TN(rs1, cpu_src1);
4115 gen_movl_reg_TN(rs2, cpu_src2);
4116 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4117 gen_movl_TN_reg(rd, cpu_dst);
4118 break;
4119 case 0x004: /* VIS I edge16cc */
4120 CHECK_FPU_FEATURE(dc, VIS1);
4121 gen_movl_reg_TN(rs1, cpu_src1);
4122 gen_movl_reg_TN(rs2, cpu_src2);
4123 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4124 gen_movl_TN_reg(rd, cpu_dst);
4125 break;
4126 case 0x005: /* VIS II edge16n */
4127 CHECK_FPU_FEATURE(dc, VIS2);
4128 gen_movl_reg_TN(rs1, cpu_src1);
4129 gen_movl_reg_TN(rs2, cpu_src2);
4130 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4131 gen_movl_TN_reg(rd, cpu_dst);
4132 break;
4133 case 0x006: /* VIS I edge16lcc */
4134 CHECK_FPU_FEATURE(dc, VIS1);
4135 gen_movl_reg_TN(rs1, cpu_src1);
4136 gen_movl_reg_TN(rs2, cpu_src2);
4137 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4138 gen_movl_TN_reg(rd, cpu_dst);
4139 break;
4140 case 0x007: /* VIS II edge16ln */
4141 CHECK_FPU_FEATURE(dc, VIS2);
4142 gen_movl_reg_TN(rs1, cpu_src1);
4143 gen_movl_reg_TN(rs2, cpu_src2);
4144 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4145 gen_movl_TN_reg(rd, cpu_dst);
4146 break;
4147 case 0x008: /* VIS I edge32cc */
4148 CHECK_FPU_FEATURE(dc, VIS1);
4149 gen_movl_reg_TN(rs1, cpu_src1);
4150 gen_movl_reg_TN(rs2, cpu_src2);
4151 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4152 gen_movl_TN_reg(rd, cpu_dst);
4153 break;
4154 case 0x009: /* VIS II edge32n */
4155 CHECK_FPU_FEATURE(dc, VIS2);
4156 gen_movl_reg_TN(rs1, cpu_src1);
4157 gen_movl_reg_TN(rs2, cpu_src2);
4158 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4159 gen_movl_TN_reg(rd, cpu_dst);
4160 break;
4161 case 0x00a: /* VIS I edge32lcc */
4162 CHECK_FPU_FEATURE(dc, VIS1);
4163 gen_movl_reg_TN(rs1, cpu_src1);
4164 gen_movl_reg_TN(rs2, cpu_src2);
4165 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4166 gen_movl_TN_reg(rd, cpu_dst);
4167 break;
4168 case 0x00b: /* VIS II edge32ln */
4169 CHECK_FPU_FEATURE(dc, VIS2);
4170 gen_movl_reg_TN(rs1, cpu_src1);
4171 gen_movl_reg_TN(rs2, cpu_src2);
4172 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4173 gen_movl_TN_reg(rd, cpu_dst);
4174 break;
4175 case 0x010: /* VIS I array8 */
4176 CHECK_FPU_FEATURE(dc, VIS1);
4177 cpu_src1 = get_src1(insn, cpu_src1);
4178 gen_movl_reg_TN(rs2, cpu_src2);
4179 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4180 gen_movl_TN_reg(rd, cpu_dst);
4181 break;
4182 case 0x012: /* VIS I array16 */
4183 CHECK_FPU_FEATURE(dc, VIS1);
4184 cpu_src1 = get_src1(insn, cpu_src1);
4185 gen_movl_reg_TN(rs2, cpu_src2);
4186 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4187 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4188 gen_movl_TN_reg(rd, cpu_dst);
4189 break;
4190 case 0x014: /* VIS I array32 */
4191 CHECK_FPU_FEATURE(dc, VIS1);
4192 cpu_src1 = get_src1(insn, cpu_src1);
4193 gen_movl_reg_TN(rs2, cpu_src2);
4194 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4195 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4196 gen_movl_TN_reg(rd, cpu_dst);
4197 break;
4198 case 0x018: /* VIS I alignaddr */
4199 CHECK_FPU_FEATURE(dc, VIS1);
4200 cpu_src1 = get_src1(insn, cpu_src1);
4201 gen_movl_reg_TN(rs2, cpu_src2);
4202 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4203 gen_movl_TN_reg(rd, cpu_dst);
4204 break;
4205 case 0x01a: /* VIS I alignaddrl */
4206 CHECK_FPU_FEATURE(dc, VIS1);
4207 cpu_src1 = get_src1(insn, cpu_src1);
4208 gen_movl_reg_TN(rs2, cpu_src2);
4209 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4210 gen_movl_TN_reg(rd, cpu_dst);
4211 break;
4212 case 0x019: /* VIS II bmask */
4213 CHECK_FPU_FEATURE(dc, VIS2);
4214 cpu_src1 = get_src1(insn, cpu_src1);
4215 gen_movl_reg_TN(rs2, cpu_src2);
4216 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4217 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4218 gen_movl_TN_reg(rd, cpu_dst);
4219 break;
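                    /* bmask both writes the sum rs1 + rs2 to rd and deposits
                       it into the upper 32 bits of GSR (offset 32, length 32),
                       which is where the bshuffle helper takes its
                       byte-selector mask from (cpu_gsr is passed to
                       gen_helper_bshuffle via gen_gsr_fop_DDD below). */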
4220 case 0x020: /* VIS I fcmple16 */
4221 CHECK_FPU_FEATURE(dc, VIS1);
4222 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4223 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4224 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4225 gen_movl_TN_reg(rd, cpu_dst);
4226 break;
4227 case 0x022: /* VIS I fcmpne16 */
4228 CHECK_FPU_FEATURE(dc, VIS1);
4229 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4230 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4231 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4232 gen_movl_TN_reg(rd, cpu_dst);
4233 break;
4234 case 0x024: /* VIS I fcmple32 */
4235 CHECK_FPU_FEATURE(dc, VIS1);
4236 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4237 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4238 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4239 gen_movl_TN_reg(rd, cpu_dst);
4240 break;
4241 case 0x026: /* VIS I fcmpne32 */
4242 CHECK_FPU_FEATURE(dc, VIS1);
4243 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4244 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4245 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4246 gen_movl_TN_reg(rd, cpu_dst);
4247 break;
4248 case 0x028: /* VIS I fcmpgt16 */
4249 CHECK_FPU_FEATURE(dc, VIS1);
4250 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4251 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4252 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4253 gen_movl_TN_reg(rd, cpu_dst);
4254 break;
4255 case 0x02a: /* VIS I fcmpeq16 */
4256 CHECK_FPU_FEATURE(dc, VIS1);
4257 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4258 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4259 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4260 gen_movl_TN_reg(rd, cpu_dst);
4261 break;
4262 case 0x02c: /* VIS I fcmpgt32 */
4263 CHECK_FPU_FEATURE(dc, VIS1);
4264 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4265 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4266 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4267 gen_movl_TN_reg(rd, cpu_dst);
4268 break;
4269 case 0x02e: /* VIS I fcmpeq32 */
4270 CHECK_FPU_FEATURE(dc, VIS1);
4271 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4272 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4273 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4274 gen_movl_TN_reg(rd, cpu_dst);
4275 break;
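                    /* The eight fcmp* cases above compare the 16- or 32-bit
                       lanes of two double FP registers and return a per-lane
                       bitmask in the integer register rd; for the 16-bit
                       forms, presumably bit N of rd reflects lane N of the
                       four lanes.  The lane semantics live entirely in the
                       helpers. */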
4276 case 0x031: /* VIS I fmul8x16 */
4277 CHECK_FPU_FEATURE(dc, VIS1);
4278 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4279 break;
4280 case 0x033: /* VIS I fmul8x16au */
4281 CHECK_FPU_FEATURE(dc, VIS1);
4282 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4283 break;
4284 case 0x035: /* VIS I fmul8x16al */
4285 CHECK_FPU_FEATURE(dc, VIS1);
4286 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4287 break;
4288 case 0x036: /* VIS I fmul8sux16 */
4289 CHECK_FPU_FEATURE(dc, VIS1);
4290 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4291 break;
4292 case 0x037: /* VIS I fmul8ulx16 */
4293 CHECK_FPU_FEATURE(dc, VIS1);
4294 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4295 break;
4296 case 0x038: /* VIS I fmuld8sux16 */
4297 CHECK_FPU_FEATURE(dc, VIS1);
4298 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4299 break;
4300 case 0x039: /* VIS I fmuld8ulx16 */
4301 CHECK_FPU_FEATURE(dc, VIS1);
4302 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4303 break;
4304 case 0x03a: /* VIS I fpack32 */
4305 CHECK_FPU_FEATURE(dc, VIS1);
4306 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4307 break;
4308 case 0x03b: /* VIS I fpack16 */
4309 CHECK_FPU_FEATURE(dc, VIS1);
4310 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4311 cpu_dst_32 = gen_dest_fpr_F();
4312 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4313 gen_store_fpr_F(dc, rd, cpu_dst_32);
4314 break;
4315 case 0x03d: /* VIS I fpackfix */
4316 CHECK_FPU_FEATURE(dc, VIS1);
4317 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4318 cpu_dst_32 = gen_dest_fpr_F();
4319 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4320 gen_store_fpr_F(dc, rd, cpu_dst_32);
4321 break;
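                    /* fpack16 and fpackfix consume one double source plus a
                       scale factor taken from GSR (cpu_gsr is passed to the
                       helper), producing a packed 32-bit result that is
                       stored as a single FP register. */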
4322 case 0x03e: /* VIS I pdist */
4323 CHECK_FPU_FEATURE(dc, VIS1);
4324 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4325 break;
4326 case 0x048: /* VIS I faligndata */
4327 CHECK_FPU_FEATURE(dc, VIS1);
4328 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4329 break;
4330 case 0x04b: /* VIS I fpmerge */
4331 CHECK_FPU_FEATURE(dc, VIS1);
4332 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4333 break;
4334 case 0x04c: /* VIS II bshuffle */
4335 CHECK_FPU_FEATURE(dc, VIS2);
4336 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4337 break;
4338 case 0x04d: /* VIS I fexpand */
4339 CHECK_FPU_FEATURE(dc, VIS1);
4340 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4341 break;
4342 case 0x050: /* VIS I fpadd16 */
4343 CHECK_FPU_FEATURE(dc, VIS1);
4344 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4345 break;
4346 case 0x051: /* VIS I fpadd16s */
4347 CHECK_FPU_FEATURE(dc, VIS1);
4348 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4349 break;
4350 case 0x052: /* VIS I fpadd32 */
4351 CHECK_FPU_FEATURE(dc, VIS1);
4352 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4353 break;
4354 case 0x053: /* VIS I fpadd32s */
4355 CHECK_FPU_FEATURE(dc, VIS1);
4356 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4357 break;
4358 case 0x054: /* VIS I fpsub16 */
4359 CHECK_FPU_FEATURE(dc, VIS1);
4360 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4361 break;
4362 case 0x055: /* VIS I fpsub16s */
4363 CHECK_FPU_FEATURE(dc, VIS1);
4364 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4365 break;
4366 case 0x056: /* VIS I fpsub32 */
4367 CHECK_FPU_FEATURE(dc, VIS1);
4368 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4369 break;
4370 case 0x057: /* VIS I fpsub32s */
4371 CHECK_FPU_FEATURE(dc, VIS1);
4372 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4373 break;
4374 case 0x060: /* VIS I fzero */
4375 CHECK_FPU_FEATURE(dc, VIS1);
4376 cpu_dst_64 = gen_dest_fpr_D();
4377 tcg_gen_movi_i64(cpu_dst_64, 0);
4378 gen_store_fpr_D(dc, rd, cpu_dst_64);
4379 break;
4380 case 0x061: /* VIS I fzeros */
4381 CHECK_FPU_FEATURE(dc, VIS1);
4382 cpu_dst_32 = gen_dest_fpr_F();
4383 tcg_gen_movi_i32(cpu_dst_32, 0);
4384 gen_store_fpr_F(dc, rd, cpu_dst_32);
4385 break;
4386 case 0x062: /* VIS I fnor */
4387 CHECK_FPU_FEATURE(dc, VIS1);
4388 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4389 break;
4390 case 0x063: /* VIS I fnors */
4391 CHECK_FPU_FEATURE(dc, VIS1);
4392 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4393 break;
4394 case 0x064: /* VIS I fandnot2 */
4395 CHECK_FPU_FEATURE(dc, VIS1);
4396 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4397 break;
4398 case 0x065: /* VIS I fandnot2s */
4399 CHECK_FPU_FEATURE(dc, VIS1);
4400 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4401 break;
4402 case 0x066: /* VIS I fnot2 */
4403 CHECK_FPU_FEATURE(dc, VIS1);
4404 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4405 break;
4406 case 0x067: /* VIS I fnot2s */
4407 CHECK_FPU_FEATURE(dc, VIS1);
4408 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4409 break;
4410 case 0x068: /* VIS I fandnot1 */
4411 CHECK_FPU_FEATURE(dc, VIS1);
4412 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4413 break;
4414 case 0x069: /* VIS I fandnot1s */
4415 CHECK_FPU_FEATURE(dc, VIS1);
4416 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4417 break;
4418 case 0x06a: /* VIS I fnot1 */
4419 CHECK_FPU_FEATURE(dc, VIS1);
4420 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4421 break;
4422 case 0x06b: /* VIS I fnot1s */
4423 CHECK_FPU_FEATURE(dc, VIS1);
4424 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4425 break;
4426 case 0x06c: /* VIS I fxor */
4427 CHECK_FPU_FEATURE(dc, VIS1);
4428 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4429 break;
4430 case 0x06d: /* VIS I fxors */
4431 CHECK_FPU_FEATURE(dc, VIS1);
4432 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4433 break;
4434 case 0x06e: /* VIS I fnand */
4435 CHECK_FPU_FEATURE(dc, VIS1);
4436 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4437 break;
4438 case 0x06f: /* VIS I fnands */
4439 CHECK_FPU_FEATURE(dc, VIS1);
4440 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4441 break;
4442 case 0x070: /* VIS I fand */
4443 CHECK_FPU_FEATURE(dc, VIS1);
4444 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4445 break;
4446 case 0x071: /* VIS I fands */
4447 CHECK_FPU_FEATURE(dc, VIS1);
4448 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4449 break;
4450 case 0x072: /* VIS I fxnor */
4451 CHECK_FPU_FEATURE(dc, VIS1);
4452 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4453 break;
4454 case 0x073: /* VIS I fxnors */
4455 CHECK_FPU_FEATURE(dc, VIS1);
4456 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4457 break;
4458 case 0x074: /* VIS I fsrc1 */
4459 CHECK_FPU_FEATURE(dc, VIS1);
4460 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4461 gen_store_fpr_D(dc, rd, cpu_src1_64);
4462 break;
4463 case 0x075: /* VIS I fsrc1s */
4464 CHECK_FPU_FEATURE(dc, VIS1);
4465 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4466 gen_store_fpr_F(dc, rd, cpu_src1_32);
4467 break;
4468 case 0x076: /* VIS I fornot2 */
4469 CHECK_FPU_FEATURE(dc, VIS1);
4470 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4471 break;
4472 case 0x077: /* VIS I fornot2s */
4473 CHECK_FPU_FEATURE(dc, VIS1);
4474 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4475 break;
4476 case 0x078: /* VIS I fsrc2 */
4477 CHECK_FPU_FEATURE(dc, VIS1);
4478 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4479 gen_store_fpr_D(dc, rd, cpu_src1_64);
4480 break;
4481 case 0x079: /* VIS I fsrc2s */
4482 CHECK_FPU_FEATURE(dc, VIS1);
4483 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4484 gen_store_fpr_F(dc, rd, cpu_src1_32);
4485 break;
4486 case 0x07a: /* VIS I fornot1 */
4487 CHECK_FPU_FEATURE(dc, VIS1);
4488 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4489 break;
4490 case 0x07b: /* VIS I fornot1s */
4491 CHECK_FPU_FEATURE(dc, VIS1);
4492 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4493 break;
4494 case 0x07c: /* VIS I for */
4495 CHECK_FPU_FEATURE(dc, VIS1);
4496 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4497 break;
4498 case 0x07d: /* VIS I fors */
4499 CHECK_FPU_FEATURE(dc, VIS1);
4500 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4501 break;
4502 case 0x07e: /* VIS I fone */
4503 CHECK_FPU_FEATURE(dc, VIS1);
4504 cpu_dst_64 = gen_dest_fpr_D();
4505 tcg_gen_movi_i64(cpu_dst_64, -1);
4506 gen_store_fpr_D(dc, rd, cpu_dst_64);
4507 break;
4508 case 0x07f: /* VIS I fones */
4509 CHECK_FPU_FEATURE(dc, VIS1);
4510 cpu_dst_32 = gen_dest_fpr_F();
4511 tcg_gen_movi_i32(cpu_dst_32, -1);
4512 gen_store_fpr_F(dc, rd, cpu_dst_32);
4513 break;
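                    /* The 0x060..0x07f block covers the full set of VIS
                       logical ops and maps each one directly onto a TCG op:
                       fzero/fone materialize constants, fsrc1/fsrc2 are plain
                       register moves, and fandnot1/fornot1 simply swap the
                       rs1/rs2 arguments of the andc/orc forms used by
                       fandnot2/fornot2. */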
4514 case 0x080: /* VIS I shutdown */
4515 case 0x081: /* VIS II siam */
4516 // XXX: shutdown/siam not implemented; trap as illegal
4517 goto illegal_insn;
4518 default:
4519 goto illegal_insn;
4520 }
4521 #else
4522 goto ncp_insn;
4523 #endif
4524 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4525 #ifdef TARGET_SPARC64
4526 goto illegal_insn;
4527 #else
4528 goto ncp_insn;
4529 #endif
4530 #ifdef TARGET_SPARC64
4531 } else if (xop == 0x39) { /* V9 return */
4532 TCGv_i32 r_const;
4533
4534 save_state(dc);
4535 cpu_src1 = get_src1(insn, cpu_src1);
4536 if (IS_IMM) { /* immediate */
4537 simm = GET_FIELDs(insn, 19, 31);
4538 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4539 } else { /* register */
4540 rs2 = GET_FIELD(insn, 27, 31);
4541 if (rs2) {
4542 gen_movl_reg_TN(rs2, cpu_src2);
4543 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4544 } else
4545 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4546 }
4547 gen_helper_restore(cpu_env);
4548 gen_mov_pc_npc(dc);
4549 r_const = tcg_const_i32(3);
4550 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4551 tcg_temp_free_i32(r_const);
4552 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4553 dc->npc = DYNAMIC_PC;
4554 goto jmp_insn;
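            /* V9 return: the target is rs1 (+ simm13 or + rs2), the register
               window is restored before the jump, and the computed address is
               checked against the mask 3 -- presumably check_align traps
               unless the low two bits are clear -- before it becomes the new
               npc. */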
4555 #endif
4556 } else {
4557 cpu_src1 = get_src1(insn, cpu_src1);
4558 if (IS_IMM) { /* immediate */
4559 simm = GET_FIELDs(insn, 19, 31);
4560 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4561 } else { /* register */
4562 rs2 = GET_FIELD(insn, 27, 31);
4563 if (rs2) {
4564 gen_movl_reg_TN(rs2, cpu_src2);
4565 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4566 } else
4567 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4568 }
4569 switch (xop) {
4570 case 0x38: /* jmpl */
4571 {
4572 TCGv r_pc;
4573 TCGv_i32 r_const;
4574
4575 r_pc = tcg_const_tl(dc->pc);
4576 gen_movl_TN_reg(rd, r_pc);
4577 tcg_temp_free(r_pc);
4578 gen_mov_pc_npc(dc);
4579 r_const = tcg_const_i32(3);
4580 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4581 tcg_temp_free_i32(r_const);
4582 gen_address_mask(dc, cpu_dst);
4583 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4584 dc->npc = DYNAMIC_PC;
4585 }
4586 goto jmp_insn;
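                    /* jmpl writes the address of the jmpl itself (dc->pc)
                       into rd as the link value before redirecting npc; e.g.
                       the usual "jmpl %i7 + 8, %g0" return idiom relies on rd
                       being written first. */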
4587 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4588 case 0x39: /* rett, V9 return */
4589 {
4590 TCGv_i32 r_const;
4591
4592 if (!supervisor(dc))
4593 goto priv_insn;
4594 gen_mov_pc_npc(dc);
4595 r_const = tcg_const_i32(3);
4596 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4597 tcg_temp_free_i32(r_const);
4598 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4599 dc->npc = DYNAMIC_PC;
4600 gen_helper_rett(cpu_env);
4601 }
4602 goto jmp_insn;
4603 #endif
4604 case 0x3b: /* flush */
4605 if (!(dc->def->features & CPU_FEATURE_FLUSH))
4606 goto unimp_flush;
4607 /* nop */
4608 break;
4609 case 0x3c: /* save */
4610 save_state(dc);
4611 gen_helper_save(cpu_env);
4612 gen_movl_TN_reg(rd, cpu_dst);
4613 break;
4614 case 0x3d: /* restore */
4615 save_state(dc);
4616 gen_helper_restore(cpu_env);
4617 gen_movl_TN_reg(rd, cpu_dst);
4618 break;
4619 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4620 case 0x3e: /* V9 done/retry */
4621 {
4622 switch (rd) {
4623 case 0:
4624 if (!supervisor(dc))
4625 goto priv_insn;
4626 dc->npc = DYNAMIC_PC;
4627 dc->pc = DYNAMIC_PC;
4628 gen_helper_done(cpu_env);
4629 goto jmp_insn;
4630 case 1:
4631 if (!supervisor(dc))
4632 goto priv_insn;
4633 dc->npc = DYNAMIC_PC;
4634 dc->pc = DYNAMIC_PC;
4635 gen_helper_retry(cpu_env);
4636 goto jmp_insn;
4637 default:
4638 goto illegal_insn;
4639 }
4640 }
4641 break;
4642 #endif
4643 default:
4644 goto illegal_insn;
4645 }
4646 }
4647 break;
4648 }
4649 break;
4650 case 3: /* load/store instructions */
4651 {
4652 unsigned int xop = GET_FIELD(insn, 7, 12);
4653
4654 /* flush pending conditional evaluations before exposing
4655 cpu state */
4656 if (dc->cc_op != CC_OP_FLAGS) {
4657 dc->cc_op = CC_OP_FLAGS;
4658 gen_helper_compute_psr(cpu_env);
4659 }
4660 cpu_src1 = get_src1(insn, cpu_src1);
4661 if (xop == 0x3c || xop == 0x3e) { // V9 casa/casxa
4662 rs2 = GET_FIELD(insn, 27, 31);
4663 gen_movl_reg_TN(rs2, cpu_src2);
4664 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4665 } else if (IS_IMM) { /* immediate */
4666 simm = GET_FIELDs(insn, 19, 31);
4667 tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
4668 } else { /* register */
4669 rs2 = GET_FIELD(insn, 27, 31);
4670 if (rs2 != 0) {
4671 gen_movl_reg_TN(rs2, cpu_src2);
4672 tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
4673 } else
4674 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4675 }
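            /* Address selection above has three shapes: casa/casxa take the
               address from rs1 unmodified and load rs2 as the comparison
               value; the immediate form computes rs1 + simm13; the register
               form computes rs1 + rs2, with rs2 == %g0 short-circuited to a
               plain move. */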
4676 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4677 (xop > 0x17 && xop <= 0x1d ) ||
4678 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4679 switch (xop) {
4680 case 0x0: /* ld, V9 lduw, load unsigned word */
4681 gen_address_mask(dc, cpu_addr);
4682 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4683 break;
4684 case 0x1: /* ldub, load unsigned byte */
4685 gen_address_mask(dc, cpu_addr);
4686 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4687 break;
4688 case 0x2: /* lduh, load unsigned halfword */
4689 gen_address_mask(dc, cpu_addr);
4690 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4691 break;
4692 case 0x3: /* ldd, load double word */
4693 if (rd & 1)
4694 goto illegal_insn;
4695 else {
4696 TCGv_i32 r_const;
4697
4698 save_state(dc);
4699 r_const = tcg_const_i32(7);
4700 /* XXX remove alignment check */
4701 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4702 tcg_temp_free_i32(r_const);
4703 gen_address_mask(dc, cpu_addr);
4704 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4705 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
4706 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
4707 gen_movl_TN_reg(rd + 1, cpu_tmp0);
4708 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
4709 tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
4710 tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
4711 }
4712 break;
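                /* For ldd above, the 64-bit value is split big-endian style:
                   the low 32 bits go to the odd register rd + 1 and the high
                   32 bits to the even register rd, which is why odd rd
                   encodings are rejected as illegal. */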
4713 case 0x9: /* ldsb, load signed byte */
4714 gen_address_mask(dc, cpu_addr);
4715 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4716 break;
4717 case 0xa: /* ldsh, load signed halfword */
4718 gen_address_mask(dc, cpu_addr);
4719 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4720 break;
4721 case 0xd: /* ldstub -- XXX: should be atomic */
4722 {
4723 TCGv r_const;
4724
4725 gen_address_mask(dc, cpu_addr);
4726 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4727 r_const = tcg_const_tl(0xff);
4728 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4729 tcg_temp_free(r_const);
4730 }
4731 break;
4732 case 0x0f: /* swap, swap register with memory --
4733 XXX: should be atomic */
4734 CHECK_IU_FEATURE(dc, SWAP);
4735 gen_movl_reg_TN(rd, cpu_val);
4736 gen_address_mask(dc, cpu_addr);
4737 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4738 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4739 tcg_gen_mov_tl(cpu_val, cpu_tmp0);
4740 break;
4741 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4742 case 0x10: /* lda, V9 lduwa, load word alternate */
4743 #ifndef TARGET_SPARC64
4744 if (IS_IMM)
4745 goto illegal_insn;
4746 if (!supervisor(dc))
4747 goto priv_insn;
4748 #endif
4749 save_state(dc);
4750 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4751 break;
4752 case 0x11: /* lduba, load unsigned byte alternate */
4753 #ifndef TARGET_SPARC64
4754 if (IS_IMM)
4755 goto illegal_insn;
4756 if (!supervisor(dc))
4757 goto priv_insn;
4758 #endif
4759 save_state(dc);
4760 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4761 break;
4762 case 0x12: /* lduha, load unsigned halfword alternate */
4763 #ifndef TARGET_SPARC64
4764 if (IS_IMM)
4765 goto illegal_insn;
4766 if (!supervisor(dc))
4767 goto priv_insn;
4768 #endif
4769 save_state(dc);
4770 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4771 break;
4772 case 0x13: /* ldda, load double word alternate */
4773 #ifndef TARGET_SPARC64
4774 if (IS_IMM)
4775 goto illegal_insn;
4776 if (!supervisor(dc))
4777 goto priv_insn;
4778 #endif
4779 if (rd & 1)
4780 goto illegal_insn;
4781 save_state(dc);
4782 gen_ldda_asi(cpu_val, cpu_addr, insn, rd);
4783 goto skip_move;
4784 case 0x19: /* ldsba, load signed byte alternate */
4785 #ifndef TARGET_SPARC64
4786 if (IS_IMM)
4787 goto illegal_insn;
4788 if (!supervisor(dc))
4789 goto priv_insn;
4790 #endif
4791 save_state(dc);
4792 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4793 break;
4794 case 0x1a: /* ldsha, load signed halfword alternate */
4795 #ifndef TARGET_SPARC64
4796 if (IS_IMM)
4797 goto illegal_insn;
4798 if (!supervisor(dc))
4799 goto priv_insn;
4800 #endif
4801 save_state(dc);
4802 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4803 break;
4804 case 0x1d: /* ldstuba -- XXX: should be atomic */
4805 #ifndef TARGET_SPARC64
4806 if (IS_IMM)
4807 goto illegal_insn;
4808 if (!supervisor(dc))
4809 goto priv_insn;
4810 #endif
4811 save_state(dc);
4812 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4813 break;
4814 case 0x1f: /* swapa, swap reg with alt. memory --
4815 XXX: should be atomic */
4816 CHECK_IU_FEATURE(dc, SWAP);
4817 #ifndef TARGET_SPARC64
4818 if (IS_IMM)
4819 goto illegal_insn;
4820 if (!supervisor(dc))
4821 goto priv_insn;
4822 #endif
4823 save_state(dc);
4824 gen_movl_reg_TN(rd, cpu_val);
4825 gen_swap_asi(cpu_val, cpu_addr, insn);
4826 break;
4827
4828 #ifndef TARGET_SPARC64
4829 case 0x30: /* ldc */
4830 case 0x31: /* ldcsr */
4831 case 0x33: /* lddc */
4832 goto ncp_insn;
4833 #endif
4834 #endif
4835 #ifdef TARGET_SPARC64
4836 case 0x08: /* V9 ldsw */
4837 gen_address_mask(dc, cpu_addr);
4838 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4839 break;
4840 case 0x0b: /* V9 ldx */
4841 gen_address_mask(dc, cpu_addr);
4842 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4843 break;
4844 case 0x18: /* V9 ldswa */
4845 save_state(dc);
4846 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4847 break;
4848 case 0x1b: /* V9 ldxa */
4849 save_state(dc);
4850 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4851 break;
4852 case 0x2d: /* V9 prefetch, no effect */
4853 goto skip_move;
4854 case 0x30: /* V9 ldfa */
4855 if (gen_trap_ifnofpu(dc)) {
4856 goto jmp_insn;
4857 }
4858 save_state(dc);
4859 gen_ldf_asi(cpu_addr, insn, 4, rd);
4860 gen_update_fprs_dirty(rd);
4861 goto skip_move;
4862 case 0x33: /* V9 lddfa */
4863 if (gen_trap_ifnofpu(dc)) {
4864 goto jmp_insn;
4865 }
4866 save_state(dc);
4867 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4868 gen_update_fprs_dirty(DFPREG(rd));
4869 goto skip_move;
4870 case 0x3d: /* V9 prefetcha, no effect */
4871 goto skip_move;
4872 case 0x32: /* V9 ldqfa */
4873 CHECK_FPU_FEATURE(dc, FLOAT128);
4874 if (gen_trap_ifnofpu(dc)) {
4875 goto jmp_insn;
4876 }
4877 save_state(dc);
4878 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4879 gen_update_fprs_dirty(QFPREG(rd));
4880 goto skip_move;
4881 #endif
4882 default:
4883 goto illegal_insn;
4884 }
4885 gen_movl_TN_reg(rd, cpu_val);
4886 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4887 skip_move: ;
4888 #endif
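            /* skip_move: the ASI FP loads, ldda and the prefetch cases jump
               here to bypass the common gen_movl_TN_reg(rd, cpu_val)
               writeback, since they either wrote their destination directly
               or have no result register. */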
4889 } else if (xop >= 0x20 && xop < 0x24) {
4890 if (gen_trap_ifnofpu(dc)) {
4891 goto jmp_insn;
4892 }
4893 save_state(dc);
4894 switch (xop) {
4895 case 0x20: /* ldf, load fpreg */
4896 gen_address_mask(dc, cpu_addr);
4897 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4898 cpu_dst_32 = gen_dest_fpr_F();
4899 tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
4900 gen_store_fpr_F(dc, rd, cpu_dst_32);
4901 break;
4902 case 0x21: /* ldfsr, V9 ldxfsr */
4903 #ifdef TARGET_SPARC64
4904 gen_address_mask(dc, cpu_addr);
4905 if (rd == 1) {
4906 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4907 gen_helper_ldxfsr(cpu_env, cpu_tmp64);
4908 } else {
4909 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4910 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
4911 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4912 }
4913 #else
4914 {
4915 tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
4916 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4917 }
4918 #endif
4919 break;
4920 case 0x22: /* ldqf, load quad fpreg */
4921 {
4922 TCGv_i32 r_const;
4923
4924 CHECK_FPU_FEATURE(dc, FLOAT128);
4925 r_const = tcg_const_i32(dc->mem_idx);
4926 gen_address_mask(dc, cpu_addr);
4927 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4928 tcg_temp_free_i32(r_const);
4929 gen_op_store_QT0_fpr(QFPREG(rd));
4930 gen_update_fprs_dirty(QFPREG(rd));
4931 }
4932 break;
4933 case 0x23: /* lddf, load double fpreg */
4934 gen_address_mask(dc, cpu_addr);
4935 cpu_dst_64 = gen_dest_fpr_D();
4936 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4937 gen_store_fpr_D(dc, rd, cpu_dst_64);
4938 break;
4939 default:
4940 goto illegal_insn;
4941 }
4942 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4943 xop == 0xe || xop == 0x1e) {
4944 gen_movl_reg_TN(rd, cpu_val);
4945 switch (xop) {
4946 case 0x4: /* st, store word */
4947 gen_address_mask(dc, cpu_addr);
4948 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4949 break;
4950 case 0x5: /* stb, store byte */
4951 gen_address_mask(dc, cpu_addr);
4952 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4953 break;
4954 case 0x6: /* sth, store halfword */
4955 gen_address_mask(dc, cpu_addr);
4956 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4957 break;
4958 case 0x7: /* std, store double word */
4959 if (rd & 1)
4960 goto illegal_insn;
4961 else {
4962 TCGv_i32 r_const;
4963
4964 save_state(dc);
4965 gen_address_mask(dc, cpu_addr);
4966 r_const = tcg_const_i32(7);
4967 /* XXX remove alignment check */
4968 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4969 tcg_temp_free_i32(r_const);
4970 gen_movl_reg_TN(rd + 1, cpu_tmp0);
4971 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, cpu_val);
4972 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
4973 }
4974 break;
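                /* std above mirrors ldd: rd + 1 supplies the low word and rd
                   the high word, concatenated into a single 64-bit store by
                   tcg_gen_concat_tl_i64. */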
4975 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4976 case 0x14: /* sta, V9 stwa, store word alternate */
4977 #ifndef TARGET_SPARC64
4978 if (IS_IMM)
4979 goto illegal_insn;
4980 if (!supervisor(dc))
4981 goto priv_insn;
4982 #endif
4983 save_state(dc);
4984 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4985 dc->npc = DYNAMIC_PC;
4986 break;
4987 case 0x15: /* stba, store byte alternate */
4988 #ifndef TARGET_SPARC64
4989 if (IS_IMM)
4990 goto illegal_insn;
4991 if (!supervisor(dc))
4992 goto priv_insn;
4993 #endif
4994 save_state(dc);
4995 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4996 dc->npc = DYNAMIC_PC;
4997 break;
4998 case 0x16: /* stha, store halfword alternate */
4999 #ifndef TARGET_SPARC64
5000 if (IS_IMM)
5001 goto illegal_insn;
5002 if (!supervisor(dc))
5003 goto priv_insn;
5004 #endif
5005 save_state(dc);
5006 gen_st_asi(cpu_val, cpu_addr, insn, 2);
5007 dc->npc = DYNAMIC_PC;
5008 break;
5009 case 0x17: /* stda, store double word alternate */
5010 #ifndef TARGET_SPARC64
5011 if (IS_IMM)
5012 goto illegal_insn;
5013 if (!supervisor(dc))
5014 goto priv_insn;
5015 #endif
5016 if (rd & 1)
5017 goto illegal_insn;
5018 else {
5019 save_state(dc);
5020 gen_stda_asi(cpu_val, cpu_addr, insn, rd);
5021 }
5022 break;
5023 #endif
5024 #ifdef TARGET_SPARC64
5025 case 0x0e: /* V9 stx */
5026 gen_address_mask(dc, cpu_addr);
5027 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5028 break;
5029 case 0x1e: /* V9 stxa */
5030 save_state(dc);
5031 gen_st_asi(cpu_val, cpu_addr, insn, 8);
5032 dc->npc = DYNAMIC_PC;
5033 break;
5034 #endif
5035 default:
5036 goto illegal_insn;
5037 }
5038 } else if (xop > 0x23 && xop < 0x28) {
5039 if (gen_trap_ifnofpu(dc)) {
5040 goto jmp_insn;
5041 }
5042 save_state(dc);
5043 switch (xop) {
5044 case 0x24: /* stf, store fpreg */
5045 gen_address_mask(dc, cpu_addr);
5046 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5047 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
5048 tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
5049 break;
5050 case 0x25: /* stfsr, V9 stxfsr */
5051 #ifdef TARGET_SPARC64
5052 gen_address_mask(dc, cpu_addr);
5053 tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
5054 if (rd == 1)
5055 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
5056 else
5057 tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
5058 #else
5059 tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
5060 tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
5061 #endif
5062 break;
5063 case 0x26:
5064 #ifdef TARGET_SPARC64
5065 /* V9 stqf, store quad fpreg */
5066 {
5067 TCGv_i32 r_const;
5068
5069 CHECK_FPU_FEATURE(dc, FLOAT128);
5070 gen_op_load_fpr_QT0(QFPREG(rd));
5071 r_const = tcg_const_i32(dc->mem_idx);
5072 gen_address_mask(dc, cpu_addr);
5073 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5074 tcg_temp_free_i32(r_const);
5075 }
5076 break;
5077 #else /* !TARGET_SPARC64 */
5078 /* stdfq, store floating point queue */
5079 #if defined(CONFIG_USER_ONLY)
5080 goto illegal_insn;
5081 #else
5082 if (!supervisor(dc))
5083 goto priv_insn;
5084 if (gen_trap_ifnofpu(dc)) {
5085 goto jmp_insn;
5086 }
5087 goto nfq_insn;
5088 #endif
5089 #endif
5090 case 0x27: /* stdf, store double fpreg */
5091 gen_address_mask(dc, cpu_addr);
5092 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5093 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5094 break;
5095 default:
5096 goto illegal_insn;
5097 }
5098 } else if (xop > 0x33 && xop < 0x3f) {
5099 save_state(dc);
5100 switch (xop) {
5101 #ifdef TARGET_SPARC64
5102 case 0x34: /* V9 stfa */
5103 if (gen_trap_ifnofpu(dc)) {
5104 goto jmp_insn;
5105 }
5106 gen_stf_asi(cpu_addr, insn, 4, rd);
5107 break;
5108 case 0x36: /* V9 stqfa */
5109 {
5110 TCGv_i32 r_const;
5111
5112 CHECK_FPU_FEATURE(dc, FLOAT128);
5113 if (gen_trap_ifnofpu(dc)) {
5114 goto jmp_insn;
5115 }
5116 r_const = tcg_const_i32(7);
5117 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5118 tcg_temp_free_i32(r_const);
5119 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5120 }
5121 break;
5122 case 0x37: /* V9 stdfa */
5123 if (gen_trap_ifnofpu(dc)) {
5124 goto jmp_insn;
5125 }
5126 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5127 break;
5128 case 0x3c: /* V9 casa */
5129 gen_cas_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
5130 gen_movl_TN_reg(rd, cpu_val);
5131 break;
5132 case 0x3e: /* V9 casxa */
5133 gen_casx_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
5134 gen_movl_TN_reg(rd, cpu_val);
5135 break;
5136 #else
5137 case 0x34: /* stc */
5138 case 0x35: /* stcsr */
5139 case 0x36: /* stdcq */
5140 case 0x37: /* stdc */
5141 goto ncp_insn;
5142 #endif
5143 default:
5144 goto illegal_insn;
5145 }
5146 } else
5147 goto illegal_insn;
5148 }
5149 break;
5150 }
5151 /* default case for non-jump instructions */
5152 if (dc->npc == DYNAMIC_PC) {
5153 dc->pc = DYNAMIC_PC;
5154 gen_op_next_insn();
5155 } else if (dc->npc == JUMP_PC) {
5156 /* we can do a static jump */
5157 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5158 dc->is_br = 1;
5159 } else {
5160 dc->pc = dc->npc;
5161 dc->npc = dc->npc + 4;
5162 }
5163 jmp_insn:
5164 goto egress;
5165 illegal_insn:
5166 {
5167 TCGv_i32 r_const;
5168
5169 save_state(dc);
5170 r_const = tcg_const_i32(TT_ILL_INSN);
5171 gen_helper_raise_exception(cpu_env, r_const);
5172 tcg_temp_free_i32(r_const);
5173 dc->is_br = 1;
5174 }
5175 goto egress;
5176 unimp_flush:
5177 {
5178 TCGv_i32 r_const;
5179
5180 save_state(dc);
5181 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5182 gen_helper_raise_exception(cpu_env, r_const);
5183 tcg_temp_free_i32(r_const);
5184 dc->is_br = 1;
5185 }
5186 goto egress;
5187 #if !defined(CONFIG_USER_ONLY)
5188 priv_insn:
5189 {
5190 TCGv_i32 r_const;
5191
5192 save_state(dc);
5193 r_const = tcg_const_i32(TT_PRIV_INSN);
5194 gen_helper_raise_exception(cpu_env, r_const);
5195 tcg_temp_free_i32(r_const);
5196 dc->is_br = 1;
5197 }
5198 goto egress;
5199 #endif
5200 nfpu_insn:
5201 save_state(dc);
5202 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5203 dc->is_br = 1;
5204 goto egress;
5205 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5206 nfq_insn:
5207 save_state(dc);
5208 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5209 dc->is_br = 1;
5210 goto egress;
5211 #endif
5212 #ifndef TARGET_SPARC64
5213 ncp_insn:
5214 {
5215 TCGv_i32 r_const;
5216
5217 save_state(dc);
5218 r_const = tcg_const_i32(TT_NCP_INSN);
5219 gen_helper_raise_exception(cpu_env, r_const);
5220 tcg_temp_free_i32(r_const);
5221 dc->is_br = 1;
5222 }
5223 goto egress;
5224 #endif
5225 egress:
5226 tcg_temp_free(cpu_tmp1);
5227 tcg_temp_free(cpu_tmp2);
5228 if (dc->n_t32 != 0) {
5229 int i;
5230 for (i = dc->n_t32 - 1; i >= 0; --i) {
5231 tcg_temp_free_i32(dc->t32[i]);
5232 }
5233 dc->n_t32 = 0;
5234 }
5235 }
5236
5237 static inline void gen_intermediate_code_internal(TranslationBlock * tb,
5238 int spc, CPUSPARCState *env)
5239 {
5240 target_ulong pc_start, last_pc;
5241 uint16_t *gen_opc_end;
5242 DisasContext dc1, *dc = &dc1;
5243 CPUBreakpoint *bp;
5244 int j, lj = -1;
5245 int num_insns;
5246 int max_insns;
5247 unsigned int insn;
5248
5249 memset(dc, 0, sizeof(DisasContext));
5250 dc->tb = tb;
5251 pc_start = tb->pc;
5252 dc->pc = pc_start;
5253 last_pc = dc->pc;
5254 dc->npc = (target_ulong) tb->cs_base;
5255 dc->cc_op = CC_OP_DYNAMIC;
5256 dc->mem_idx = cpu_mmu_index(env);
5257 dc->def = env->def;
5258 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5259 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5260 dc->singlestep = (env->singlestep_enabled || singlestep);
5261 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
5262
5263 num_insns = 0;
5264 max_insns = tb->cflags & CF_COUNT_MASK;
5265 if (max_insns == 0)
5266 max_insns = CF_COUNT_MASK;
5267 gen_icount_start();
5268 do {
5269 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
5270 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5271 if (bp->pc == dc->pc) {
5272 if (dc->pc != pc_start)
5273 save_state(dc);
5274 gen_helper_debug(cpu_env);
5275 tcg_gen_exit_tb(0);
5276 dc->is_br = 1;
5277 goto exit_gen_loop;
5278 }
5279 }
5280 }
5281 if (spc) {
5282 qemu_log("Search PC...\n");
5283 j = gen_opc_ptr - gen_opc_buf;
5284 if (lj < j) {
5285 lj++;
5286 while (lj < j)
5287 gen_opc_instr_start[lj++] = 0;
5288 gen_opc_pc[lj] = dc->pc;
5289 gen_opc_npc[lj] = dc->npc;
5290 gen_opc_instr_start[lj] = 1;
5291 gen_opc_icount[lj] = num_insns;
5292 }
5293 }
5294 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
5295 gen_io_start();
5296 last_pc = dc->pc;
5297 insn = cpu_ldl_code(env, dc->pc);
5298
5299 cpu_tmp0 = tcg_temp_new();
5300 cpu_tmp32 = tcg_temp_new_i32();
5301 cpu_tmp64 = tcg_temp_new_i64();
5302 cpu_dst = tcg_temp_new();
5303 cpu_val = tcg_temp_new();
5304 cpu_addr = tcg_temp_new();
5305
5306 disas_sparc_insn(dc, insn);
5307 num_insns++;
5308
5309 tcg_temp_free(cpu_addr);
5310 tcg_temp_free(cpu_val);
5311 tcg_temp_free(cpu_dst);
5312 tcg_temp_free_i64(cpu_tmp64);
5313 tcg_temp_free_i32(cpu_tmp32);
5314 tcg_temp_free(cpu_tmp0);
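        /* cpu_tmp0/tmp32/tmp64 and cpu_dst/val/addr are allocated fresh
           before each disas_sparc_insn call and freed right after it, so
           every translated instruction starts from a clean set of scratch
           temporaries. */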
5315
5316 if (dc->is_br)
5317 break;
5318 /* if the next PC is different, we abort now */
5319 if (dc->pc != (last_pc + 4))
5320 break;
5321 /* if we reach a page boundary, we stop generation so that the
5322 PC of a TT_TFAULT exception is always in the right page */
5323 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5324 break;
5325 /* in single-step mode, we translate only one instruction
5326 and then generate an exception */
5327 if (dc->singlestep) {
5328 break;
5329 }
5330 } while ((gen_opc_ptr < gen_opc_end) &&
5331 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5332 num_insns < max_insns);
5333
5334 exit_gen_loop:
5335 if (tb->cflags & CF_LAST_IO) {
5336 gen_io_end();
5337 }
5338 if (!dc->is_br) {
5339 if (dc->pc != DYNAMIC_PC &&
5340 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5341 /* static PC and NPC: we can use direct chaining */
5342 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5343 } else {
5344 if (dc->pc != DYNAMIC_PC) {
5345 tcg_gen_movi_tl(cpu_pc, dc->pc);
5346 }
5347 save_npc(dc);
5348 tcg_gen_exit_tb(0);
5349 }
5350 }
5351 gen_icount_end(tb, num_insns);
5352 *gen_opc_ptr = INDEX_op_end;
5353 if (spc) {
5354 j = gen_opc_ptr - gen_opc_buf;
5355 lj++;
5356 while (lj <= j)
5357 gen_opc_instr_start[lj++] = 0;
5358 #if 0
5359 log_page_dump();
5360 #endif
5361 gen_opc_jump_pc[0] = dc->jump_pc[0];
5362 gen_opc_jump_pc[1] = dc->jump_pc[1];
5363 } else {
5364 tb->size = last_pc + 4 - pc_start;
5365 tb->icount = num_insns;
5366 }
5367 #ifdef DEBUG_DISAS
5368 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5369 qemu_log("--------------\n");
5370 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5371 log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
5372 qemu_log("\n");
5373 }
5374 #endif
5375 }
5376
5377 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5378 {
5379 gen_intermediate_code_internal(tb, 0, env);
5380 }
5381
5382 void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
5383 {
5384 gen_intermediate_code_internal(tb, 1, env);
5385 }
5386
5387 void gen_intermediate_code_init(CPUSPARCState *env)
5388 {
5389 unsigned int i;
5390 static int inited;
5391 static const char * const gregnames[8] = {
5392 NULL, // g0 not used
5393 "g1",
5394 "g2",
5395 "g3",
5396 "g4",
5397 "g5",
5398 "g6",
5399 "g7",
5400 };
5401 static const char * const fregnames[32] = {
5402 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5403 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5404 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5405 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5406 };
5407
5408 /* init various static tables */
5409 if (!inited) {
5410 inited = 1;
5411
5412 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5413 cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
5414 offsetof(CPUSPARCState, regwptr),
5415 "regwptr");
5416 #ifdef TARGET_SPARC64
5417 cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
5418 "xcc");
5419 cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
5420 "asi");
5421 cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
5422 "fprs");
5423 cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
5424 "gsr");
5425 cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
5426 offsetof(CPUSPARCState, tick_cmpr),
5427 "tick_cmpr");
5428 cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
5429 offsetof(CPUSPARCState, stick_cmpr),
5430 "stick_cmpr");
5431 cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
5432 offsetof(CPUSPARCState, hstick_cmpr),
5433 "hstick_cmpr");
5434 cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
5435 "hintp");
5436 cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
5437 "htba");
5438 cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
5439 "hver");
5440 cpu_ssr = tcg_global_mem_new(TCG_AREG0,
5441 offsetof(CPUSPARCState, ssr), "ssr");
5442 cpu_ver = tcg_global_mem_new(TCG_AREG0,
5443 offsetof(CPUSPARCState, version), "ver");
5444 cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
5445 offsetof(CPUSPARCState, softint),
5446 "softint");
5447 #else
5448 cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
5449 "wim");
5450 #endif
5451 cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
5452 "cond");
5453 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
5454 "cc_src");
5455 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
5456 offsetof(CPUSPARCState, cc_src2),
5457 "cc_src2");
5458 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
5459 "cc_dst");
5460 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
5461 "cc_op");
5462 cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
5463 "psr");
5464 cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
5465 "fsr");
5466 cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
5467 "pc");
5468 cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
5469 "npc");
5470 cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
5471 #ifndef CONFIG_USER_ONLY
5472 cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
5473 "tbr");
5474 #endif
5475 for (i = 1; i < 8; i++) {
5476 cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
5477 offsetof(CPUSPARCState, gregs[i]),
5478 gregnames[i]);
5479 }
5480 for (i = 0; i < TARGET_DPREGS; i++) {
5481 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
5482 offsetof(CPUSPARCState, fpr[i]),
5483 fregnames[i]);
5484 }
5485
5486 /* register helpers */
5487
5488 #define GEN_HELPER 2
5489 #include "helper.h"
5490 }
5491 }
5492
5493 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
5494 {
5495 target_ulong npc;
5496 env->pc = gen_opc_pc[pc_pos];
5497 npc = gen_opc_npc[pc_pos];
5498 if (npc == DYNAMIC_PC) {
5499 /* dynamic NPC: already stored */
5500 } else if (npc == JUMP_PC) {
5501 /* jump PC: use 'cond' and the jump targets of the translation */
5502 if (env->cond) {
5503 env->npc = gen_opc_jump_pc[0];
5504 } else {
5505 env->npc = gen_opc_jump_pc[1];
5506 }
5507 } else {
5508 env->npc = npc;
5509 }
5510
5511 /* flush pending conditional evaluations before exposing cpu state */
5512 if (CC_OP != CC_OP_FLAGS) {
5513 helper_compute_psr(env);
5514 }
5515 }