]> git.proxmox.com Git - qemu.git/blob - target-sparc/translate.c
target-sparc: Finish conversion to gen_load_gpr
[qemu.git] / target-sparc / translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond, cpu_dst, cpu_addr;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0;
63 static TCGv_i32 cpu_tmp32;
64 static TCGv_i64 cpu_tmp64;
65 /* Floating point registers */
66 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67
68 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
69 static target_ulong gen_opc_jump_pc[2];
70
71 #include "gen-icount.h"
72
73 typedef struct DisasContext {
74 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
75 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
76 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
77 int is_br;
78 int mem_idx;
79 int fpu_enabled;
80 int address_mask_32bit;
81 int singlestep;
82 uint32_t cc_op; /* current CC operation */
83 struct TranslationBlock *tb;
84 sparc_def_t *def;
85 TCGv_i32 t32[3];
86 TCGv ttl[5];
87 int n_t32;
88 int n_ttl;
89 } DisasContext;
90
91 typedef struct {
92 TCGCond cond;
93 bool is_bool;
94 bool g1, g2;
95 TCGv c1, c2;
96 } DisasCompare;
97
98 // This function uses non-native bit order
99 #define GET_FIELD(X, FROM, TO) \
100 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
101
102 // This function uses the order in the manuals, i.e. bit 0 is 2^0
103 #define GET_FIELD_SP(X, FROM, TO) \
104 GET_FIELD(X, 31 - (TO), 31 - (FROM))
105
106 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
107 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
108
109 #ifdef TARGET_SPARC64
110 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
111 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
112 #else
113 #define DFPREG(r) (r & 0x1e)
114 #define QFPREG(r) (r & 0x1c)
115 #endif
116
117 #define UA2005_HTRAP_MASK 0xff
118 #define V8_TRAP_MASK 0x7f
119
/* Sign-extend the low LEN bits of X to a full int.
 *
 * The left shift is done in unsigned arithmetic: shifting a signed int
 * so that bits reach or cross the sign bit is undefined behaviour in C,
 * while the unsigned shift is well defined and yields the same bit
 * pattern.  The arithmetic right shift of the (possibly negative)
 * result then replicates the sign bit, as on all supported hosts.
 */
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (int)((unsigned int)x << len) >> len;
}
125
126 #define IS_IMM (insn & (1<<13))
127
/* Mark the FP bank containing register RD as dirty in FPRS:
   bit 0 (DL) for the lower bank (f0..f31), bit 1 (DU) for the upper
   bank.  Sparc32 has no FPRS register, so this is a no-op there.  */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
134
/* floating point registers moves */

/* Return a 32-bit view of single-precision FP register SRC.
   Singles are packed in pairs inside the 64-bit cpu_fpr[] slots:
   the odd-numbered register lives in the low 32 bits, the even one
   in the high 32 bits (see gen_store_fpr_F's deposit offsets).  */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    /* On a 32-bit host an i64 temp is a pair of i32 temps, so the
       halves can be aliased directly without emitting any code.  */
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        /* Low half: reinterpret the i64 temp as an i32.
           NOTE(review): assumes i32 reads of an i64 temp see its low
           32 bits on 64-bit hosts — confirm against the TCG backend.  */
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        /* High half: must be shifted down into a fresh i32 temp.  */
        TCGv_i32 ret = tcg_temp_new_i32();
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        /* Record the temp in the DisasContext so the translation loop
           can free it at the end of the instruction.  */
        dc->t32[dc->n_t32++] = ret;
        assert(dc->n_t32 <= ARRAY_SIZE(dc->t32));

        return ret;
    }
#endif
}

/* Store V into single-precision FP register DST (the half of a
   cpu_fpr[] slot selected as described above) and mark FPRS dirty.  */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    /* Deposit into the proper 32-bit lane: odd regs at bit 0,
       even regs at bit 32.  */
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

/* Scratch destination for a single-precision result; the caller then
   commits it with gen_store_fpr_F.  */
static TCGv_i32 gen_dest_fpr_F(void)
{
    return cpu_tmp32;
}

/* Return the i64 global holding double-precision FP register SRC.  */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

/* Store V into double-precision FP register DST and mark FPRS dirty.  */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

/* Scratch destination for a double-precision result; the caller then
   commits it with gen_store_fpr_D.  */
static TCGv_i64 gen_dest_fpr_D(void)
{
    return cpu_tmp64;
}
201
/* Copy the quad FP register pair starting at SRC into the env qt0
   scratch slot, where quad-precision helpers expect operand 0.  */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

/* Same as above, but for the qt1 scratch slot (operand 1).  */
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

/* Copy the quad result left in qt0 by a helper back into the quad FP
   register pair starting at DST.  */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
225
#ifdef TARGET_SPARC64
/* Quad FP register move: copy both 64-bit halves of RS to RD and mark
   the destination bank dirty in FPRS.  */
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
237
238 /* moves */
239 #ifdef CONFIG_USER_ONLY
240 #define supervisor(dc) 0
241 #ifdef TARGET_SPARC64
242 #define hypervisor(dc) 0
243 #endif
244 #else
245 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
246 #ifdef TARGET_SPARC64
247 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
248 #else
249 #endif
250 #endif
251
252 #ifdef TARGET_SPARC64
253 #ifndef TARGET_ABI32
254 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
255 #else
256 #define AM_CHECK(dc) (1)
257 #endif
258 #endif
259
260 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
261 {
262 #ifdef TARGET_SPARC64
263 if (AM_CHECK(dc))
264 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
265 #endif
266 }
267
268 static inline TCGv get_temp_tl(DisasContext *dc)
269 {
270 TCGv t;
271 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
272 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
273 return t;
274 }
275
276 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
277 {
278 if (reg == 0 || reg >= 8) {
279 TCGv t = get_temp_tl(dc);
280 if (reg == 0) {
281 tcg_gen_movi_tl(t, 0);
282 } else {
283 tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
284 }
285 return t;
286 } else {
287 return cpu_gregs[reg];
288 }
289 }
290
291 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
292 {
293 if (reg > 0) {
294 if (reg < 8) {
295 tcg_gen_mov_tl(cpu_gregs[reg], v);
296 } else {
297 tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
298 }
299 }
300 }
301
302 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
303 {
304 if (reg == 0 || reg >= 8) {
305 return get_temp_tl(dc);
306 } else {
307 return cpu_gregs[reg];
308 }
309 }
310
/* Emit a jump to guest (PC, NPC).  If both targets lie on the same
   guest page as the current TB and we are not single-stepping, emit a
   direct (chainable) TB jump; otherwise write pc/npc and exit to the
   main loop unchained.  */
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        /* Return (tb | slot) so the main loop can chain this exit.  */
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
332
// XXX suboptimal
/* The four helpers below extract one PSR flag (N, Z, V, C) from the
   32-bit PSR image SRC into REG as a 0/1 TL value.  */
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
361
/* dst = src1 + immediate SRC2, recording both operands and the result
   in the cc_* globals for later lazy condition-code evaluation.  */
static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

/* dst = src1 + src2, recording operands and result in the cc_*
   globals for later lazy condition-code evaluation.  */
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
377
/* Recover the 32-bit carry produced by the last cc-setting add:
   carry = (u32)cc_dst < (u32)cc_src.  Returns a fresh i32 temp that
   the caller must free.  */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    /* Compare only the low 32 bits of the TL-sized cc globals.  */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

/* Recover the 32-bit borrow produced by the last cc-setting subtract:
   carry = (u32)cc_src < (u32)cc_src2.  Returns a fresh i32 temp that
   the caller must free.  */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
429
/* ADDX/ADDC: dst = src1 + src2 + icc.C, optionally updating the
   condition codes.  The carry-in is recovered from the lazily-tracked
   cc state when the previous cc-setting operation allows it, falling
   back to the compute_C_icc helper otherwise.  */
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
        {
            /* For 32-bit hosts, we can re-use the host's hardware carry
               generation by using an ADD2 opcode.  We discard the low
               part of the output.  Ideally we'd combine this operation
               with the add that generated the carry in the first place.  */
            TCGv dst_low = tcg_temp_new();
            tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
                            cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(dst_low);
            goto add_done;
        }
#endif
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    /* Widen the 32-bit carry to the TL width before adding it in.  */
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
 add_done:
#endif
    if (update_cc) {
        /* Record operands/result for lazy flag evaluation as ADDX.  */
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
505
/* dst = src1 - immediate SRC2 with condition-code tracking.  A zero
   immediate leaves the result equal to src1 with C = V = 0, which is
   exactly the LOGIC flag pattern, so the cheaper CC_OP_LOGIC lazy
   form is recorded in that case.  */
static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(cpu_cc_dst, src1);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

/* dst = src1 - src2, recording operands and result in the cc_*
   globals for later lazy condition-code evaluation.  */
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
529
/* SUBX/SUBC: dst = src1 - src2 - icc.C, optionally updating the
   condition codes.  Mirrors gen_op_addx_int: the borrow is recovered
   from the lazy cc state when possible, otherwise via helper.  */
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
        {
            /* For 32-bit hosts, we can re-use the host's hardware carry
               generation by using a SUB2 opcode.  We discard the low
               part of the output.  Ideally we'd combine this operation
               with the add that generated the carry in the first place.  */
            TCGv dst_low = tcg_temp_new();
            tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
                            cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(dst_low);
            goto sub_done;
        }
#endif
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    /* Widen the 32-bit borrow to the TL width before subtracting.  */
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

#if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
 sub_done:
#endif
    if (update_cc) {
        /* Record operands/result for lazy flag evaluation as SUBX.  */
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
605
/* MULScc: one step of the SPARC multiply-step instruction.  Computes
   one partial-product add, shifting the Y register and the running
   sum by one bit, and leaves operands/result in the cc_* globals.
   Clobbers cpu_tmp0.  */
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero;

    r_temp = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    /* Zero the second operand when the low bit of Y is clear.  */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
    tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
    tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(cpu_tmp0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
649
/* 32x32->64 multiply for UMUL/SMUL: dst receives the full 64-bit
   product (truncated to TL width on sparc32) and the Y register
   receives the high 32 bits.  SIGN_EXT selects signed vs unsigned
   widening of the truncated operands.  Clobbers cpu_tmp0.  */
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
    TCGv_i32 r_src1, r_src2;
    TCGv_i64 r_temp, r_temp2;

    r_src1 = tcg_temp_new_i32();
    r_src2 = tcg_temp_new_i32();

    /* Only the low 32 bits of each operand participate.  */
    tcg_gen_trunc_tl_i32(r_src1, src1);
    tcg_gen_trunc_tl_i32(r_src2, src2);

    r_temp = tcg_temp_new_i64();
    r_temp2 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext_i32_i64(r_temp, r_src2);
        tcg_gen_ext_i32_i64(r_temp2, r_src1);
    } else {
        tcg_gen_extu_i32_i64(r_temp, r_src2);
        tcg_gen_extu_i32_i64(r_temp2, r_src1);
    }

    tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);

    /* Y = high 32 bits of the product.  */
    tcg_gen_shri_i64(r_temp, r_temp2, 32);
    tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
    tcg_temp_free_i64(r_temp);
    tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);

    tcg_gen_trunc_i64_tl(dst, r_temp2);

    tcg_temp_free_i64(r_temp2);

    tcg_temp_free_i32(r_src1);
    tcg_temp_free_i32(r_src2);
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
698
/* Integer condition evaluators: each computes a 0/1 value into DST
   for one SPARC branch condition, from the PSR image in SRC.  The
   comment above each function gives the flag formula.  Several of
   them use cpu_tmp0 as scratch.  */

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(cpu_tmp0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    gen_mov_reg_Z(cpu_tmp0, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(cpu_tmp0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(cpu_tmp0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(cpu_tmp0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    gen_mov_reg_Z(cpu_tmp0, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(cpu_tmp0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(cpu_tmp0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
817
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
/* Extract FCC0 of the condition-code field at FCC_OFFSET from the FSR
   image SRC into REG as 0/1.  */
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

/* Same for FCC1.  */
static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

/* FP condition evaluators: compute a 0/1 value into DST for one FP
   branch condition, from the FCC pair encoded per the table above.
   Most use cpu_tmp0 as scratch.  */

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, cpu_tmp0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
967
/* Two-way static branch on R_COND: continue at PC1 when nonzero,
   otherwise at PC2, each via a chainable TB exit.  */
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

/* Annulling conditional branch: when R_COND is nonzero, execute the
   delay slot at PC2 with npc = PC1 (the branch target); when zero,
   annul the slot and continue at PC2 + 4.  */
static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
997
998 static inline void gen_generic_branch(DisasContext *dc)
999 {
1000 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1001 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1002 TCGv zero = tcg_const_tl(0);
1003
1004 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1005
1006 tcg_temp_free(npc0);
1007 tcg_temp_free(npc1);
1008 tcg_temp_free(zero);
1009 }
1010
1011 /* call this function before using the condition register as it may
1012 have been set for a jump */
1013 static inline void flush_cond(DisasContext *dc)
1014 {
1015 if (dc->npc == JUMP_PC) {
1016 gen_generic_branch(dc);
1017 dc->npc = DYNAMIC_PC;
1018 }
1019 }
1020
1021 static inline void save_npc(DisasContext *dc)
1022 {
1023 if (dc->npc == JUMP_PC) {
1024 gen_generic_branch(dc);
1025 dc->npc = DYNAMIC_PC;
1026 } else if (dc->npc != DYNAMIC_PC) {
1027 tcg_gen_movi_tl(cpu_npc, dc->npc);
1028 }
1029 }
1030
1031 static inline void update_psr(DisasContext *dc)
1032 {
1033 if (dc->cc_op != CC_OP_FLAGS) {
1034 dc->cc_op = CC_OP_FLAGS;
1035 gen_helper_compute_psr(cpu_env);
1036 }
1037 }
1038
/* Sync this instruction's pc and npc into cpu_pc/cpu_npc so guest
   state is precise (e.g. ahead of an operation that may trap).  */
static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1044
1045 static inline void gen_mov_pc_npc(DisasContext *dc)
1046 {
1047 if (dc->npc == JUMP_PC) {
1048 gen_generic_branch(dc);
1049 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1050 dc->pc = DYNAMIC_PC;
1051 } else if (dc->npc == DYNAMIC_PC) {
1052 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1053 dc->pc = DYNAMIC_PC;
1054 } else {
1055 dc->pc = dc->npc;
1056 }
1057 }
1058
/* Emit the sequential advance: pc = npc, npc += 4.  */
static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1064
/* Free the comparison operand temps.  g1/g2 mark operands that alias
   TCG globals (e.g. cpu_cc_src) and must not be freed.  */
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
1074
1075 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1076 DisasContext *dc)
1077 {
1078 static int subcc_cond[16] = {
1079 TCG_COND_NEVER,
1080 TCG_COND_EQ,
1081 TCG_COND_LE,
1082 TCG_COND_LT,
1083 TCG_COND_LEU,
1084 TCG_COND_LTU,
1085 -1, /* neg */
1086 -1, /* overflow */
1087 TCG_COND_ALWAYS,
1088 TCG_COND_NE,
1089 TCG_COND_GT,
1090 TCG_COND_GE,
1091 TCG_COND_GTU,
1092 TCG_COND_GEU,
1093 -1, /* pos */
1094 -1, /* no overflow */
1095 };
1096
1097 static int logic_cond[16] = {
1098 TCG_COND_NEVER,
1099 TCG_COND_EQ, /* eq: Z */
1100 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1101 TCG_COND_LT, /* lt: N ^ V -> N */
1102 TCG_COND_EQ, /* leu: C | Z -> Z */
1103 TCG_COND_NEVER, /* ltu: C -> 0 */
1104 TCG_COND_LT, /* neg: N */
1105 TCG_COND_NEVER, /* vs: V -> 0 */
1106 TCG_COND_ALWAYS,
1107 TCG_COND_NE, /* ne: !Z */
1108 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1109 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1110 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1111 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1112 TCG_COND_GE, /* pos: !N */
1113 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1114 };
1115
1116 TCGv_i32 r_src;
1117 TCGv r_dst;
1118
1119 #ifdef TARGET_SPARC64
1120 if (xcc) {
1121 r_src = cpu_xcc;
1122 } else {
1123 r_src = cpu_psr;
1124 }
1125 #else
1126 r_src = cpu_psr;
1127 #endif
1128
1129 switch (dc->cc_op) {
1130 case CC_OP_LOGIC:
1131 cmp->cond = logic_cond[cond];
1132 do_compare_dst_0:
1133 cmp->is_bool = false;
1134 cmp->g2 = false;
1135 cmp->c2 = tcg_const_tl(0);
1136 #ifdef TARGET_SPARC64
1137 if (!xcc) {
1138 cmp->g1 = false;
1139 cmp->c1 = tcg_temp_new();
1140 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1141 break;
1142 }
1143 #endif
1144 cmp->g1 = true;
1145 cmp->c1 = cpu_cc_dst;
1146 break;
1147
1148 case CC_OP_SUB:
1149 switch (cond) {
1150 case 6: /* neg */
1151 case 14: /* pos */
1152 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1153 goto do_compare_dst_0;
1154
1155 case 7: /* overflow */
1156 case 15: /* !overflow */
1157 goto do_dynamic;
1158
1159 default:
1160 cmp->cond = subcc_cond[cond];
1161 cmp->is_bool = false;
1162 #ifdef TARGET_SPARC64
1163 if (!xcc) {
1164 /* Note that sign-extension works for unsigned compares as
1165 long as both operands are sign-extended. */
1166 cmp->g1 = cmp->g2 = false;
1167 cmp->c1 = tcg_temp_new();
1168 cmp->c2 = tcg_temp_new();
1169 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1170 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1171 break;
1172 }
1173 #endif
1174 cmp->g1 = cmp->g2 = true;
1175 cmp->c1 = cpu_cc_src;
1176 cmp->c2 = cpu_cc_src2;
1177 break;
1178 }
1179 break;
1180
1181 default:
1182 do_dynamic:
1183 gen_helper_compute_psr(cpu_env);
1184 dc->cc_op = CC_OP_FLAGS;
1185 /* FALLTHRU */
1186
1187 case CC_OP_FLAGS:
1188 /* We're going to generate a boolean result. */
1189 cmp->cond = TCG_COND_NE;
1190 cmp->is_bool = true;
1191 cmp->g1 = cmp->g2 = false;
1192 cmp->c1 = r_dst = tcg_temp_new();
1193 cmp->c2 = tcg_const_tl(0);
1194
1195 switch (cond) {
1196 case 0x0:
1197 gen_op_eval_bn(r_dst);
1198 break;
1199 case 0x1:
1200 gen_op_eval_be(r_dst, r_src);
1201 break;
1202 case 0x2:
1203 gen_op_eval_ble(r_dst, r_src);
1204 break;
1205 case 0x3:
1206 gen_op_eval_bl(r_dst, r_src);
1207 break;
1208 case 0x4:
1209 gen_op_eval_bleu(r_dst, r_src);
1210 break;
1211 case 0x5:
1212 gen_op_eval_bcs(r_dst, r_src);
1213 break;
1214 case 0x6:
1215 gen_op_eval_bneg(r_dst, r_src);
1216 break;
1217 case 0x7:
1218 gen_op_eval_bvs(r_dst, r_src);
1219 break;
1220 case 0x8:
1221 gen_op_eval_ba(r_dst);
1222 break;
1223 case 0x9:
1224 gen_op_eval_bne(r_dst, r_src);
1225 break;
1226 case 0xa:
1227 gen_op_eval_bg(r_dst, r_src);
1228 break;
1229 case 0xb:
1230 gen_op_eval_bge(r_dst, r_src);
1231 break;
1232 case 0xc:
1233 gen_op_eval_bgu(r_dst, r_src);
1234 break;
1235 case 0xd:
1236 gen_op_eval_bcc(r_dst, r_src);
1237 break;
1238 case 0xe:
1239 gen_op_eval_bpos(r_dst, r_src);
1240 break;
1241 case 0xf:
1242 gen_op_eval_bvc(r_dst, r_src);
1243 break;
1244 }
1245 break;
1246 }
1247 }
1248
1249 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1250 {
1251 unsigned int offset;
1252 TCGv r_dst;
1253
1254 /* For now we still generate a straight boolean result. */
1255 cmp->cond = TCG_COND_NE;
1256 cmp->is_bool = true;
1257 cmp->g1 = cmp->g2 = false;
1258 cmp->c1 = r_dst = tcg_temp_new();
1259 cmp->c2 = tcg_const_tl(0);
1260
1261 switch (cc) {
1262 default:
1263 case 0x0:
1264 offset = 0;
1265 break;
1266 case 0x1:
1267 offset = 32 - 10;
1268 break;
1269 case 0x2:
1270 offset = 34 - 10;
1271 break;
1272 case 0x3:
1273 offset = 36 - 10;
1274 break;
1275 }
1276
1277 switch (cond) {
1278 case 0x0:
1279 gen_op_eval_bn(r_dst);
1280 break;
1281 case 0x1:
1282 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1283 break;
1284 case 0x2:
1285 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1286 break;
1287 case 0x3:
1288 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1289 break;
1290 case 0x4:
1291 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1292 break;
1293 case 0x5:
1294 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1295 break;
1296 case 0x6:
1297 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1298 break;
1299 case 0x7:
1300 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1301 break;
1302 case 0x8:
1303 gen_op_eval_ba(r_dst);
1304 break;
1305 case 0x9:
1306 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1307 break;
1308 case 0xa:
1309 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1310 break;
1311 case 0xb:
1312 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1313 break;
1314 case 0xc:
1315 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1316 break;
1317 case 0xd:
1318 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1319 break;
1320 case 0xe:
1321 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1322 break;
1323 case 0xf:
1324 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1325 break;
1326 }
1327 }
1328
1329 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1330 DisasContext *dc)
1331 {
1332 DisasCompare cmp;
1333 gen_compare(&cmp, cc, cond, dc);
1334
1335 /* The interface is to return a boolean in r_dst. */
1336 if (cmp.is_bool) {
1337 tcg_gen_mov_tl(r_dst, cmp.c1);
1338 } else {
1339 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1340 }
1341
1342 free_compare(&cmp);
1343 }
1344
1345 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1346 {
1347 DisasCompare cmp;
1348 gen_fcompare(&cmp, cc, cond);
1349
1350 /* The interface is to return a boolean in r_dst. */
1351 if (cmp.is_bool) {
1352 tcg_gen_mov_tl(r_dst, cmp.c1);
1353 } else {
1354 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1355 }
1356
1357 free_compare(&cmp);
1358 }
1359
1360 #ifdef TARGET_SPARC64
/* Map a register-condition encoding (BPr/MOVr rcond field) to the TCG
   condition for the *opposite* test against zero; gen_compare_reg
   inverts it back with tcg_invert_cond.  Entries 0 and 4 are reserved
   encodings and must never be indexed.  */
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1372
1373 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1374 {
1375 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1376 cmp->is_bool = false;
1377 cmp->g1 = true;
1378 cmp->g2 = false;
1379 cmp->c1 = r_src;
1380 cmp->c2 = tcg_const_tl(0);
1381 }
1382
1383 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1384 {
1385 DisasCompare cmp;
1386 gen_compare_reg(&cmp, cond, r_src);
1387
1388 /* The interface is to return a boolean in r_dst. */
1389 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1390
1391 free_compare(&cmp);
1392 }
1393 #endif
1394
/* Emit a Bicc/BPcc-style integer branch.  "offset" is the already
   shifted displacement from dc->pc; "cc" selects icc vs xcc.
   Unconditional encodings (never/always) are resolved at translate
   time; true conditions leave a two-target jump_pc[] state (JUMP_PC)
   to be resolved at the end of the TB.  */
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    /* Condition field and annul bit (insn bit 29).  */
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* 32-bit address masking (AM) truncates the branch target.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* Annulled: the delay slot is skipped as well.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* ba,a: skip the delay slot and go straight to target.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        /* Materialize the condition as a boolean in cpu_cond.  */
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
1443
/* Emit an FBfcc/FBPfcc floating-point branch.  Identical in structure
   to do_branch above, but the condition comes from the FSR fcc field
   selected by "cc" (via gen_fcond).  */
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    /* Condition field and annul bit (insn bit 29).  */
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* 32-bit address masking (AM) truncates the branch target.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* Annulled: the delay slot is skipped as well.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* fba,a: skip the delay slot and go straight to target.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        /* Materialize the FP condition as a boolean in cpu_cond.  */
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}
1492
1493 #ifdef TARGET_SPARC64
/* Emit a V9 BPr branch on the value of register r_reg.  Unlike
   do_branch there are no unconditional encodings to special-case.  */
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    /* Register-condition field and annul bit (insn bit 29).  */
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    /* 32-bit address masking (AM) truncates the branch target.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    /* Materialize the register condition as a boolean in cpu_cond.  */
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}
1520
1521 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1522 {
1523 switch (fccno) {
1524 case 0:
1525 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1526 break;
1527 case 1:
1528 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1529 break;
1530 case 2:
1531 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1532 break;
1533 case 3:
1534 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1535 break;
1536 }
1537 }
1538
1539 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1540 {
1541 switch (fccno) {
1542 case 0:
1543 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1544 break;
1545 case 1:
1546 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1547 break;
1548 case 2:
1549 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1550 break;
1551 case 3:
1552 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1553 break;
1554 }
1555 }
1556
1557 static inline void gen_op_fcmpq(int fccno)
1558 {
1559 switch (fccno) {
1560 case 0:
1561 gen_helper_fcmpq(cpu_env);
1562 break;
1563 case 1:
1564 gen_helper_fcmpq_fcc1(cpu_env);
1565 break;
1566 case 2:
1567 gen_helper_fcmpq_fcc2(cpu_env);
1568 break;
1569 case 3:
1570 gen_helper_fcmpq_fcc3(cpu_env);
1571 break;
1572 }
1573 }
1574
1575 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1576 {
1577 switch (fccno) {
1578 case 0:
1579 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1580 break;
1581 case 1:
1582 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1583 break;
1584 case 2:
1585 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1586 break;
1587 case 3:
1588 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1589 break;
1590 }
1591 }
1592
1593 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1594 {
1595 switch (fccno) {
1596 case 0:
1597 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1598 break;
1599 case 1:
1600 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1601 break;
1602 case 2:
1603 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1604 break;
1605 case 3:
1606 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1607 break;
1608 }
1609 }
1610
1611 static inline void gen_op_fcmpeq(int fccno)
1612 {
1613 switch (fccno) {
1614 case 0:
1615 gen_helper_fcmpeq(cpu_env);
1616 break;
1617 case 1:
1618 gen_helper_fcmpeq_fcc1(cpu_env);
1619 break;
1620 case 2:
1621 gen_helper_fcmpeq_fcc2(cpu_env);
1622 break;
1623 case 3:
1624 gen_helper_fcmpeq_fcc3(cpu_env);
1625 break;
1626 }
1627 }
1628
1629 #else
1630
/* Single-precision FP compare; fccno is unused, this (non-SPARC64)
   build has a single fcc field.  */
static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}
1635
/* Double-precision FP compare; fccno is unused in this build.  */
static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}
1640
/* Quad FP compare (operands in the env QT slots); fccno is unused.  */
static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}
1645
/* Single-precision compare-with-exception; fccno is unused.  */
static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}
1650
/* Double-precision compare-with-exception; fccno is unused.  */
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}
1655
/* Quad compare-with-exception (operands in the env QT slots); fccno
   is unused.  */
static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
1660 #endif
1661
1662 static inline void gen_op_fpexception_im(int fsr_flags)
1663 {
1664 TCGv_i32 r_const;
1665
1666 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1667 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1668 r_const = tcg_const_i32(TT_FP_EXCP);
1669 gen_helper_raise_exception(cpu_env, r_const);
1670 tcg_temp_free_i32(r_const);
1671 }
1672
1673 static int gen_trap_ifnofpu(DisasContext *dc)
1674 {
1675 #if !defined(CONFIG_USER_ONLY)
1676 if (!dc->fpu_enabled) {
1677 TCGv_i32 r_const;
1678
1679 save_state(dc);
1680 r_const = tcg_const_i32(TT_NFPU_INSN);
1681 gen_helper_raise_exception(cpu_env, r_const);
1682 tcg_temp_free_i32(r_const);
1683 dc->is_br = 1;
1684 return 1;
1685 }
1686 #endif
1687 return 0;
1688 }
1689
/* Clear the FSR ftt field and the current IEEE exception (cexc) bits.  */
static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1694
1695 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1696 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1697 {
1698 TCGv_i32 dst, src;
1699
1700 src = gen_load_fpr_F(dc, rs);
1701 dst = gen_dest_fpr_F();
1702
1703 gen(dst, cpu_env, src);
1704
1705 gen_store_fpr_F(dc, rd, dst);
1706 }
1707
1708 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1709 void (*gen)(TCGv_i32, TCGv_i32))
1710 {
1711 TCGv_i32 dst, src;
1712
1713 src = gen_load_fpr_F(dc, rs);
1714 dst = gen_dest_fpr_F();
1715
1716 gen(dst, src);
1717
1718 gen_store_fpr_F(dc, rd, dst);
1719 }
1720
1721 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1722 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1723 {
1724 TCGv_i32 dst, src1, src2;
1725
1726 src1 = gen_load_fpr_F(dc, rs1);
1727 src2 = gen_load_fpr_F(dc, rs2);
1728 dst = gen_dest_fpr_F();
1729
1730 gen(dst, cpu_env, src1, src2);
1731
1732 gen_store_fpr_F(dc, rd, dst);
1733 }
1734
1735 #ifdef TARGET_SPARC64
1736 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1737 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1738 {
1739 TCGv_i32 dst, src1, src2;
1740
1741 src1 = gen_load_fpr_F(dc, rs1);
1742 src2 = gen_load_fpr_F(dc, rs2);
1743 dst = gen_dest_fpr_F();
1744
1745 gen(dst, src1, src2);
1746
1747 gen_store_fpr_F(dc, rd, dst);
1748 }
1749 #endif
1750
1751 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1752 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1753 {
1754 TCGv_i64 dst, src;
1755
1756 src = gen_load_fpr_D(dc, rs);
1757 dst = gen_dest_fpr_D();
1758
1759 gen(dst, cpu_env, src);
1760
1761 gen_store_fpr_D(dc, rd, dst);
1762 }
1763
1764 #ifdef TARGET_SPARC64
1765 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1766 void (*gen)(TCGv_i64, TCGv_i64))
1767 {
1768 TCGv_i64 dst, src;
1769
1770 src = gen_load_fpr_D(dc, rs);
1771 dst = gen_dest_fpr_D();
1772
1773 gen(dst, src);
1774
1775 gen_store_fpr_D(dc, rd, dst);
1776 }
1777 #endif
1778
1779 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1780 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1781 {
1782 TCGv_i64 dst, src1, src2;
1783
1784 src1 = gen_load_fpr_D(dc, rs1);
1785 src2 = gen_load_fpr_D(dc, rs2);
1786 dst = gen_dest_fpr_D();
1787
1788 gen(dst, cpu_env, src1, src2);
1789
1790 gen_store_fpr_D(dc, rd, dst);
1791 }
1792
1793 #ifdef TARGET_SPARC64
1794 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1795 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1796 {
1797 TCGv_i64 dst, src1, src2;
1798
1799 src1 = gen_load_fpr_D(dc, rs1);
1800 src2 = gen_load_fpr_D(dc, rs2);
1801 dst = gen_dest_fpr_D();
1802
1803 gen(dst, src1, src2);
1804
1805 gen_store_fpr_D(dc, rd, dst);
1806 }
1807
1808 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1809 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1810 {
1811 TCGv_i64 dst, src1, src2;
1812
1813 src1 = gen_load_fpr_D(dc, rs1);
1814 src2 = gen_load_fpr_D(dc, rs2);
1815 dst = gen_dest_fpr_D();
1816
1817 gen(dst, cpu_gsr, src1, src2);
1818
1819 gen_store_fpr_D(dc, rd, dst);
1820 }
1821
1822 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1823 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1824 {
1825 TCGv_i64 dst, src0, src1, src2;
1826
1827 src1 = gen_load_fpr_D(dc, rs1);
1828 src2 = gen_load_fpr_D(dc, rs2);
1829 src0 = gen_load_fpr_D(dc, rd);
1830 dst = gen_dest_fpr_D();
1831
1832 gen(dst, src0, src1, src2);
1833
1834 gen_store_fpr_D(dc, rd, dst);
1835 }
1836 #endif
1837
1838 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1839 void (*gen)(TCGv_ptr))
1840 {
1841 gen_op_load_fpr_QT1(QFPREG(rs));
1842
1843 gen(cpu_env);
1844
1845 gen_op_store_QT0_fpr(QFPREG(rd));
1846 gen_update_fprs_dirty(QFPREG(rd));
1847 }
1848
1849 #ifdef TARGET_SPARC64
1850 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1851 void (*gen)(TCGv_ptr))
1852 {
1853 gen_op_load_fpr_QT1(QFPREG(rs));
1854
1855 gen(cpu_env);
1856
1857 gen_op_store_QT0_fpr(QFPREG(rd));
1858 gen_update_fprs_dirty(QFPREG(rd));
1859 }
1860 #endif
1861
1862 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1863 void (*gen)(TCGv_ptr))
1864 {
1865 gen_op_load_fpr_QT0(QFPREG(rs1));
1866 gen_op_load_fpr_QT1(QFPREG(rs2));
1867
1868 gen(cpu_env);
1869
1870 gen_op_store_QT0_fpr(QFPREG(rd));
1871 gen_update_fprs_dirty(QFPREG(rd));
1872 }
1873
1874 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1875 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1876 {
1877 TCGv_i64 dst;
1878 TCGv_i32 src1, src2;
1879
1880 src1 = gen_load_fpr_F(dc, rs1);
1881 src2 = gen_load_fpr_F(dc, rs2);
1882 dst = gen_dest_fpr_D();
1883
1884 gen(dst, cpu_env, src1, src2);
1885
1886 gen_store_fpr_D(dc, rd, dst);
1887 }
1888
1889 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1890 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1891 {
1892 TCGv_i64 src1, src2;
1893
1894 src1 = gen_load_fpr_D(dc, rs1);
1895 src2 = gen_load_fpr_D(dc, rs2);
1896
1897 gen(cpu_env, src1, src2);
1898
1899 gen_op_store_QT0_fpr(QFPREG(rd));
1900 gen_update_fprs_dirty(QFPREG(rd));
1901 }
1902
1903 #ifdef TARGET_SPARC64
1904 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1905 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1906 {
1907 TCGv_i64 dst;
1908 TCGv_i32 src;
1909
1910 src = gen_load_fpr_F(dc, rs);
1911 dst = gen_dest_fpr_D();
1912
1913 gen(dst, cpu_env, src);
1914
1915 gen_store_fpr_D(dc, rd, dst);
1916 }
1917 #endif
1918
1919 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1920 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1921 {
1922 TCGv_i64 dst;
1923 TCGv_i32 src;
1924
1925 src = gen_load_fpr_F(dc, rs);
1926 dst = gen_dest_fpr_D();
1927
1928 gen(dst, cpu_env, src);
1929
1930 gen_store_fpr_D(dc, rd, dst);
1931 }
1932
1933 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1934 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1935 {
1936 TCGv_i32 dst;
1937 TCGv_i64 src;
1938
1939 src = gen_load_fpr_D(dc, rs);
1940 dst = gen_dest_fpr_F();
1941
1942 gen(dst, cpu_env, src);
1943
1944 gen_store_fpr_F(dc, rd, dst);
1945 }
1946
1947 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1948 void (*gen)(TCGv_i32, TCGv_ptr))
1949 {
1950 TCGv_i32 dst;
1951
1952 gen_op_load_fpr_QT1(QFPREG(rs));
1953 dst = gen_dest_fpr_F();
1954
1955 gen(dst, cpu_env);
1956
1957 gen_store_fpr_F(dc, rd, dst);
1958 }
1959
1960 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1961 void (*gen)(TCGv_i64, TCGv_ptr))
1962 {
1963 TCGv_i64 dst;
1964
1965 gen_op_load_fpr_QT1(QFPREG(rs));
1966 dst = gen_dest_fpr_D();
1967
1968 gen(dst, cpu_env);
1969
1970 gen_store_fpr_D(dc, rd, dst);
1971 }
1972
1973 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1974 void (*gen)(TCGv_ptr, TCGv_i32))
1975 {
1976 TCGv_i32 src;
1977
1978 src = gen_load_fpr_F(dc, rs);
1979
1980 gen(cpu_env, src);
1981
1982 gen_op_store_QT0_fpr(QFPREG(rd));
1983 gen_update_fprs_dirty(QFPREG(rd));
1984 }
1985
1986 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1987 void (*gen)(TCGv_ptr, TCGv_i64))
1988 {
1989 TCGv_i64 src;
1990
1991 src = gen_load_fpr_D(dc, rs);
1992
1993 gen(cpu_env, src);
1994
1995 gen_op_store_QT0_fpr(QFPREG(rd));
1996 gen_update_fprs_dirty(QFPREG(rd));
1997 }
1998
1999 /* asi moves */
2000 #ifdef TARGET_SPARC64
2001 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
2002 {
2003 int asi;
2004 TCGv_i32 r_asi;
2005
2006 if (IS_IMM) {
2007 r_asi = tcg_temp_new_i32();
2008 tcg_gen_mov_i32(r_asi, cpu_asi);
2009 } else {
2010 asi = GET_FIELD(insn, 19, 26);
2011 r_asi = tcg_const_i32(asi);
2012 }
2013 return r_asi;
2014 }
2015
2016 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2017 int sign)
2018 {
2019 TCGv_i32 r_asi, r_size, r_sign;
2020
2021 r_asi = gen_get_asi(insn, addr);
2022 r_size = tcg_const_i32(size);
2023 r_sign = tcg_const_i32(sign);
2024 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
2025 tcg_temp_free_i32(r_sign);
2026 tcg_temp_free_i32(r_size);
2027 tcg_temp_free_i32(r_asi);
2028 }
2029
2030 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2031 {
2032 TCGv_i32 r_asi, r_size;
2033
2034 r_asi = gen_get_asi(insn, addr);
2035 r_size = tcg_const_i32(size);
2036 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2037 tcg_temp_free_i32(r_size);
2038 tcg_temp_free_i32(r_asi);
2039 }
2040
2041 static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
2042 {
2043 TCGv_i32 r_asi, r_size, r_rd;
2044
2045 r_asi = gen_get_asi(insn, addr);
2046 r_size = tcg_const_i32(size);
2047 r_rd = tcg_const_i32(rd);
2048 gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2049 tcg_temp_free_i32(r_rd);
2050 tcg_temp_free_i32(r_size);
2051 tcg_temp_free_i32(r_asi);
2052 }
2053
2054 static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
2055 {
2056 TCGv_i32 r_asi, r_size, r_rd;
2057
2058 r_asi = gen_get_asi(insn, addr);
2059 r_size = tcg_const_i32(size);
2060 r_rd = tcg_const_i32(rd);
2061 gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2062 tcg_temp_free_i32(r_rd);
2063 tcg_temp_free_i32(r_size);
2064 tcg_temp_free_i32(r_asi);
2065 }
2066
2067 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2068 {
2069 TCGv_i32 r_asi, r_size, r_sign;
2070
2071 r_asi = gen_get_asi(insn, addr);
2072 r_size = tcg_const_i32(4);
2073 r_sign = tcg_const_i32(0);
2074 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2075 tcg_temp_free_i32(r_sign);
2076 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2077 tcg_temp_free_i32(r_size);
2078 tcg_temp_free_i32(r_asi);
2079 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2080 }
2081
2082 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2083 int insn, int rd)
2084 {
2085 TCGv_i32 r_asi, r_rd;
2086
2087 r_asi = gen_get_asi(insn, addr);
2088 r_rd = tcg_const_i32(rd);
2089 gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
2090 tcg_temp_free_i32(r_rd);
2091 tcg_temp_free_i32(r_asi);
2092 }
2093
2094 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2095 int insn, int rd)
2096 {
2097 TCGv_i32 r_asi, r_size;
2098 TCGv lo = gen_load_gpr(dc, rd + 1);
2099
2100 tcg_gen_concat_tl_i64(cpu_tmp64, lo, hi);
2101 r_asi = gen_get_asi(insn, addr);
2102 r_size = tcg_const_i32(8);
2103 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2104 tcg_temp_free_i32(r_size);
2105 tcg_temp_free_i32(r_asi);
2106 }
2107
2108 static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
2109 TCGv val2, int insn, int rd)
2110 {
2111 TCGv val1 = gen_load_gpr(dc, rd);
2112 TCGv dst = gen_dest_gpr(dc, rd);
2113 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2114
2115 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2116 tcg_temp_free_i32(r_asi);
2117 gen_store_gpr(dc, rd, dst);
2118 }
2119
2120 static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
2121 TCGv val2, int insn, int rd)
2122 {
2123 TCGv val1 = gen_load_gpr(dc, rd);
2124 TCGv dst = gen_dest_gpr(dc, rd);
2125 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2126
2127 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2128 tcg_temp_free_i32(r_asi);
2129 gen_store_gpr(dc, rd, dst);
2130 }
2131
2132 #elif !defined(CONFIG_USER_ONLY)
2133
2134 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2135 int sign)
2136 {
2137 TCGv_i32 r_asi, r_size, r_sign;
2138
2139 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2140 r_size = tcg_const_i32(size);
2141 r_sign = tcg_const_i32(sign);
2142 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2143 tcg_temp_free(r_sign);
2144 tcg_temp_free(r_size);
2145 tcg_temp_free(r_asi);
2146 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2147 }
2148
2149 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2150 {
2151 TCGv_i32 r_asi, r_size;
2152
2153 tcg_gen_extu_tl_i64(cpu_tmp64, src);
2154 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2155 r_size = tcg_const_i32(size);
2156 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2157 tcg_temp_free(r_size);
2158 tcg_temp_free(r_asi);
2159 }
2160
2161 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2162 {
2163 TCGv_i32 r_asi, r_size, r_sign;
2164 TCGv_i64 r_val;
2165
2166 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2167 r_size = tcg_const_i32(4);
2168 r_sign = tcg_const_i32(0);
2169 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2170 tcg_temp_free(r_sign);
2171 r_val = tcg_temp_new_i64();
2172 tcg_gen_extu_tl_i64(r_val, src);
2173 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2174 tcg_temp_free_i64(r_val);
2175 tcg_temp_free(r_size);
2176 tcg_temp_free(r_asi);
2177 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2178 }
2179
2180 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2181 int insn, int rd)
2182 {
2183 TCGv_i32 r_asi, r_size, r_sign;
2184 TCGv t;
2185
2186 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2187 r_size = tcg_const_i32(8);
2188 r_sign = tcg_const_i32(0);
2189 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2190 tcg_temp_free(r_sign);
2191 tcg_temp_free(r_size);
2192 tcg_temp_free(r_asi);
2193
2194 t = gen_dest_gpr(dc, rd + 1);
2195 tcg_gen_trunc_i64_tl(t, cpu_tmp64);
2196 gen_store_gpr(dc, rd + 1, t);
2197
2198 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
2199 tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
2200 gen_store_gpr(dc, rd, hi);
2201 }
2202
2203 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2204 int insn, int rd)
2205 {
2206 TCGv_i32 r_asi, r_size;
2207 TCGv lo = gen_load_gpr(dc, rd + 1);
2208
2209 tcg_gen_concat_tl_i64(cpu_tmp64, lo, hi);
2210 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2211 r_size = tcg_const_i32(8);
2212 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2213 tcg_temp_free(r_size);
2214 tcg_temp_free(r_asi);
2215 }
2216 #endif
2217
2218 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2219 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2220 {
2221 TCGv_i64 r_val;
2222 TCGv_i32 r_asi, r_size;
2223
2224 gen_ld_asi(dst, addr, insn, 1, 0);
2225
2226 r_val = tcg_const_i64(0xffULL);
2227 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2228 r_size = tcg_const_i32(1);
2229 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2230 tcg_temp_free_i32(r_size);
2231 tcg_temp_free_i32(r_asi);
2232 tcg_temp_free_i64(r_val);
2233 }
2234 #endif
2235
2236 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2237 {
2238 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2239 return gen_load_gpr(dc, rs1);
2240 }
2241
2242 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2243 {
2244 if (IS_IMM) { /* immediate */
2245 target_long simm = GET_FIELDs(insn, 19, 31);
2246 TCGv t = get_temp_tl(dc);
2247 tcg_gen_movi_tl(t, simm);
2248 return t;
2249 } else { /* register */
2250 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2251 return gen_load_gpr(dc, rs2);
2252 }
2253 }
2254
2255 #ifdef TARGET_SPARC64
/* Conditional single-precision move: frd = cmp ? frs : frd.  */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_trunc_i64_i32(c32, cmp->c1);
    } else {
        /* Materialize the comparison as a 64-bit boolean first.  */
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F();
    zero = tcg_const_i32(0);

    /* dst = (c32 != 0) ? s1 : s2  */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}
2284
2285 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2286 {
2287 TCGv_i64 dst = gen_dest_fpr_D();
2288 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2289 gen_load_fpr_D(dc, rs),
2290 gen_load_fpr_D(dc, rd));
2291 gen_store_fpr_D(dc, rd, dst);
2292 }
2293
/* Conditional quad-precision move: copy both 64-bit halves of frs
   into frd when the comparison holds, else keep frd's halves.  */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
2306
/* Point r_tsptr at the current trap state: env->ts[env->tl & MAXTL_MASK].
   NOTE(review): the second parameter shadows the file-scope cpu_env
   TCGv_ptr; callers pass the global, but the shadowing is confusing.  */
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        /* Widen the i32 byte offset to pointer width before adding.  */
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
2331
/* VIS edge8/16/32{l,} instructions: compute the partial-store edge
   mask for the given element width, optionally setting the integer
   condition codes from s1 - s2 (the "cc" forms).  */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* The cc forms also perform subcc(s1, s2) on the flags.  */
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    /* Extract the per-input table indices and look up both masks.  */
    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    /* Compare the doubleword-aligned addresses, honoring 32-bit
       address masking (AM).  NOTE: this writes the masked addresses
       back into the caller's s1/s2 TCGvs.  */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
2434
2435 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2436 {
2437 TCGv tmp = tcg_temp_new();
2438
2439 tcg_gen_add_tl(tmp, s1, s2);
2440 tcg_gen_andi_tl(dst, tmp, -8);
2441 if (left) {
2442 tcg_gen_neg_tl(tmp, tmp);
2443 }
2444 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2445
2446 tcg_temp_free(tmp);
2447 }
2448
2449 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2450 {
2451 TCGv t1, t2, shift;
2452
2453 t1 = tcg_temp_new();
2454 t2 = tcg_temp_new();
2455 shift = tcg_temp_new();
2456
2457 tcg_gen_andi_tl(shift, gsr, 7);
2458 tcg_gen_shli_tl(shift, shift, 3);
2459 tcg_gen_shl_tl(t1, s1, shift);
2460
2461 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2462 shift of (up to 63) followed by a constant shift of 1. */
2463 tcg_gen_xori_tl(shift, shift, 63);
2464 tcg_gen_shr_tl(t2, s2, shift);
2465 tcg_gen_shri_tl(t2, t2, 1);
2466
2467 tcg_gen_or_tl(dst, t1, t2);
2468
2469 tcg_temp_free(t1);
2470 tcg_temp_free(t2);
2471 tcg_temp_free(shift);
2472 }
2473 #endif
2474
/* Bail out of decoding the current insn when the CPU model lacks the
   given integer-unit feature; jumps to the illegal_insn label in
   disas_sparc_insn.  Usable only inside that function.  */
#define CHECK_IU_FEATURE(dc, FEATURE) \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
        goto illegal_insn;
/* Likewise for FPU features; jumps to the nfpu_insn label instead.  */
#define CHECK_FPU_FEATURE(dc, FEATURE) \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
        goto nfpu_insn;
2481
2482 /* before an instruction, dc->pc must be static */
2483 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2484 {
2485 unsigned int opc, rs1, rs2, rd;
2486 TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
2487 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2488 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2489 target_long simm;
2490
2491 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2492 tcg_gen_debug_insn_start(dc->pc);
2493 }
2494
2495 opc = GET_FIELD(insn, 0, 1);
2496
2497 rd = GET_FIELD(insn, 2, 6);
2498
2499 cpu_tmp1 = cpu_src1 = tcg_temp_new();
2500 cpu_tmp2 = cpu_src2 = tcg_temp_new();
2501
2502 switch (opc) {
2503 case 0: /* branches/sethi */
2504 {
2505 unsigned int xop = GET_FIELD(insn, 7, 9);
2506 int32_t target;
2507 switch (xop) {
2508 #ifdef TARGET_SPARC64
2509 case 0x1: /* V9 BPcc */
2510 {
2511 int cc;
2512
2513 target = GET_FIELD_SP(insn, 0, 18);
2514 target = sign_extend(target, 19);
2515 target <<= 2;
2516 cc = GET_FIELD_SP(insn, 20, 21);
2517 if (cc == 0)
2518 do_branch(dc, target, insn, 0);
2519 else if (cc == 2)
2520 do_branch(dc, target, insn, 1);
2521 else
2522 goto illegal_insn;
2523 goto jmp_insn;
2524 }
2525 case 0x3: /* V9 BPr */
2526 {
2527 target = GET_FIELD_SP(insn, 0, 13) |
2528 (GET_FIELD_SP(insn, 20, 21) << 14);
2529 target = sign_extend(target, 16);
2530 target <<= 2;
2531 cpu_src1 = get_src1(dc, insn);
2532 do_branch_reg(dc, target, insn, cpu_src1);
2533 goto jmp_insn;
2534 }
2535 case 0x5: /* V9 FBPcc */
2536 {
2537 int cc = GET_FIELD_SP(insn, 20, 21);
2538 if (gen_trap_ifnofpu(dc)) {
2539 goto jmp_insn;
2540 }
2541 target = GET_FIELD_SP(insn, 0, 18);
2542 target = sign_extend(target, 19);
2543 target <<= 2;
2544 do_fbranch(dc, target, insn, cc);
2545 goto jmp_insn;
2546 }
2547 #else
2548 case 0x7: /* CBN+x */
2549 {
2550 goto ncp_insn;
2551 }
2552 #endif
2553 case 0x2: /* BN+x */
2554 {
2555 target = GET_FIELD(insn, 10, 31);
2556 target = sign_extend(target, 22);
2557 target <<= 2;
2558 do_branch(dc, target, insn, 0);
2559 goto jmp_insn;
2560 }
2561 case 0x6: /* FBN+x */
2562 {
2563 if (gen_trap_ifnofpu(dc)) {
2564 goto jmp_insn;
2565 }
2566 target = GET_FIELD(insn, 10, 31);
2567 target = sign_extend(target, 22);
2568 target <<= 2;
2569 do_fbranch(dc, target, insn, 0);
2570 goto jmp_insn;
2571 }
2572 case 0x4: /* SETHI */
2573 /* Special-case %g0 because that's the canonical nop. */
2574 if (rd) {
2575 uint32_t value = GET_FIELD(insn, 10, 31);
2576 TCGv t = gen_dest_gpr(dc, rd);
2577 tcg_gen_movi_tl(t, value << 10);
2578 gen_store_gpr(dc, rd, t);
2579 }
2580 break;
2581 case 0x0: /* UNIMPL */
2582 default:
2583 goto illegal_insn;
2584 }
2585 break;
2586 }
2587 break;
2588 case 1: /*CALL*/
2589 {
2590 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2591 TCGv o7 = gen_dest_gpr(dc, 15);
2592
2593 tcg_gen_movi_tl(o7, dc->pc);
2594 gen_store_gpr(dc, 15, o7);
2595 target += dc->pc;
2596 gen_mov_pc_npc(dc);
2597 #ifdef TARGET_SPARC64
2598 if (unlikely(AM_CHECK(dc))) {
2599 target &= 0xffffffffULL;
2600 }
2601 #endif
2602 dc->npc = target;
2603 }
2604 goto jmp_insn;
2605 case 2: /* FPU & Logical Operations */
2606 {
2607 unsigned int xop = GET_FIELD(insn, 7, 12);
2608 if (xop == 0x3a) { /* generate trap */
2609 int cond = GET_FIELD(insn, 3, 6);
2610 TCGv_i32 trap;
2611 int l1 = -1, mask;
2612
2613 if (cond == 0) {
2614 /* Trap never. */
2615 break;
2616 }
2617
2618 save_state(dc);
2619
2620 if (cond != 8) {
2621 /* Conditional trap. */
2622 DisasCompare cmp;
2623 #ifdef TARGET_SPARC64
2624 /* V9 icc/xcc */
2625 int cc = GET_FIELD_SP(insn, 11, 12);
2626 if (cc == 0) {
2627 gen_compare(&cmp, 0, cond, dc);
2628 } else if (cc == 2) {
2629 gen_compare(&cmp, 1, cond, dc);
2630 } else {
2631 goto illegal_insn;
2632 }
2633 #else
2634 gen_compare(&cmp, 0, cond, dc);
2635 #endif
2636 l1 = gen_new_label();
2637 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2638 cmp.c1, cmp.c2, l1);
2639 free_compare(&cmp);
2640 }
2641
2642 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2643 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2644
2645 /* Don't use the normal temporaries, as they may well have
2646 gone out of scope with the branch above. While we're
2647 doing that we might as well pre-truncate to 32-bit. */
2648 trap = tcg_temp_new_i32();
2649
2650 rs1 = GET_FIELD_SP(insn, 14, 18);
2651 if (IS_IMM) {
2652 rs2 = GET_FIELD_SP(insn, 0, 6);
2653 if (rs1 == 0) {
2654 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2655 /* Signal that the trap value is fully constant. */
2656 mask = 0;
2657 } else {
2658 TCGv t1 = gen_load_gpr(dc, rs1);
2659 tcg_gen_trunc_tl_i32(trap, t1);
2660 tcg_gen_addi_i32(trap, trap, rs2);
2661 }
2662 } else {
2663 TCGv t1, t2;
2664 rs2 = GET_FIELD_SP(insn, 0, 4);
2665 t1 = gen_load_gpr(dc, rs1);
2666 t2 = gen_load_gpr(dc, rs2);
2667 tcg_gen_add_tl(t1, t1, t2);
2668 tcg_gen_trunc_tl_i32(trap, t1);
2669 }
2670 if (mask != 0) {
2671 tcg_gen_andi_i32(trap, trap, mask);
2672 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2673 }
2674
2675 gen_helper_raise_exception(cpu_env, trap);
2676 tcg_temp_free_i32(trap);
2677
2678 if (cond == 8) {
2679 /* An unconditional trap ends the TB. */
2680 dc->is_br = 1;
2681 goto jmp_insn;
2682 } else {
2683 /* A conditional trap falls through to the next insn. */
2684 gen_set_label(l1);
2685 break;
2686 }
2687 } else if (xop == 0x28) {
2688 rs1 = GET_FIELD(insn, 13, 17);
2689 switch(rs1) {
2690 case 0: /* rdy */
2691 #ifndef TARGET_SPARC64
2692 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2693 manual, rdy on the microSPARC
2694 II */
2695 case 0x0f: /* stbar in the SPARCv8 manual,
2696 rdy on the microSPARC II */
2697 case 0x10 ... 0x1f: /* implementation-dependent in the
2698 SPARCv8 manual, rdy on the
2699 microSPARC II */
2700 /* Read Asr17 */
2701 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2702 TCGv t = gen_dest_gpr(dc, rd);
2703 /* Read Asr17 for a Leon3 monoprocessor */
2704 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2705 gen_store_gpr(dc, rd, t);
2706 break;
2707 }
2708 #endif
2709 gen_store_gpr(dc, rd, cpu_y);
2710 break;
2711 #ifdef TARGET_SPARC64
2712 case 0x2: /* V9 rdccr */
2713 update_psr(dc);
2714 gen_helper_rdccr(cpu_dst, cpu_env);
2715 gen_store_gpr(dc, rd, cpu_dst);
2716 break;
2717 case 0x3: /* V9 rdasi */
2718 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2719 gen_store_gpr(dc, rd, cpu_dst);
2720 break;
2721 case 0x4: /* V9 rdtick */
2722 {
2723 TCGv_ptr r_tickptr;
2724
2725 r_tickptr = tcg_temp_new_ptr();
2726 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2727 offsetof(CPUSPARCState, tick));
2728 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2729 tcg_temp_free_ptr(r_tickptr);
2730 gen_store_gpr(dc, rd, cpu_dst);
2731 }
2732 break;
2733 case 0x5: /* V9 rdpc */
2734 {
2735 TCGv t = gen_dest_gpr(dc, rd);
2736 if (unlikely(AM_CHECK(dc))) {
2737 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2738 } else {
2739 tcg_gen_movi_tl(t, dc->pc);
2740 }
2741 gen_store_gpr(dc, rd, t);
2742 }
2743 break;
2744 case 0x6: /* V9 rdfprs */
2745 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2746 gen_store_gpr(dc, rd, cpu_dst);
2747 break;
2748 case 0xf: /* V9 membar */
2749 break; /* no effect */
2750 case 0x13: /* Graphics Status */
2751 if (gen_trap_ifnofpu(dc)) {
2752 goto jmp_insn;
2753 }
2754 gen_store_gpr(dc, rd, cpu_gsr);
2755 break;
2756 case 0x16: /* Softint */
2757 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2758 gen_store_gpr(dc, rd, cpu_dst);
2759 break;
2760 case 0x17: /* Tick compare */
2761 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2762 break;
2763 case 0x18: /* System tick */
2764 {
2765 TCGv_ptr r_tickptr;
2766
2767 r_tickptr = tcg_temp_new_ptr();
2768 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2769 offsetof(CPUSPARCState, stick));
2770 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2771 tcg_temp_free_ptr(r_tickptr);
2772 gen_store_gpr(dc, rd, cpu_dst);
2773 }
2774 break;
2775 case 0x19: /* System tick compare */
2776 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2777 break;
2778 case 0x10: /* Performance Control */
2779 case 0x11: /* Performance Instrumentation Counter */
2780 case 0x12: /* Dispatch Control */
2781 case 0x14: /* Softint set, WO */
2782 case 0x15: /* Softint clear, WO */
2783 #endif
2784 default:
2785 goto illegal_insn;
2786 }
2787 #if !defined(CONFIG_USER_ONLY)
2788 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2789 #ifndef TARGET_SPARC64
2790 if (!supervisor(dc)) {
2791 goto priv_insn;
2792 }
2793 update_psr(dc);
2794 gen_helper_rdpsr(cpu_dst, cpu_env);
2795 #else
2796 CHECK_IU_FEATURE(dc, HYPV);
2797 if (!hypervisor(dc))
2798 goto priv_insn;
2799 rs1 = GET_FIELD(insn, 13, 17);
2800 switch (rs1) {
2801 case 0: // hpstate
2802 // gen_op_rdhpstate();
2803 break;
2804 case 1: // htstate
2805 // gen_op_rdhtstate();
2806 break;
2807 case 3: // hintp
2808 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2809 break;
2810 case 5: // htba
2811 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2812 break;
2813 case 6: // hver
2814 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2815 break;
2816 case 31: // hstick_cmpr
2817 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2818 break;
2819 default:
2820 goto illegal_insn;
2821 }
2822 #endif
2823 gen_store_gpr(dc, rd, cpu_dst);
2824 break;
2825 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2826 if (!supervisor(dc))
2827 goto priv_insn;
2828 #ifdef TARGET_SPARC64
2829 rs1 = GET_FIELD(insn, 13, 17);
2830 switch (rs1) {
2831 case 0: // tpc
2832 {
2833 TCGv_ptr r_tsptr;
2834
2835 r_tsptr = tcg_temp_new_ptr();
2836 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2837 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2838 offsetof(trap_state, tpc));
2839 tcg_temp_free_ptr(r_tsptr);
2840 }
2841 break;
2842 case 1: // tnpc
2843 {
2844 TCGv_ptr r_tsptr;
2845
2846 r_tsptr = tcg_temp_new_ptr();
2847 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2848 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2849 offsetof(trap_state, tnpc));
2850 tcg_temp_free_ptr(r_tsptr);
2851 }
2852 break;
2853 case 2: // tstate
2854 {
2855 TCGv_ptr r_tsptr;
2856
2857 r_tsptr = tcg_temp_new_ptr();
2858 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2859 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2860 offsetof(trap_state, tstate));
2861 tcg_temp_free_ptr(r_tsptr);
2862 }
2863 break;
2864 case 3: // tt
2865 {
2866 TCGv_ptr r_tsptr;
2867
2868 r_tsptr = tcg_temp_new_ptr();
2869 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2870 tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
2871 offsetof(trap_state, tt));
2872 tcg_temp_free_ptr(r_tsptr);
2873 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2874 }
2875 break;
2876 case 4: // tick
2877 {
2878 TCGv_ptr r_tickptr;
2879
2880 r_tickptr = tcg_temp_new_ptr();
2881 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2882 offsetof(CPUSPARCState, tick));
2883 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2884 tcg_temp_free_ptr(r_tickptr);
2885 }
2886 break;
2887 case 5: // tba
2888 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2889 break;
2890 case 6: // pstate
2891 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2892 offsetof(CPUSPARCState, pstate));
2893 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2894 break;
2895 case 7: // tl
2896 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2897 offsetof(CPUSPARCState, tl));
2898 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2899 break;
2900 case 8: // pil
2901 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2902 offsetof(CPUSPARCState, psrpil));
2903 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2904 break;
2905 case 9: // cwp
2906 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2907 break;
2908 case 10: // cansave
2909 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2910 offsetof(CPUSPARCState, cansave));
2911 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2912 break;
2913 case 11: // canrestore
2914 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2915 offsetof(CPUSPARCState, canrestore));
2916 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2917 break;
2918 case 12: // cleanwin
2919 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2920 offsetof(CPUSPARCState, cleanwin));
2921 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2922 break;
2923 case 13: // otherwin
2924 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2925 offsetof(CPUSPARCState, otherwin));
2926 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2927 break;
2928 case 14: // wstate
2929 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2930 offsetof(CPUSPARCState, wstate));
2931 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2932 break;
2933 case 16: // UA2005 gl
2934 CHECK_IU_FEATURE(dc, GL);
2935 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2936 offsetof(CPUSPARCState, gl));
2937 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2938 break;
2939 case 26: // UA2005 strand status
2940 CHECK_IU_FEATURE(dc, HYPV);
2941 if (!hypervisor(dc))
2942 goto priv_insn;
2943 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2944 break;
2945 case 31: // ver
2946 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2947 break;
2948 case 15: // fq
2949 default:
2950 goto illegal_insn;
2951 }
2952 #else
2953 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2954 #endif
2955 gen_store_gpr(dc, rd, cpu_tmp0);
2956 break;
2957 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2958 #ifdef TARGET_SPARC64
2959 save_state(dc);
2960 gen_helper_flushw(cpu_env);
2961 #else
2962 if (!supervisor(dc))
2963 goto priv_insn;
2964 gen_store_gpr(dc, rd, cpu_tbr);
2965 #endif
2966 break;
2967 #endif
2968 } else if (xop == 0x34) { /* FPU Operations */
2969 if (gen_trap_ifnofpu(dc)) {
2970 goto jmp_insn;
2971 }
2972 gen_op_clear_ieee_excp_and_FTT();
2973 rs1 = GET_FIELD(insn, 13, 17);
2974 rs2 = GET_FIELD(insn, 27, 31);
2975 xop = GET_FIELD(insn, 18, 26);
2976 save_state(dc);
2977 switch (xop) {
2978 case 0x1: /* fmovs */
2979 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2980 gen_store_fpr_F(dc, rd, cpu_src1_32);
2981 break;
2982 case 0x5: /* fnegs */
2983 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2984 break;
2985 case 0x9: /* fabss */
2986 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2987 break;
2988 case 0x29: /* fsqrts */
2989 CHECK_FPU_FEATURE(dc, FSQRT);
2990 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2991 break;
2992 case 0x2a: /* fsqrtd */
2993 CHECK_FPU_FEATURE(dc, FSQRT);
2994 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2995 break;
2996 case 0x2b: /* fsqrtq */
2997 CHECK_FPU_FEATURE(dc, FLOAT128);
2998 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2999 break;
3000 case 0x41: /* fadds */
3001 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3002 break;
3003 case 0x42: /* faddd */
3004 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3005 break;
3006 case 0x43: /* faddq */
3007 CHECK_FPU_FEATURE(dc, FLOAT128);
3008 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3009 break;
3010 case 0x45: /* fsubs */
3011 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3012 break;
3013 case 0x46: /* fsubd */
3014 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3015 break;
3016 case 0x47: /* fsubq */
3017 CHECK_FPU_FEATURE(dc, FLOAT128);
3018 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3019 break;
3020 case 0x49: /* fmuls */
3021 CHECK_FPU_FEATURE(dc, FMUL);
3022 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3023 break;
3024 case 0x4a: /* fmuld */
3025 CHECK_FPU_FEATURE(dc, FMUL);
3026 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3027 break;
3028 case 0x4b: /* fmulq */
3029 CHECK_FPU_FEATURE(dc, FLOAT128);
3030 CHECK_FPU_FEATURE(dc, FMUL);
3031 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3032 break;
3033 case 0x4d: /* fdivs */
3034 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3035 break;
3036 case 0x4e: /* fdivd */
3037 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3038 break;
3039 case 0x4f: /* fdivq */
3040 CHECK_FPU_FEATURE(dc, FLOAT128);
3041 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3042 break;
3043 case 0x69: /* fsmuld */
3044 CHECK_FPU_FEATURE(dc, FSMULD);
3045 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3046 break;
3047 case 0x6e: /* fdmulq */
3048 CHECK_FPU_FEATURE(dc, FLOAT128);
3049 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3050 break;
3051 case 0xc4: /* fitos */
3052 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3053 break;
3054 case 0xc6: /* fdtos */
3055 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3056 break;
3057 case 0xc7: /* fqtos */
3058 CHECK_FPU_FEATURE(dc, FLOAT128);
3059 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3060 break;
3061 case 0xc8: /* fitod */
3062 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3063 break;
3064 case 0xc9: /* fstod */
3065 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3066 break;
3067 case 0xcb: /* fqtod */
3068 CHECK_FPU_FEATURE(dc, FLOAT128);
3069 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3070 break;
3071 case 0xcc: /* fitoq */
3072 CHECK_FPU_FEATURE(dc, FLOAT128);
3073 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3074 break;
3075 case 0xcd: /* fstoq */
3076 CHECK_FPU_FEATURE(dc, FLOAT128);
3077 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3078 break;
3079 case 0xce: /* fdtoq */
3080 CHECK_FPU_FEATURE(dc, FLOAT128);
3081 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3082 break;
3083 case 0xd1: /* fstoi */
3084 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3085 break;
3086 case 0xd2: /* fdtoi */
3087 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3088 break;
3089 case 0xd3: /* fqtoi */
3090 CHECK_FPU_FEATURE(dc, FLOAT128);
3091 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3092 break;
3093 #ifdef TARGET_SPARC64
3094 case 0x2: /* V9 fmovd */
3095 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3096 gen_store_fpr_D(dc, rd, cpu_src1_64);
3097 break;
3098 case 0x3: /* V9 fmovq */
3099 CHECK_FPU_FEATURE(dc, FLOAT128);
3100 gen_move_Q(rd, rs2);
3101 break;
3102 case 0x6: /* V9 fnegd */
3103 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3104 break;
3105 case 0x7: /* V9 fnegq */
3106 CHECK_FPU_FEATURE(dc, FLOAT128);
3107 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3108 break;
3109 case 0xa: /* V9 fabsd */
3110 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3111 break;
3112 case 0xb: /* V9 fabsq */
3113 CHECK_FPU_FEATURE(dc, FLOAT128);
3114 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3115 break;
3116 case 0x81: /* V9 fstox */
3117 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3118 break;
3119 case 0x82: /* V9 fdtox */
3120 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3121 break;
3122 case 0x83: /* V9 fqtox */
3123 CHECK_FPU_FEATURE(dc, FLOAT128);
3124 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3125 break;
3126 case 0x84: /* V9 fxtos */
3127 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3128 break;
3129 case 0x88: /* V9 fxtod */
3130 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3131 break;
3132 case 0x8c: /* V9 fxtoq */
3133 CHECK_FPU_FEATURE(dc, FLOAT128);
3134 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3135 break;
3136 #endif
3137 default:
3138 goto illegal_insn;
3139 }
3140 } else if (xop == 0x35) { /* FPU Operations */
3141 #ifdef TARGET_SPARC64
3142 int cond;
3143 #endif
3144 if (gen_trap_ifnofpu(dc)) {
3145 goto jmp_insn;
3146 }
3147 gen_op_clear_ieee_excp_and_FTT();
3148 rs1 = GET_FIELD(insn, 13, 17);
3149 rs2 = GET_FIELD(insn, 27, 31);
3150 xop = GET_FIELD(insn, 18, 26);
3151 save_state(dc);
3152
3153 #ifdef TARGET_SPARC64
3154 #define FMOVR(sz) \
3155 do { \
3156 DisasCompare cmp; \
3157 cond = GET_FIELD_SP(insn, 14, 17); \
3158 cpu_src1 = get_src1(dc, insn); \
3159 gen_compare_reg(&cmp, cond, cpu_src1); \
3160 gen_fmov##sz(dc, &cmp, rd, rs2); \
3161 free_compare(&cmp); \
3162 } while (0)
3163
3164 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3165 FMOVR(s);
3166 break;
3167 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3168 FMOVR(d);
3169 break;
3170 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3171 CHECK_FPU_FEATURE(dc, FLOAT128);
3172 FMOVR(q);
3173 break;
3174 }
3175 #undef FMOVR
3176 #endif
3177 switch (xop) {
3178 #ifdef TARGET_SPARC64
3179 #define FMOVCC(fcc, sz) \
3180 do { \
3181 DisasCompare cmp; \
3182 cond = GET_FIELD_SP(insn, 14, 17); \
3183 gen_fcompare(&cmp, fcc, cond); \
3184 gen_fmov##sz(dc, &cmp, rd, rs2); \
3185 free_compare(&cmp); \
3186 } while (0)
3187
3188 case 0x001: /* V9 fmovscc %fcc0 */
3189 FMOVCC(0, s);
3190 break;
3191 case 0x002: /* V9 fmovdcc %fcc0 */
3192 FMOVCC(0, d);
3193 break;
3194 case 0x003: /* V9 fmovqcc %fcc0 */
3195 CHECK_FPU_FEATURE(dc, FLOAT128);
3196 FMOVCC(0, q);
3197 break;
3198 case 0x041: /* V9 fmovscc %fcc1 */
3199 FMOVCC(1, s);
3200 break;
3201 case 0x042: /* V9 fmovdcc %fcc1 */
3202 FMOVCC(1, d);
3203 break;
3204 case 0x043: /* V9 fmovqcc %fcc1 */
3205 CHECK_FPU_FEATURE(dc, FLOAT128);
3206 FMOVCC(1, q);
3207 break;
3208 case 0x081: /* V9 fmovscc %fcc2 */
3209 FMOVCC(2, s);
3210 break;
3211 case 0x082: /* V9 fmovdcc %fcc2 */
3212 FMOVCC(2, d);
3213 break;
3214 case 0x083: /* V9 fmovqcc %fcc2 */
3215 CHECK_FPU_FEATURE(dc, FLOAT128);
3216 FMOVCC(2, q);
3217 break;
3218 case 0x0c1: /* V9 fmovscc %fcc3 */
3219 FMOVCC(3, s);
3220 break;
3221 case 0x0c2: /* V9 fmovdcc %fcc3 */
3222 FMOVCC(3, d);
3223 break;
3224 case 0x0c3: /* V9 fmovqcc %fcc3 */
3225 CHECK_FPU_FEATURE(dc, FLOAT128);
3226 FMOVCC(3, q);
3227 break;
3228 #undef FMOVCC
3229 #define FMOVCC(xcc, sz) \
3230 do { \
3231 DisasCompare cmp; \
3232 cond = GET_FIELD_SP(insn, 14, 17); \
3233 gen_compare(&cmp, xcc, cond, dc); \
3234 gen_fmov##sz(dc, &cmp, rd, rs2); \
3235 free_compare(&cmp); \
3236 } while (0)
3237
3238 case 0x101: /* V9 fmovscc %icc */
3239 FMOVCC(0, s);
3240 break;
3241 case 0x102: /* V9 fmovdcc %icc */
3242 FMOVCC(0, d);
3243 break;
3244 case 0x103: /* V9 fmovqcc %icc */
3245 CHECK_FPU_FEATURE(dc, FLOAT128);
3246 FMOVCC(0, q);
3247 break;
3248 case 0x181: /* V9 fmovscc %xcc */
3249 FMOVCC(1, s);
3250 break;
3251 case 0x182: /* V9 fmovdcc %xcc */
3252 FMOVCC(1, d);
3253 break;
3254 case 0x183: /* V9 fmovqcc %xcc */
3255 CHECK_FPU_FEATURE(dc, FLOAT128);
3256 FMOVCC(1, q);
3257 break;
3258 #undef FMOVCC
3259 #endif
3260 case 0x51: /* fcmps, V9 %fcc */
3261 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3262 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3263 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3264 break;
3265 case 0x52: /* fcmpd, V9 %fcc */
3266 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3267 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3268 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3269 break;
3270 case 0x53: /* fcmpq, V9 %fcc */
3271 CHECK_FPU_FEATURE(dc, FLOAT128);
3272 gen_op_load_fpr_QT0(QFPREG(rs1));
3273 gen_op_load_fpr_QT1(QFPREG(rs2));
3274 gen_op_fcmpq(rd & 3);
3275 break;
3276 case 0x55: /* fcmpes, V9 %fcc */
3277 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3278 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3279 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3280 break;
3281 case 0x56: /* fcmped, V9 %fcc */
3282 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3283 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3284 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3285 break;
3286 case 0x57: /* fcmpeq, V9 %fcc */
3287 CHECK_FPU_FEATURE(dc, FLOAT128);
3288 gen_op_load_fpr_QT0(QFPREG(rs1));
3289 gen_op_load_fpr_QT1(QFPREG(rs2));
3290 gen_op_fcmpeq(rd & 3);
3291 break;
3292 default:
3293 goto illegal_insn;
3294 }
3295 } else if (xop == 0x2) {
3296 TCGv dst = gen_dest_gpr(dc, rd);
3297 rs1 = GET_FIELD(insn, 13, 17);
3298 if (rs1 == 0) {
3299 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3300 if (IS_IMM) { /* immediate */
3301 simm = GET_FIELDs(insn, 19, 31);
3302 tcg_gen_movi_tl(dst, simm);
3303 gen_store_gpr(dc, rd, dst);
3304 } else { /* register */
3305 rs2 = GET_FIELD(insn, 27, 31);
3306 if (rs2 == 0) {
3307 tcg_gen_movi_tl(dst, 0);
3308 gen_store_gpr(dc, rd, dst);
3309 } else {
3310 cpu_src2 = gen_load_gpr(dc, rs2);
3311 gen_store_gpr(dc, rd, cpu_src2);
3312 }
3313 }
3314 } else {
3315 cpu_src1 = get_src1(dc, insn);
3316 if (IS_IMM) { /* immediate */
3317 simm = GET_FIELDs(insn, 19, 31);
3318 tcg_gen_ori_tl(dst, cpu_src1, simm);
3319 gen_store_gpr(dc, rd, dst);
3320 } else { /* register */
3321 rs2 = GET_FIELD(insn, 27, 31);
3322 if (rs2 == 0) {
3323 /* mov shortcut: or x, %g0, y -> mov x, y */
3324 gen_store_gpr(dc, rd, cpu_src1);
3325 } else {
3326 cpu_src2 = gen_load_gpr(dc, rs2);
3327 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3328 gen_store_gpr(dc, rd, dst);
3329 }
3330 }
3331 }
3332 #ifdef TARGET_SPARC64
3333 } else if (xop == 0x25) { /* sll, V9 sllx */
3334 cpu_src1 = get_src1(dc, insn);
3335 if (IS_IMM) { /* immediate */
3336 simm = GET_FIELDs(insn, 20, 31);
3337 if (insn & (1 << 12)) {
3338 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3339 } else {
3340 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3341 }
3342 } else { /* register */
3343 rs2 = GET_FIELD(insn, 27, 31);
3344 cpu_src2 = gen_load_gpr(dc, rs2);
3345 if (insn & (1 << 12)) {
3346 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3347 } else {
3348 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3349 }
3350 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3351 }
3352 gen_store_gpr(dc, rd, cpu_dst);
3353 } else if (xop == 0x26) { /* srl, V9 srlx */
3354 cpu_src1 = get_src1(dc, insn);
3355 if (IS_IMM) { /* immediate */
3356 simm = GET_FIELDs(insn, 20, 31);
3357 if (insn & (1 << 12)) {
3358 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3359 } else {
3360 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3361 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3362 }
3363 } else { /* register */
3364 rs2 = GET_FIELD(insn, 27, 31);
3365 cpu_src2 = gen_load_gpr(dc, rs2);
3366 if (insn & (1 << 12)) {
3367 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3368 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3369 } else {
3370 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3371 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3372 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3373 }
3374 }
3375 gen_store_gpr(dc, rd, cpu_dst);
3376 } else if (xop == 0x27) { /* sra, V9 srax */
3377 cpu_src1 = get_src1(dc, insn);
3378 if (IS_IMM) { /* immediate */
3379 simm = GET_FIELDs(insn, 20, 31);
3380 if (insn & (1 << 12)) {
3381 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3382 } else {
3383 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3384 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3385 }
3386 } else { /* register */
3387 rs2 = GET_FIELD(insn, 27, 31);
3388 cpu_src2 = gen_load_gpr(dc, rs2);
3389 if (insn & (1 << 12)) {
3390 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3391 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3392 } else {
3393 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3394 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3395 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3396 }
3397 }
3398 gen_store_gpr(dc, rd, cpu_dst);
3399 #endif
3400 } else if (xop < 0x36) {
3401 if (xop < 0x20) {
3402 cpu_src1 = get_src1(dc, insn);
3403 cpu_src2 = get_src2(dc, insn);
3404 switch (xop & ~0x10) {
3405 case 0x0: /* add */
3406 if (xop & 0x10) {
3407 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3408 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3409 dc->cc_op = CC_OP_ADD;
3410 } else {
3411 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3412 }
3413 break;
3414 case 0x1: /* and */
3415 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3416 if (xop & 0x10) {
3417 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3418 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3419 dc->cc_op = CC_OP_LOGIC;
3420 }
3421 break;
3422 case 0x2: /* or */
3423 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3424 if (xop & 0x10) {
3425 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3426 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3427 dc->cc_op = CC_OP_LOGIC;
3428 }
3429 break;
3430 case 0x3: /* xor */
3431 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3432 if (xop & 0x10) {
3433 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3434 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3435 dc->cc_op = CC_OP_LOGIC;
3436 }
3437 break;
3438 case 0x4: /* sub */
3439 if (xop & 0x10) {
3440 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3441 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3442 dc->cc_op = CC_OP_SUB;
3443 } else {
3444 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3445 }
3446 break;
3447 case 0x5: /* andn */
3448 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3449 if (xop & 0x10) {
3450 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3451 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3452 dc->cc_op = CC_OP_LOGIC;
3453 }
3454 break;
3455 case 0x6: /* orn */
3456 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3457 if (xop & 0x10) {
3458 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3459 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3460 dc->cc_op = CC_OP_LOGIC;
3461 }
3462 break;
3463 case 0x7: /* xorn */
3464 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3465 if (xop & 0x10) {
3466 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3467 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3468 dc->cc_op = CC_OP_LOGIC;
3469 }
3470 break;
3471 case 0x8: /* addx, V9 addc */
3472 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3473 (xop & 0x10));
3474 break;
3475 #ifdef TARGET_SPARC64
3476 case 0x9: /* V9 mulx */
3477 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3478 break;
3479 #endif
3480 case 0xa: /* umul */
3481 CHECK_IU_FEATURE(dc, MUL);
3482 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3483 if (xop & 0x10) {
3484 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3485 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3486 dc->cc_op = CC_OP_LOGIC;
3487 }
3488 break;
3489 case 0xb: /* smul */
3490 CHECK_IU_FEATURE(dc, MUL);
3491 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3492 if (xop & 0x10) {
3493 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3494 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3495 dc->cc_op = CC_OP_LOGIC;
3496 }
3497 break;
3498 case 0xc: /* subx, V9 subc */
3499 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3500 (xop & 0x10));
3501 break;
3502 #ifdef TARGET_SPARC64
3503 case 0xd: /* V9 udivx */
3504 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3505 break;
3506 #endif
3507 case 0xe: /* udiv */
3508 CHECK_IU_FEATURE(dc, DIV);
3509 if (xop & 0x10) {
3510 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3511 cpu_src2);
3512 dc->cc_op = CC_OP_DIV;
3513 } else {
3514 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3515 cpu_src2);
3516 }
3517 break;
3518 case 0xf: /* sdiv */
3519 CHECK_IU_FEATURE(dc, DIV);
3520 if (xop & 0x10) {
3521 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3522 cpu_src2);
3523 dc->cc_op = CC_OP_DIV;
3524 } else {
3525 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3526 cpu_src2);
3527 }
3528 break;
3529 default:
3530 goto illegal_insn;
3531 }
3532 gen_store_gpr(dc, rd, cpu_dst);
3533 } else {
3534 cpu_src1 = get_src1(dc, insn);
3535 cpu_src2 = get_src2(dc, insn);
3536 switch (xop) {
3537 case 0x20: /* taddcc */
3538 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3539 gen_store_gpr(dc, rd, cpu_dst);
3540 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3541 dc->cc_op = CC_OP_TADD;
3542 break;
3543 case 0x21: /* tsubcc */
3544 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3545 gen_store_gpr(dc, rd, cpu_dst);
3546 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3547 dc->cc_op = CC_OP_TSUB;
3548 break;
3549 case 0x22: /* taddcctv */
3550 gen_helper_taddcctv(cpu_dst, cpu_env,
3551 cpu_src1, cpu_src2);
3552 gen_store_gpr(dc, rd, cpu_dst);
3553 dc->cc_op = CC_OP_TADDTV;
3554 break;
3555 case 0x23: /* tsubcctv */
3556 gen_helper_tsubcctv(cpu_dst, cpu_env,
3557 cpu_src1, cpu_src2);
3558 gen_store_gpr(dc, rd, cpu_dst);
3559 dc->cc_op = CC_OP_TSUBTV;
3560 break;
3561 case 0x24: /* mulscc */
3562 update_psr(dc);
3563 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3564 gen_store_gpr(dc, rd, cpu_dst);
3565 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3566 dc->cc_op = CC_OP_ADD;
3567 break;
3568 #ifndef TARGET_SPARC64
3569 case 0x25: /* sll */
3570 if (IS_IMM) { /* immediate */
3571 simm = GET_FIELDs(insn, 20, 31);
3572 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3573 } else { /* register */
3574 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3575 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3576 }
3577 gen_store_gpr(dc, rd, cpu_dst);
3578 break;
3579 case 0x26: /* srl */
3580 if (IS_IMM) { /* immediate */
3581 simm = GET_FIELDs(insn, 20, 31);
3582 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3583 } else { /* register */
3584 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3585 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3586 }
3587 gen_store_gpr(dc, rd, cpu_dst);
3588 break;
3589 case 0x27: /* sra */
3590 if (IS_IMM) { /* immediate */
3591 simm = GET_FIELDs(insn, 20, 31);
3592 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3593 } else { /* register */
3594 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3595 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3596 }
3597 gen_store_gpr(dc, rd, cpu_dst);
3598 break;
3599 #endif
3600 case 0x30:
3601 {
3602 switch(rd) {
3603 case 0: /* wry */
3604 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3605 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3606 break;
3607 #ifndef TARGET_SPARC64
3608 case 0x01 ... 0x0f: /* undefined in the
3609 SPARCv8 manual, nop
3610 on the microSPARC
3611 II */
3612 case 0x10 ... 0x1f: /* implementation-dependent
3613 in the SPARCv8
3614 manual, nop on the
3615 microSPARC II */
3616 break;
3617 #else
3618 case 0x2: /* V9 wrccr */
3619 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3620 gen_helper_wrccr(cpu_env, cpu_dst);
3621 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3622 dc->cc_op = CC_OP_FLAGS;
3623 break;
3624 case 0x3: /* V9 wrasi */
3625 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3626 tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
3627 tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
3628 break;
3629 case 0x6: /* V9 wrfprs */
3630 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3631 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
3632 save_state(dc);
3633 gen_op_next_insn();
3634 tcg_gen_exit_tb(0);
3635 dc->is_br = 1;
3636 break;
3637 case 0xf: /* V9 sir, nop if user */
3638 #if !defined(CONFIG_USER_ONLY)
3639 if (supervisor(dc)) {
3640 ; // XXX
3641 }
3642 #endif
3643 break;
3644 case 0x13: /* Graphics Status */
3645 if (gen_trap_ifnofpu(dc)) {
3646 goto jmp_insn;
3647 }
3648 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3649 break;
3650 case 0x14: /* Softint set */
3651 if (!supervisor(dc))
3652 goto illegal_insn;
3653 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3654 gen_helper_set_softint(cpu_env, cpu_tmp64);
3655 break;
3656 case 0x15: /* Softint clear */
3657 if (!supervisor(dc))
3658 goto illegal_insn;
3659 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3660 gen_helper_clear_softint(cpu_env, cpu_tmp64);
3661 break;
3662 case 0x16: /* Softint write */
3663 if (!supervisor(dc))
3664 goto illegal_insn;
3665 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3666 gen_helper_write_softint(cpu_env, cpu_tmp64);
3667 break;
3668 case 0x17: /* Tick compare */
3669 #if !defined(CONFIG_USER_ONLY)
3670 if (!supervisor(dc))
3671 goto illegal_insn;
3672 #endif
3673 {
3674 TCGv_ptr r_tickptr;
3675
3676 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3677 cpu_src2);
3678 r_tickptr = tcg_temp_new_ptr();
3679 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3680 offsetof(CPUSPARCState, tick));
3681 gen_helper_tick_set_limit(r_tickptr,
3682 cpu_tick_cmpr);
3683 tcg_temp_free_ptr(r_tickptr);
3684 }
3685 break;
3686 case 0x18: /* System tick */
3687 #if !defined(CONFIG_USER_ONLY)
3688 if (!supervisor(dc))
3689 goto illegal_insn;
3690 #endif
3691 {
3692 TCGv_ptr r_tickptr;
3693
3694 tcg_gen_xor_tl(cpu_dst, cpu_src1,
3695 cpu_src2);
3696 r_tickptr = tcg_temp_new_ptr();
3697 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3698 offsetof(CPUSPARCState, stick));
3699 gen_helper_tick_set_count(r_tickptr,
3700 cpu_dst);
3701 tcg_temp_free_ptr(r_tickptr);
3702 }
3703 break;
3704 case 0x19: /* System tick compare */
3705 #if !defined(CONFIG_USER_ONLY)
3706 if (!supervisor(dc))
3707 goto illegal_insn;
3708 #endif
3709 {
3710 TCGv_ptr r_tickptr;
3711
3712 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3713 cpu_src2);
3714 r_tickptr = tcg_temp_new_ptr();
3715 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3716 offsetof(CPUSPARCState, stick));
3717 gen_helper_tick_set_limit(r_tickptr,
3718 cpu_stick_cmpr);
3719 tcg_temp_free_ptr(r_tickptr);
3720 }
3721 break;
3722
3723 case 0x10: /* Performance Control */
3724 case 0x11: /* Performance Instrumentation
3725 Counter */
3726 case 0x12: /* Dispatch Control */
3727 #endif
3728 default:
3729 goto illegal_insn;
3730 }
3731 }
3732 break;
3733 #if !defined(CONFIG_USER_ONLY)
3734 case 0x31: /* wrpsr, V9 saved, restored */
3735 {
3736 if (!supervisor(dc))
3737 goto priv_insn;
3738 #ifdef TARGET_SPARC64
3739 switch (rd) {
3740 case 0:
3741 gen_helper_saved(cpu_env);
3742 break;
3743 case 1:
3744 gen_helper_restored(cpu_env);
3745 break;
3746 case 2: /* UA2005 allclean */
3747 case 3: /* UA2005 otherw */
3748 case 4: /* UA2005 normalw */
3749 case 5: /* UA2005 invalw */
3750 // XXX
3751 default:
3752 goto illegal_insn;
3753 }
3754 #else
3755 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3756 gen_helper_wrpsr(cpu_env, cpu_dst);
3757 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3758 dc->cc_op = CC_OP_FLAGS;
3759 save_state(dc);
3760 gen_op_next_insn();
3761 tcg_gen_exit_tb(0);
3762 dc->is_br = 1;
3763 #endif
3764 }
3765 break;
3766 case 0x32: /* wrwim, V9 wrpr */
3767 {
3768 if (!supervisor(dc))
3769 goto priv_insn;
3770 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3771 #ifdef TARGET_SPARC64
3772 switch (rd) {
3773 case 0: // tpc
3774 {
3775 TCGv_ptr r_tsptr;
3776
3777 r_tsptr = tcg_temp_new_ptr();
3778 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3779 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3780 offsetof(trap_state, tpc));
3781 tcg_temp_free_ptr(r_tsptr);
3782 }
3783 break;
3784 case 1: // tnpc
3785 {
3786 TCGv_ptr r_tsptr;
3787
3788 r_tsptr = tcg_temp_new_ptr();
3789 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3790 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3791 offsetof(trap_state, tnpc));
3792 tcg_temp_free_ptr(r_tsptr);
3793 }
3794 break;
3795 case 2: // tstate
3796 {
3797 TCGv_ptr r_tsptr;
3798
3799 r_tsptr = tcg_temp_new_ptr();
3800 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3801 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3802 offsetof(trap_state,
3803 tstate));
3804 tcg_temp_free_ptr(r_tsptr);
3805 }
3806 break;
3807 case 3: // tt
3808 {
3809 TCGv_ptr r_tsptr;
3810
3811 r_tsptr = tcg_temp_new_ptr();
3812 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3813 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3814 tcg_gen_st_i32(cpu_tmp32, r_tsptr,
3815 offsetof(trap_state, tt));
3816 tcg_temp_free_ptr(r_tsptr);
3817 }
3818 break;
3819 case 4: // tick
3820 {
3821 TCGv_ptr r_tickptr;
3822
3823 r_tickptr = tcg_temp_new_ptr();
3824 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3825 offsetof(CPUSPARCState, tick));
3826 gen_helper_tick_set_count(r_tickptr,
3827 cpu_tmp0);
3828 tcg_temp_free_ptr(r_tickptr);
3829 }
3830 break;
3831 case 5: // tba
3832 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3833 break;
3834 case 6: // pstate
3835 save_state(dc);
3836 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3837 dc->npc = DYNAMIC_PC;
3838 break;
3839 case 7: // tl
3840 save_state(dc);
3841 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3842 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3843 offsetof(CPUSPARCState, tl));
3844 dc->npc = DYNAMIC_PC;
3845 break;
3846 case 8: // pil
3847 gen_helper_wrpil(cpu_env, cpu_tmp0);
3848 break;
3849 case 9: // cwp
3850 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3851 break;
3852 case 10: // cansave
3853 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3854 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3855 offsetof(CPUSPARCState,
3856 cansave));
3857 break;
3858 case 11: // canrestore
3859 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3860 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3861 offsetof(CPUSPARCState,
3862 canrestore));
3863 break;
3864 case 12: // cleanwin
3865 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3866 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3867 offsetof(CPUSPARCState,
3868 cleanwin));
3869 break;
3870 case 13: // otherwin
3871 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3872 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3873 offsetof(CPUSPARCState,
3874 otherwin));
3875 break;
3876 case 14: // wstate
3877 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3878 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3879 offsetof(CPUSPARCState,
3880 wstate));
3881 break;
3882 case 16: // UA2005 gl
3883 CHECK_IU_FEATURE(dc, GL);
3884 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3885 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3886 offsetof(CPUSPARCState, gl));
3887 break;
3888 case 26: // UA2005 strand status
3889 CHECK_IU_FEATURE(dc, HYPV);
3890 if (!hypervisor(dc))
3891 goto priv_insn;
3892 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3893 break;
3894 default:
3895 goto illegal_insn;
3896 }
3897 #else
3898 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3899 if (dc->def->nwindows != 32)
3900 tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
3901 (1 << dc->def->nwindows) - 1);
3902 tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
3903 #endif
3904 }
3905 break;
3906 case 0x33: /* wrtbr, UA2005 wrhpr */
3907 {
3908 #ifndef TARGET_SPARC64
3909 if (!supervisor(dc))
3910 goto priv_insn;
3911 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3912 #else
3913 CHECK_IU_FEATURE(dc, HYPV);
3914 if (!hypervisor(dc))
3915 goto priv_insn;
3916 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3917 switch (rd) {
3918 case 0: // hpstate
3919 // XXX gen_op_wrhpstate();
3920 save_state(dc);
3921 gen_op_next_insn();
3922 tcg_gen_exit_tb(0);
3923 dc->is_br = 1;
3924 break;
3925 case 1: // htstate
3926 // XXX gen_op_wrhtstate();
3927 break;
3928 case 3: // hintp
3929 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3930 break;
3931 case 5: // htba
3932 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3933 break;
3934 case 31: // hstick_cmpr
3935 {
3936 TCGv_ptr r_tickptr;
3937
3938 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3939 r_tickptr = tcg_temp_new_ptr();
3940 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3941 offsetof(CPUSPARCState, hstick));
3942 gen_helper_tick_set_limit(r_tickptr,
3943 cpu_hstick_cmpr);
3944 tcg_temp_free_ptr(r_tickptr);
3945 }
3946 break;
3947 case 6: // hver readonly
3948 default:
3949 goto illegal_insn;
3950 }
3951 #endif
3952 }
3953 break;
3954 #endif
3955 #ifdef TARGET_SPARC64
3956 case 0x2c: /* V9 movcc */
3957 {
3958 int cc = GET_FIELD_SP(insn, 11, 12);
3959 int cond = GET_FIELD_SP(insn, 14, 17);
3960 DisasCompare cmp;
3961 TCGv dst;
3962
3963 if (insn & (1 << 18)) {
3964 if (cc == 0) {
3965 gen_compare(&cmp, 0, cond, dc);
3966 } else if (cc == 2) {
3967 gen_compare(&cmp, 1, cond, dc);
3968 } else {
3969 goto illegal_insn;
3970 }
3971 } else {
3972 gen_fcompare(&cmp, cc, cond);
3973 }
3974
3975 /* The get_src2 above loaded the normal 13-bit
3976 immediate field, not the 11-bit field we have
3977 in movcc. But it did handle the reg case. */
3978 if (IS_IMM) {
3979 simm = GET_FIELD_SPs(insn, 0, 10);
3980 tcg_gen_movi_tl(cpu_src2, simm);
3981 }
3982
3983 dst = gen_load_gpr(dc, rd);
3984 tcg_gen_movcond_tl(cmp.cond, dst,
3985 cmp.c1, cmp.c2,
3986 cpu_src2, dst);
3987 free_compare(&cmp);
3988 gen_store_gpr(dc, rd, dst);
3989 break;
3990 }
3991 case 0x2d: /* V9 sdivx */
3992 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3993 gen_store_gpr(dc, rd, cpu_dst);
3994 break;
3995 case 0x2e: /* V9 popc */
3996 gen_helper_popc(cpu_dst, cpu_src2);
3997 gen_store_gpr(dc, rd, cpu_dst);
3998 break;
3999 case 0x2f: /* V9 movr */
4000 {
4001 int cond = GET_FIELD_SP(insn, 10, 12);
4002 DisasCompare cmp;
4003 TCGv dst;
4004
4005 gen_compare_reg(&cmp, cond, cpu_src1);
4006
4007 /* The get_src2 above loaded the normal 13-bit
4008 immediate field, not the 10-bit field we have
4009 in movr. But it did handle the reg case. */
4010 if (IS_IMM) {
4011 simm = GET_FIELD_SPs(insn, 0, 9);
4012 tcg_gen_movi_tl(cpu_src2, simm);
4013 }
4014
4015 dst = gen_load_gpr(dc, rd);
4016 tcg_gen_movcond_tl(cmp.cond, dst,
4017 cmp.c1, cmp.c2,
4018 cpu_src2, dst);
4019 free_compare(&cmp);
4020 gen_store_gpr(dc, rd, dst);
4021 break;
4022 }
4023 #endif
4024 default:
4025 goto illegal_insn;
4026 }
4027 }
4028 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4029 #ifdef TARGET_SPARC64
4030 int opf = GET_FIELD_SP(insn, 5, 13);
4031 rs1 = GET_FIELD(insn, 13, 17);
4032 rs2 = GET_FIELD(insn, 27, 31);
4033 if (gen_trap_ifnofpu(dc)) {
4034 goto jmp_insn;
4035 }
4036
4037 switch (opf) {
4038 case 0x000: /* VIS I edge8cc */
4039 CHECK_FPU_FEATURE(dc, VIS1);
4040 cpu_src1 = gen_load_gpr(dc, rs1);
4041 cpu_src2 = gen_load_gpr(dc, rs2);
4042 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4043 gen_store_gpr(dc, rd, cpu_dst);
4044 break;
4045 case 0x001: /* VIS II edge8n */
4046 CHECK_FPU_FEATURE(dc, VIS2);
4047 cpu_src1 = gen_load_gpr(dc, rs1);
4048 cpu_src2 = gen_load_gpr(dc, rs2);
4049 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4050 gen_store_gpr(dc, rd, cpu_dst);
4051 break;
4052 case 0x002: /* VIS I edge8lcc */
4053 CHECK_FPU_FEATURE(dc, VIS1);
4054 cpu_src1 = gen_load_gpr(dc, rs1);
4055 cpu_src2 = gen_load_gpr(dc, rs2);
4056 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4057 gen_store_gpr(dc, rd, cpu_dst);
4058 break;
4059 case 0x003: /* VIS II edge8ln */
4060 CHECK_FPU_FEATURE(dc, VIS2);
4061 cpu_src1 = gen_load_gpr(dc, rs1);
4062 cpu_src2 = gen_load_gpr(dc, rs2);
4063 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4064 gen_store_gpr(dc, rd, cpu_dst);
4065 break;
4066 case 0x004: /* VIS I edge16cc */
4067 CHECK_FPU_FEATURE(dc, VIS1);
4068 cpu_src1 = gen_load_gpr(dc, rs1);
4069 cpu_src2 = gen_load_gpr(dc, rs2);
4070 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4071 gen_store_gpr(dc, rd, cpu_dst);
4072 break;
4073 case 0x005: /* VIS II edge16n */
4074 CHECK_FPU_FEATURE(dc, VIS2);
4075 cpu_src1 = gen_load_gpr(dc, rs1);
4076 cpu_src2 = gen_load_gpr(dc, rs2);
4077 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4078 gen_store_gpr(dc, rd, cpu_dst);
4079 break;
4080 case 0x006: /* VIS I edge16lcc */
4081 CHECK_FPU_FEATURE(dc, VIS1);
4082 cpu_src1 = gen_load_gpr(dc, rs1);
4083 cpu_src2 = gen_load_gpr(dc, rs2);
4084 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4085 gen_store_gpr(dc, rd, cpu_dst);
4086 break;
4087 case 0x007: /* VIS II edge16ln */
4088 CHECK_FPU_FEATURE(dc, VIS2);
4089 cpu_src1 = gen_load_gpr(dc, rs1);
4090 cpu_src2 = gen_load_gpr(dc, rs2);
4091 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4092 gen_store_gpr(dc, rd, cpu_dst);
4093 break;
4094 case 0x008: /* VIS I edge32cc */
4095 CHECK_FPU_FEATURE(dc, VIS1);
4096 cpu_src1 = gen_load_gpr(dc, rs1);
4097 cpu_src2 = gen_load_gpr(dc, rs2);
4098 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4099 gen_store_gpr(dc, rd, cpu_dst);
4100 break;
4101 case 0x009: /* VIS II edge32n */
4102 CHECK_FPU_FEATURE(dc, VIS2);
4103 cpu_src1 = gen_load_gpr(dc, rs1);
4104 cpu_src2 = gen_load_gpr(dc, rs2);
4105 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4106 gen_store_gpr(dc, rd, cpu_dst);
4107 break;
4108 case 0x00a: /* VIS I edge32lcc */
4109 CHECK_FPU_FEATURE(dc, VIS1);
4110 cpu_src1 = gen_load_gpr(dc, rs1);
4111 cpu_src2 = gen_load_gpr(dc, rs2);
4112 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4113 gen_store_gpr(dc, rd, cpu_dst);
4114 break;
4115 case 0x00b: /* VIS II edge32ln */
4116 CHECK_FPU_FEATURE(dc, VIS2);
4117 cpu_src1 = gen_load_gpr(dc, rs1);
4118 cpu_src2 = gen_load_gpr(dc, rs2);
4119 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4120 gen_store_gpr(dc, rd, cpu_dst);
4121 break;
4122 case 0x010: /* VIS I array8 */
4123 CHECK_FPU_FEATURE(dc, VIS1);
4124 cpu_src1 = gen_load_gpr(dc, rs1);
4125 cpu_src2 = gen_load_gpr(dc, rs2);
4126 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4127 gen_store_gpr(dc, rd, cpu_dst);
4128 break;
4129 case 0x012: /* VIS I array16 */
4130 CHECK_FPU_FEATURE(dc, VIS1);
4131 cpu_src1 = gen_load_gpr(dc, rs1);
4132 cpu_src2 = gen_load_gpr(dc, rs2);
4133 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4134 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4135 gen_store_gpr(dc, rd, cpu_dst);
4136 break;
4137 case 0x014: /* VIS I array32 */
4138 CHECK_FPU_FEATURE(dc, VIS1);
4139 cpu_src1 = gen_load_gpr(dc, rs1);
4140 cpu_src2 = gen_load_gpr(dc, rs2);
4141 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4142 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4143 gen_store_gpr(dc, rd, cpu_dst);
4144 break;
4145 case 0x018: /* VIS I alignaddr */
4146 CHECK_FPU_FEATURE(dc, VIS1);
4147 cpu_src1 = gen_load_gpr(dc, rs1);
4148 cpu_src2 = gen_load_gpr(dc, rs2);
4149 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4150 gen_store_gpr(dc, rd, cpu_dst);
4151 break;
4152 case 0x01a: /* VIS I alignaddrl */
4153 CHECK_FPU_FEATURE(dc, VIS1);
4154 cpu_src1 = gen_load_gpr(dc, rs1);
4155 cpu_src2 = gen_load_gpr(dc, rs2);
4156 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4157 gen_store_gpr(dc, rd, cpu_dst);
4158 break;
4159 case 0x019: /* VIS II bmask */
4160 CHECK_FPU_FEATURE(dc, VIS2);
4161 cpu_src1 = gen_load_gpr(dc, rs1);
4162 cpu_src2 = gen_load_gpr(dc, rs2);
4163 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4164 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4165 gen_store_gpr(dc, rd, cpu_dst);
4166 break;
4167 case 0x020: /* VIS I fcmple16 */
4168 CHECK_FPU_FEATURE(dc, VIS1);
4169 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4170 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4171 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4172 gen_store_gpr(dc, rd, cpu_dst);
4173 break;
4174 case 0x022: /* VIS I fcmpne16 */
4175 CHECK_FPU_FEATURE(dc, VIS1);
4176 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4177 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4178 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4179 gen_store_gpr(dc, rd, cpu_dst);
4180 break;
4181 case 0x024: /* VIS I fcmple32 */
4182 CHECK_FPU_FEATURE(dc, VIS1);
4183 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4184 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4185 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4186 gen_store_gpr(dc, rd, cpu_dst);
4187 break;
4188 case 0x026: /* VIS I fcmpne32 */
4189 CHECK_FPU_FEATURE(dc, VIS1);
4190 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4191 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4192 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4193 gen_store_gpr(dc, rd, cpu_dst);
4194 break;
4195 case 0x028: /* VIS I fcmpgt16 */
4196 CHECK_FPU_FEATURE(dc, VIS1);
4197 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4198 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4199 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4200 gen_store_gpr(dc, rd, cpu_dst);
4201 break;
4202 case 0x02a: /* VIS I fcmpeq16 */
4203 CHECK_FPU_FEATURE(dc, VIS1);
4204 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4205 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4206 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4207 gen_store_gpr(dc, rd, cpu_dst);
4208 break;
4209 case 0x02c: /* VIS I fcmpgt32 */
4210 CHECK_FPU_FEATURE(dc, VIS1);
4211 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4212 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4213 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4214 gen_store_gpr(dc, rd, cpu_dst);
4215 break;
4216 case 0x02e: /* VIS I fcmpeq32 */
4217 CHECK_FPU_FEATURE(dc, VIS1);
4218 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4219 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4220 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4221 gen_store_gpr(dc, rd, cpu_dst);
4222 break;
4223 case 0x031: /* VIS I fmul8x16 */
4224 CHECK_FPU_FEATURE(dc, VIS1);
4225 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4226 break;
4227 case 0x033: /* VIS I fmul8x16au */
4228 CHECK_FPU_FEATURE(dc, VIS1);
4229 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4230 break;
4231 case 0x035: /* VIS I fmul8x16al */
4232 CHECK_FPU_FEATURE(dc, VIS1);
4233 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4234 break;
4235 case 0x036: /* VIS I fmul8sux16 */
4236 CHECK_FPU_FEATURE(dc, VIS1);
4237 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4238 break;
4239 case 0x037: /* VIS I fmul8ulx16 */
4240 CHECK_FPU_FEATURE(dc, VIS1);
4241 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4242 break;
4243 case 0x038: /* VIS I fmuld8sux16 */
4244 CHECK_FPU_FEATURE(dc, VIS1);
4245 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4246 break;
4247 case 0x039: /* VIS I fmuld8ulx16 */
4248 CHECK_FPU_FEATURE(dc, VIS1);
4249 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4250 break;
4251 case 0x03a: /* VIS I fpack32 */
4252 CHECK_FPU_FEATURE(dc, VIS1);
4253 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4254 break;
4255 case 0x03b: /* VIS I fpack16 */
4256 CHECK_FPU_FEATURE(dc, VIS1);
4257 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4258 cpu_dst_32 = gen_dest_fpr_F();
4259 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4260 gen_store_fpr_F(dc, rd, cpu_dst_32);
4261 break;
4262 case 0x03d: /* VIS I fpackfix */
4263 CHECK_FPU_FEATURE(dc, VIS1);
4264 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4265 cpu_dst_32 = gen_dest_fpr_F();
4266 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4267 gen_store_fpr_F(dc, rd, cpu_dst_32);
4268 break;
4269 case 0x03e: /* VIS I pdist */
4270 CHECK_FPU_FEATURE(dc, VIS1);
4271 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4272 break;
4273 case 0x048: /* VIS I faligndata */
4274 CHECK_FPU_FEATURE(dc, VIS1);
4275 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4276 break;
4277 case 0x04b: /* VIS I fpmerge */
4278 CHECK_FPU_FEATURE(dc, VIS1);
4279 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4280 break;
4281 case 0x04c: /* VIS II bshuffle */
4282 CHECK_FPU_FEATURE(dc, VIS2);
4283 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4284 break;
4285 case 0x04d: /* VIS I fexpand */
4286 CHECK_FPU_FEATURE(dc, VIS1);
4287 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4288 break;
4289 case 0x050: /* VIS I fpadd16 */
4290 CHECK_FPU_FEATURE(dc, VIS1);
4291 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4292 break;
4293 case 0x051: /* VIS I fpadd16s */
4294 CHECK_FPU_FEATURE(dc, VIS1);
4295 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4296 break;
4297 case 0x052: /* VIS I fpadd32 */
4298 CHECK_FPU_FEATURE(dc, VIS1);
4299 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4300 break;
4301 case 0x053: /* VIS I fpadd32s */
4302 CHECK_FPU_FEATURE(dc, VIS1);
4303 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4304 break;
4305 case 0x054: /* VIS I fpsub16 */
4306 CHECK_FPU_FEATURE(dc, VIS1);
4307 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4308 break;
4309 case 0x055: /* VIS I fpsub16s */
4310 CHECK_FPU_FEATURE(dc, VIS1);
4311 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4312 break;
4313 case 0x056: /* VIS I fpsub32 */
4314 CHECK_FPU_FEATURE(dc, VIS1);
4315 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4316 break;
4317 case 0x057: /* VIS I fpsub32s */
4318 CHECK_FPU_FEATURE(dc, VIS1);
4319 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4320 break;
4321 case 0x060: /* VIS I fzero */
4322 CHECK_FPU_FEATURE(dc, VIS1);
4323 cpu_dst_64 = gen_dest_fpr_D();
4324 tcg_gen_movi_i64(cpu_dst_64, 0);
4325 gen_store_fpr_D(dc, rd, cpu_dst_64);
4326 break;
4327 case 0x061: /* VIS I fzeros */
4328 CHECK_FPU_FEATURE(dc, VIS1);
4329 cpu_dst_32 = gen_dest_fpr_F();
4330 tcg_gen_movi_i32(cpu_dst_32, 0);
4331 gen_store_fpr_F(dc, rd, cpu_dst_32);
4332 break;
4333 case 0x062: /* VIS I fnor */
4334 CHECK_FPU_FEATURE(dc, VIS1);
4335 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4336 break;
4337 case 0x063: /* VIS I fnors */
4338 CHECK_FPU_FEATURE(dc, VIS1);
4339 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4340 break;
4341 case 0x064: /* VIS I fandnot2 */
4342 CHECK_FPU_FEATURE(dc, VIS1);
4343 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4344 break;
4345 case 0x065: /* VIS I fandnot2s */
4346 CHECK_FPU_FEATURE(dc, VIS1);
4347 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4348 break;
4349 case 0x066: /* VIS I fnot2 */
4350 CHECK_FPU_FEATURE(dc, VIS1);
4351 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4352 break;
4353 case 0x067: /* VIS I fnot2s */
4354 CHECK_FPU_FEATURE(dc, VIS1);
4355 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4356 break;
4357 case 0x068: /* VIS I fandnot1 */
4358 CHECK_FPU_FEATURE(dc, VIS1);
4359 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4360 break;
4361 case 0x069: /* VIS I fandnot1s */
4362 CHECK_FPU_FEATURE(dc, VIS1);
4363 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4364 break;
4365 case 0x06a: /* VIS I fnot1 */
4366 CHECK_FPU_FEATURE(dc, VIS1);
4367 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4368 break;
4369 case 0x06b: /* VIS I fnot1s */
4370 CHECK_FPU_FEATURE(dc, VIS1);
4371 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4372 break;
4373 case 0x06c: /* VIS I fxor */
4374 CHECK_FPU_FEATURE(dc, VIS1);
4375 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4376 break;
4377 case 0x06d: /* VIS I fxors */
4378 CHECK_FPU_FEATURE(dc, VIS1);
4379 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4380 break;
4381 case 0x06e: /* VIS I fnand */
4382 CHECK_FPU_FEATURE(dc, VIS1);
4383 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4384 break;
4385 case 0x06f: /* VIS I fnands */
4386 CHECK_FPU_FEATURE(dc, VIS1);
4387 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4388 break;
4389 case 0x070: /* VIS I fand */
4390 CHECK_FPU_FEATURE(dc, VIS1);
4391 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4392 break;
4393 case 0x071: /* VIS I fands */
4394 CHECK_FPU_FEATURE(dc, VIS1);
4395 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4396 break;
4397 case 0x072: /* VIS I fxnor */
4398 CHECK_FPU_FEATURE(dc, VIS1);
4399 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4400 break;
4401 case 0x073: /* VIS I fxnors */
4402 CHECK_FPU_FEATURE(dc, VIS1);
4403 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4404 break;
4405 case 0x074: /* VIS I fsrc1 */
4406 CHECK_FPU_FEATURE(dc, VIS1);
4407 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4408 gen_store_fpr_D(dc, rd, cpu_src1_64);
4409 break;
4410 case 0x075: /* VIS I fsrc1s */
4411 CHECK_FPU_FEATURE(dc, VIS1);
4412 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4413 gen_store_fpr_F(dc, rd, cpu_src1_32);
4414 break;
4415 case 0x076: /* VIS I fornot2 */
4416 CHECK_FPU_FEATURE(dc, VIS1);
4417 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4418 break;
4419 case 0x077: /* VIS I fornot2s */
4420 CHECK_FPU_FEATURE(dc, VIS1);
4421 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4422 break;
4423 case 0x078: /* VIS I fsrc2 */
4424 CHECK_FPU_FEATURE(dc, VIS1);
4425 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4426 gen_store_fpr_D(dc, rd, cpu_src1_64);
4427 break;
4428 case 0x079: /* VIS I fsrc2s */
4429 CHECK_FPU_FEATURE(dc, VIS1);
4430 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4431 gen_store_fpr_F(dc, rd, cpu_src1_32);
4432 break;
4433 case 0x07a: /* VIS I fornot1 */
4434 CHECK_FPU_FEATURE(dc, VIS1);
4435 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4436 break;
4437 case 0x07b: /* VIS I fornot1s */
4438 CHECK_FPU_FEATURE(dc, VIS1);
4439 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4440 break;
4441 case 0x07c: /* VIS I for */
4442 CHECK_FPU_FEATURE(dc, VIS1);
4443 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4444 break;
4445 case 0x07d: /* VIS I fors */
4446 CHECK_FPU_FEATURE(dc, VIS1);
4447 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4448 break;
4449 case 0x07e: /* VIS I fone */
4450 CHECK_FPU_FEATURE(dc, VIS1);
4451 cpu_dst_64 = gen_dest_fpr_D();
4452 tcg_gen_movi_i64(cpu_dst_64, -1);
4453 gen_store_fpr_D(dc, rd, cpu_dst_64);
4454 break;
4455 case 0x07f: /* VIS I fones */
4456 CHECK_FPU_FEATURE(dc, VIS1);
4457 cpu_dst_32 = gen_dest_fpr_F();
4458 tcg_gen_movi_i32(cpu_dst_32, -1);
4459 gen_store_fpr_F(dc, rd, cpu_dst_32);
4460 break;
4461 case 0x080: /* VIS I shutdown */
4462 case 0x081: /* VIS II siam */
4463 // XXX
4464 goto illegal_insn;
4465 default:
4466 goto illegal_insn;
4467 }
4468 #else
4469 goto ncp_insn;
4470 #endif
4471 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4472 #ifdef TARGET_SPARC64
4473 goto illegal_insn;
4474 #else
4475 goto ncp_insn;
4476 #endif
4477 #ifdef TARGET_SPARC64
4478 } else if (xop == 0x39) { /* V9 return */
4479 TCGv_i32 r_const;
4480
4481 save_state(dc);
4482 cpu_src1 = get_src1(dc, insn);
4483 if (IS_IMM) { /* immediate */
4484 simm = GET_FIELDs(insn, 19, 31);
4485 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4486 } else { /* register */
4487 rs2 = GET_FIELD(insn, 27, 31);
4488 if (rs2) {
4489 cpu_src2 = gen_load_gpr(dc, rs2);
4490 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4491 } else {
4492 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4493 }
4494 }
4495 gen_helper_restore(cpu_env);
4496 gen_mov_pc_npc(dc);
4497 r_const = tcg_const_i32(3);
4498 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4499 tcg_temp_free_i32(r_const);
4500 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4501 dc->npc = DYNAMIC_PC;
4502 goto jmp_insn;
4503 #endif
4504 } else {
4505 cpu_src1 = get_src1(dc, insn);
4506 if (IS_IMM) { /* immediate */
4507 simm = GET_FIELDs(insn, 19, 31);
4508 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4509 } else { /* register */
4510 rs2 = GET_FIELD(insn, 27, 31);
4511 if (rs2) {
4512 cpu_src2 = gen_load_gpr(dc, rs2);
4513 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4514 } else {
4515 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4516 }
4517 }
4518 switch (xop) {
4519 case 0x38: /* jmpl */
4520 {
4521 TCGv t;
4522 TCGv_i32 r_const;
4523
4524 t = gen_dest_gpr(dc, rd);
4525 tcg_gen_movi_tl(t, dc->pc);
4526 gen_store_gpr(dc, rd, t);
4527 gen_mov_pc_npc(dc);
4528 r_const = tcg_const_i32(3);
4529 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4530 tcg_temp_free_i32(r_const);
4531 gen_address_mask(dc, cpu_dst);
4532 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4533 dc->npc = DYNAMIC_PC;
4534 }
4535 goto jmp_insn;
4536 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4537 case 0x39: /* rett, V9 return */
4538 {
4539 TCGv_i32 r_const;
4540
4541 if (!supervisor(dc))
4542 goto priv_insn;
4543 gen_mov_pc_npc(dc);
4544 r_const = tcg_const_i32(3);
4545 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4546 tcg_temp_free_i32(r_const);
4547 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4548 dc->npc = DYNAMIC_PC;
4549 gen_helper_rett(cpu_env);
4550 }
4551 goto jmp_insn;
4552 #endif
4553 case 0x3b: /* flush */
4554 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4555 goto unimp_flush;
4556 /* nop */
4557 break;
4558 case 0x3c: /* save */
4559 save_state(dc);
4560 gen_helper_save(cpu_env);
4561 gen_store_gpr(dc, rd, cpu_dst);
4562 break;
4563 case 0x3d: /* restore */
4564 save_state(dc);
4565 gen_helper_restore(cpu_env);
4566 gen_store_gpr(dc, rd, cpu_dst);
4567 break;
4568 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4569 case 0x3e: /* V9 done/retry */
4570 {
4571 switch (rd) {
4572 case 0:
4573 if (!supervisor(dc))
4574 goto priv_insn;
4575 dc->npc = DYNAMIC_PC;
4576 dc->pc = DYNAMIC_PC;
4577 gen_helper_done(cpu_env);
4578 goto jmp_insn;
4579 case 1:
4580 if (!supervisor(dc))
4581 goto priv_insn;
4582 dc->npc = DYNAMIC_PC;
4583 dc->pc = DYNAMIC_PC;
4584 gen_helper_retry(cpu_env);
4585 goto jmp_insn;
4586 default:
4587 goto illegal_insn;
4588 }
4589 }
4590 break;
4591 #endif
4592 default:
4593 goto illegal_insn;
4594 }
4595 }
4596 break;
4597 }
4598 break;
4599 case 3: /* load/store instructions */
4600 {
4601 unsigned int xop = GET_FIELD(insn, 7, 12);
4602
4603 cpu_src1 = get_src1(dc, insn);
4604 if (xop == 0x3c || xop == 0x3e) { // V9 casa/casxa
4605 rs2 = GET_FIELD(insn, 27, 31);
4606 cpu_src2 = gen_load_gpr(dc, rs2);
4607 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4608 } else if (IS_IMM) { /* immediate */
4609 simm = GET_FIELDs(insn, 19, 31);
4610 tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
4611 } else { /* register */
4612 rs2 = GET_FIELD(insn, 27, 31);
4613 if (rs2 != 0) {
4614 cpu_src2 = gen_load_gpr(dc, rs2);
4615 tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
4616 } else {
4617 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4618 }
4619 }
4620 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4621 (xop > 0x17 && xop <= 0x1d ) ||
4622 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4623 TCGv cpu_val = gen_dest_gpr(dc, rd);
4624
4625 switch (xop) {
4626 case 0x0: /* ld, V9 lduw, load unsigned word */
4627 gen_address_mask(dc, cpu_addr);
4628 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4629 break;
4630 case 0x1: /* ldub, load unsigned byte */
4631 gen_address_mask(dc, cpu_addr);
4632 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4633 break;
4634 case 0x2: /* lduh, load unsigned halfword */
4635 gen_address_mask(dc, cpu_addr);
4636 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4637 break;
4638 case 0x3: /* ldd, load double word */
4639 if (rd & 1)
4640 goto illegal_insn;
4641 else {
4642 TCGv_i32 r_const;
4643
4644 save_state(dc);
4645 r_const = tcg_const_i32(7);
4646 /* XXX remove alignment check */
4647 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4648 tcg_temp_free_i32(r_const);
4649 gen_address_mask(dc, cpu_addr);
4650 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4651 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
4652 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
4653 gen_store_gpr(dc, rd + 1, cpu_tmp0);
4654 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
4655 tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
4656 tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
4657 }
4658 break;
4659 case 0x9: /* ldsb, load signed byte */
4660 gen_address_mask(dc, cpu_addr);
4661 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4662 break;
4663 case 0xa: /* ldsh, load signed halfword */
4664 gen_address_mask(dc, cpu_addr);
4665 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4666 break;
4667 case 0xd: /* ldstub -- XXX: should be atomically */
4668 {
4669 TCGv r_const;
4670
4671 gen_address_mask(dc, cpu_addr);
4672 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4673 r_const = tcg_const_tl(0xff);
4674 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4675 tcg_temp_free(r_const);
4676 }
4677 break;
4678 case 0x0f: /* swap, swap register with memory. Also
4679 atomically */
4680 CHECK_IU_FEATURE(dc, SWAP);
4681 cpu_src1 = gen_load_gpr(dc, rd);
4682 gen_address_mask(dc, cpu_addr);
4683 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4684 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4685 tcg_gen_mov_tl(cpu_val, cpu_tmp0);
4686 break;
4687 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4688 case 0x10: /* lda, V9 lduwa, load word alternate */
4689 #ifndef TARGET_SPARC64
4690 if (IS_IMM)
4691 goto illegal_insn;
4692 if (!supervisor(dc))
4693 goto priv_insn;
4694 #endif
4695 save_state(dc);
4696 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4697 break;
4698 case 0x11: /* lduba, load unsigned byte alternate */
4699 #ifndef TARGET_SPARC64
4700 if (IS_IMM)
4701 goto illegal_insn;
4702 if (!supervisor(dc))
4703 goto priv_insn;
4704 #endif
4705 save_state(dc);
4706 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4707 break;
4708 case 0x12: /* lduha, load unsigned halfword alternate */
4709 #ifndef TARGET_SPARC64
4710 if (IS_IMM)
4711 goto illegal_insn;
4712 if (!supervisor(dc))
4713 goto priv_insn;
4714 #endif
4715 save_state(dc);
4716 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4717 break;
4718 case 0x13: /* ldda, load double word alternate */
4719 #ifndef TARGET_SPARC64
4720 if (IS_IMM)
4721 goto illegal_insn;
4722 if (!supervisor(dc))
4723 goto priv_insn;
4724 #endif
4725 if (rd & 1)
4726 goto illegal_insn;
4727 save_state(dc);
4728 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4729 goto skip_move;
4730 case 0x19: /* ldsba, load signed byte alternate */
4731 #ifndef TARGET_SPARC64
4732 if (IS_IMM)
4733 goto illegal_insn;
4734 if (!supervisor(dc))
4735 goto priv_insn;
4736 #endif
4737 save_state(dc);
4738 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4739 break;
4740 case 0x1a: /* ldsha, load signed halfword alternate */
4741 #ifndef TARGET_SPARC64
4742 if (IS_IMM)
4743 goto illegal_insn;
4744 if (!supervisor(dc))
4745 goto priv_insn;
4746 #endif
4747 save_state(dc);
4748 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4749 break;
4750 case 0x1d: /* ldstuba -- XXX: should be atomically */
4751 #ifndef TARGET_SPARC64
4752 if (IS_IMM)
4753 goto illegal_insn;
4754 if (!supervisor(dc))
4755 goto priv_insn;
4756 #endif
4757 save_state(dc);
4758 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4759 break;
4760 case 0x1f: /* swapa, swap reg with alt. memory. Also
4761 atomically */
4762 CHECK_IU_FEATURE(dc, SWAP);
4763 #ifndef TARGET_SPARC64
4764 if (IS_IMM)
4765 goto illegal_insn;
4766 if (!supervisor(dc))
4767 goto priv_insn;
4768 #endif
4769 save_state(dc);
4770 cpu_src1 = gen_load_gpr(dc, rd);
4771 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4772 break;
4773
4774 #ifndef TARGET_SPARC64
4775 case 0x30: /* ldc */
4776 case 0x31: /* ldcsr */
4777 case 0x33: /* lddc */
4778 goto ncp_insn;
4779 #endif
4780 #endif
4781 #ifdef TARGET_SPARC64
4782 case 0x08: /* V9 ldsw */
4783 gen_address_mask(dc, cpu_addr);
4784 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4785 break;
4786 case 0x0b: /* V9 ldx */
4787 gen_address_mask(dc, cpu_addr);
4788 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4789 break;
4790 case 0x18: /* V9 ldswa */
4791 save_state(dc);
4792 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4793 break;
4794 case 0x1b: /* V9 ldxa */
4795 save_state(dc);
4796 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4797 break;
4798 case 0x2d: /* V9 prefetch, no effect */
4799 goto skip_move;
4800 case 0x30: /* V9 ldfa */
4801 if (gen_trap_ifnofpu(dc)) {
4802 goto jmp_insn;
4803 }
4804 save_state(dc);
4805 gen_ldf_asi(cpu_addr, insn, 4, rd);
4806 gen_update_fprs_dirty(rd);
4807 goto skip_move;
4808 case 0x33: /* V9 lddfa */
4809 if (gen_trap_ifnofpu(dc)) {
4810 goto jmp_insn;
4811 }
4812 save_state(dc);
4813 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4814 gen_update_fprs_dirty(DFPREG(rd));
4815 goto skip_move;
4816 case 0x3d: /* V9 prefetcha, no effect */
4817 goto skip_move;
4818 case 0x32: /* V9 ldqfa */
4819 CHECK_FPU_FEATURE(dc, FLOAT128);
4820 if (gen_trap_ifnofpu(dc)) {
4821 goto jmp_insn;
4822 }
4823 save_state(dc);
4824 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4825 gen_update_fprs_dirty(QFPREG(rd));
4826 goto skip_move;
4827 #endif
4828 default:
4829 goto illegal_insn;
4830 }
4831 gen_store_gpr(dc, rd, cpu_val);
4832 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4833 skip_move: ;
4834 #endif
4835 } else if (xop >= 0x20 && xop < 0x24) {
4836 if (gen_trap_ifnofpu(dc)) {
4837 goto jmp_insn;
4838 }
4839 save_state(dc);
4840 switch (xop) {
4841 case 0x20: /* ldf, load fpreg */
4842 gen_address_mask(dc, cpu_addr);
4843 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4844 cpu_dst_32 = gen_dest_fpr_F();
4845 tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
4846 gen_store_fpr_F(dc, rd, cpu_dst_32);
4847 break;
4848 case 0x21: /* ldfsr, V9 ldxfsr */
4849 #ifdef TARGET_SPARC64
4850 gen_address_mask(dc, cpu_addr);
4851 if (rd == 1) {
4852 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4853 gen_helper_ldxfsr(cpu_env, cpu_tmp64);
4854 } else {
4855 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4856 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
4857 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4858 }
4859 #else
4860 {
4861 tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
4862 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4863 }
4864 #endif
4865 break;
4866 case 0x22: /* ldqf, load quad fpreg */
4867 {
4868 TCGv_i32 r_const;
4869
4870 CHECK_FPU_FEATURE(dc, FLOAT128);
4871 r_const = tcg_const_i32(dc->mem_idx);
4872 gen_address_mask(dc, cpu_addr);
4873 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4874 tcg_temp_free_i32(r_const);
4875 gen_op_store_QT0_fpr(QFPREG(rd));
4876 gen_update_fprs_dirty(QFPREG(rd));
4877 }
4878 break;
4879 case 0x23: /* lddf, load double fpreg */
4880 gen_address_mask(dc, cpu_addr);
4881 cpu_dst_64 = gen_dest_fpr_D();
4882 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4883 gen_store_fpr_D(dc, rd, cpu_dst_64);
4884 break;
4885 default:
4886 goto illegal_insn;
4887 }
4888 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4889 xop == 0xe || xop == 0x1e) {
4890 TCGv cpu_val = gen_load_gpr(dc, rd);
4891
4892 switch (xop) {
4893 case 0x4: /* st, store word */
4894 gen_address_mask(dc, cpu_addr);
4895 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4896 break;
4897 case 0x5: /* stb, store byte */
4898 gen_address_mask(dc, cpu_addr);
4899 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4900 break;
4901 case 0x6: /* sth, store halfword */
4902 gen_address_mask(dc, cpu_addr);
4903 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4904 break;
4905 case 0x7: /* std, store double word */
4906 if (rd & 1)
4907 goto illegal_insn;
4908 else {
4909 TCGv_i32 r_const;
4910 TCGv lo;
4911
4912 save_state(dc);
4913 gen_address_mask(dc, cpu_addr);
4914 r_const = tcg_const_i32(7);
4915 /* XXX remove alignment check */
4916 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4917 tcg_temp_free_i32(r_const);
4918 lo = gen_load_gpr(dc, rd + 1);
4919 tcg_gen_concat_tl_i64(cpu_tmp64, lo, cpu_val);
4920 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
4921 }
4922 break;
4923 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4924 case 0x14: /* sta, V9 stwa, store word alternate */
4925 #ifndef TARGET_SPARC64
4926 if (IS_IMM)
4927 goto illegal_insn;
4928 if (!supervisor(dc))
4929 goto priv_insn;
4930 #endif
4931 save_state(dc);
4932 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4933 dc->npc = DYNAMIC_PC;
4934 break;
4935 case 0x15: /* stba, store byte alternate */
4936 #ifndef TARGET_SPARC64
4937 if (IS_IMM)
4938 goto illegal_insn;
4939 if (!supervisor(dc))
4940 goto priv_insn;
4941 #endif
4942 save_state(dc);
4943 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4944 dc->npc = DYNAMIC_PC;
4945 break;
4946 case 0x16: /* stha, store halfword alternate */
4947 #ifndef TARGET_SPARC64
4948 if (IS_IMM)
4949 goto illegal_insn;
4950 if (!supervisor(dc))
4951 goto priv_insn;
4952 #endif
4953 save_state(dc);
4954 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4955 dc->npc = DYNAMIC_PC;
4956 break;
4957 case 0x17: /* stda, store double word alternate */
4958 #ifndef TARGET_SPARC64
4959 if (IS_IMM)
4960 goto illegal_insn;
4961 if (!supervisor(dc))
4962 goto priv_insn;
4963 #endif
4964 if (rd & 1)
4965 goto illegal_insn;
4966 else {
4967 save_state(dc);
4968 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4969 }
4970 break;
4971 #endif
4972 #ifdef TARGET_SPARC64
4973 case 0x0e: /* V9 stx */
4974 gen_address_mask(dc, cpu_addr);
4975 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4976 break;
4977 case 0x1e: /* V9 stxa */
4978 save_state(dc);
4979 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4980 dc->npc = DYNAMIC_PC;
4981 break;
4982 #endif
4983 default:
4984 goto illegal_insn;
4985 }
4986 } else if (xop > 0x23 && xop < 0x28) {
4987 if (gen_trap_ifnofpu(dc)) {
4988 goto jmp_insn;
4989 }
4990 save_state(dc);
4991 switch (xop) {
4992 case 0x24: /* stf, store fpreg */
4993 gen_address_mask(dc, cpu_addr);
4994 cpu_src1_32 = gen_load_fpr_F(dc, rd);
4995 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
4996 tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
4997 break;
4998 case 0x25: /* stfsr, V9 stxfsr */
4999 #ifdef TARGET_SPARC64
5000 gen_address_mask(dc, cpu_addr);
5001 tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
5002 if (rd == 1)
5003 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
5004 else
5005 tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
5006 #else
5007 tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
5008 tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
5009 #endif
5010 break;
5011 case 0x26:
5012 #ifdef TARGET_SPARC64
5013 /* V9 stqf, store quad fpreg */
5014 {
5015 TCGv_i32 r_const;
5016
5017 CHECK_FPU_FEATURE(dc, FLOAT128);
5018 gen_op_load_fpr_QT0(QFPREG(rd));
5019 r_const = tcg_const_i32(dc->mem_idx);
5020 gen_address_mask(dc, cpu_addr);
5021 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5022 tcg_temp_free_i32(r_const);
5023 }
5024 break;
5025 #else /* !TARGET_SPARC64 */
5026 /* stdfq, store floating point queue */
5027 #if defined(CONFIG_USER_ONLY)
5028 goto illegal_insn;
5029 #else
5030 if (!supervisor(dc))
5031 goto priv_insn;
5032 if (gen_trap_ifnofpu(dc)) {
5033 goto jmp_insn;
5034 }
5035 goto nfq_insn;
5036 #endif
5037 #endif
5038 case 0x27: /* stdf, store double fpreg */
5039 gen_address_mask(dc, cpu_addr);
5040 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5041 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5042 break;
5043 default:
5044 goto illegal_insn;
5045 }
5046 } else if (xop > 0x33 && xop < 0x3f) {
5047 save_state(dc);
5048 switch (xop) {
5049 #ifdef TARGET_SPARC64
5050 case 0x34: /* V9 stfa */
5051 if (gen_trap_ifnofpu(dc)) {
5052 goto jmp_insn;
5053 }
5054 gen_stf_asi(cpu_addr, insn, 4, rd);
5055 break;
5056 case 0x36: /* V9 stqfa */
5057 {
5058 TCGv_i32 r_const;
5059
5060 CHECK_FPU_FEATURE(dc, FLOAT128);
5061 if (gen_trap_ifnofpu(dc)) {
5062 goto jmp_insn;
5063 }
5064 r_const = tcg_const_i32(7);
5065 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5066 tcg_temp_free_i32(r_const);
5067 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5068 }
5069 break;
5070 case 0x37: /* V9 stdfa */
5071 if (gen_trap_ifnofpu(dc)) {
5072 goto jmp_insn;
5073 }
5074 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5075 break;
5076 case 0x3c: /* V9 casa */
5077 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5078 break;
5079 case 0x3e: /* V9 casxa */
5080 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5081 break;
5082 #else
5083 case 0x34: /* stc */
5084 case 0x35: /* stcsr */
5085 case 0x36: /* stdcq */
5086 case 0x37: /* stdc */
5087 goto ncp_insn;
5088 #endif
5089 default:
5090 goto illegal_insn;
5091 }
5092 } else
5093 goto illegal_insn;
5094 }
5095 break;
5096 }
5097 /* default case for non jump instructions */
5098 if (dc->npc == DYNAMIC_PC) {
5099 dc->pc = DYNAMIC_PC;
5100 gen_op_next_insn();
5101 } else if (dc->npc == JUMP_PC) {
5102 /* we can do a static jump */
5103 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5104 dc->is_br = 1;
5105 } else {
5106 dc->pc = dc->npc;
5107 dc->npc = dc->npc + 4;
5108 }
5109 jmp_insn:
5110 goto egress;
5111 illegal_insn:
5112 {
5113 TCGv_i32 r_const;
5114
5115 save_state(dc);
5116 r_const = tcg_const_i32(TT_ILL_INSN);
5117 gen_helper_raise_exception(cpu_env, r_const);
5118 tcg_temp_free_i32(r_const);
5119 dc->is_br = 1;
5120 }
5121 goto egress;
5122 unimp_flush:
5123 {
5124 TCGv_i32 r_const;
5125
5126 save_state(dc);
5127 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5128 gen_helper_raise_exception(cpu_env, r_const);
5129 tcg_temp_free_i32(r_const);
5130 dc->is_br = 1;
5131 }
5132 goto egress;
5133 #if !defined(CONFIG_USER_ONLY)
5134 priv_insn:
5135 {
5136 TCGv_i32 r_const;
5137
5138 save_state(dc);
5139 r_const = tcg_const_i32(TT_PRIV_INSN);
5140 gen_helper_raise_exception(cpu_env, r_const);
5141 tcg_temp_free_i32(r_const);
5142 dc->is_br = 1;
5143 }
5144 goto egress;
5145 #endif
5146 nfpu_insn:
5147 save_state(dc);
5148 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5149 dc->is_br = 1;
5150 goto egress;
5151 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5152 nfq_insn:
5153 save_state(dc);
5154 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5155 dc->is_br = 1;
5156 goto egress;
5157 #endif
5158 #ifndef TARGET_SPARC64
5159 ncp_insn:
5160 {
5161 TCGv r_const;
5162
5163 save_state(dc);
5164 r_const = tcg_const_i32(TT_NCP_INSN);
5165 gen_helper_raise_exception(cpu_env, r_const);
5166 tcg_temp_free(r_const);
5167 dc->is_br = 1;
5168 }
5169 goto egress;
5170 #endif
5171 egress:
5172 tcg_temp_free(cpu_tmp1);
5173 tcg_temp_free(cpu_tmp2);
5174 if (dc->n_t32 != 0) {
5175 int i;
5176 for (i = dc->n_t32 - 1; i >= 0; --i) {
5177 tcg_temp_free_i32(dc->t32[i]);
5178 }
5179 dc->n_t32 = 0;
5180 }
5181 if (dc->n_ttl != 0) {
5182 int i;
5183 for (i = dc->n_ttl - 1; i >= 0; --i) {
5184 tcg_temp_free(dc->ttl[i]);
5185 }
5186 dc->n_ttl = 0;
5187 }
5188 }
5189
/* Translate one guest translation block into TCG ops.
   tb:  the TranslationBlock to fill (tb->pc is the guest start PC,
        tb->cs_base carries the initial npc).
   spc: non-zero when called from gen_intermediate_code_pc, i.e. we are
        re-translating to map host ops back to guest PCs; in that mode the
        gen_opc_* side tables are filled instead of tb->size/tb->icount.
   env: CPU state, used for mmu index, breakpoints and code fetch.  */
static inline void gen_intermediate_code_internal(TranslationBlock * tb,
                                                  int spc, CPUSPARCState *env)
{
    target_ulong pc_start, last_pc;
    uint16_t *gen_opc_end;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    /* Set up the per-TB disassembly context.  */
    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    /* The initial npc is smuggled through cs_base (SPARC delay slots).  */
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (env->singlestep_enabled || singlestep);
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    do {
        /* Stop translation at a guest breakpoint and trap to the debugger.  */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (dc->pc != pc_start)
                        save_state(dc);
                    gen_helper_debug(cpu_env);
                    tcg_gen_exit_tb(0);
                    dc->is_br = 1;
                    goto exit_gen_loop;
                }
            }
        }
        if (spc) {
            /* Search-PC mode: record pc/npc for every generated op index
               so restore_state_to_opc can map back after a fault.  */
            qemu_log("Search PC...\n");
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
                gen_opc_pc[lj] = dc->pc;
                gen_opc_npc[lj] = dc->npc;
                gen_opc_instr_start[lj] = 1;
                gen_opc_icount[lj] = num_insns;
            }
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        /* Per-insn scratch temporaries, freed again right below.  */
        cpu_tmp0 = tcg_temp_new();
        cpu_tmp32 = tcg_temp_new_i32();
        cpu_tmp64 = tcg_temp_new_i64();
        cpu_dst = tcg_temp_new();
        cpu_addr = tcg_temp_new();

        disas_sparc_insn(dc, insn);
        num_insns++;

        tcg_temp_free(cpu_addr);
        tcg_temp_free(cpu_dst);
        tcg_temp_free_i64(cpu_tmp64);
        tcg_temp_free_i32(cpu_tmp32);
        tcg_temp_free(cpu_tmp0);

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while ((gen_opc_ptr < gen_opc_end) &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            /* pc/npc not fully known at translation time: store what we
               can and exit to the main loop without chaining.  */
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (spc) {
        /* Pad the op index table, and publish the two possible branch
           targets for JUMP_PC resolution in restore_state_to_opc.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
#if 0
        log_page_dump();
#endif
        gen_opc_jump_pc[0] = dc->jump_pc[0];
        gen_opc_jump_pc[1] = dc->jump_pc[1];
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
5327
5328 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5329 {
5330 gen_intermediate_code_internal(tb, 0, env);
5331 }
5332
5333 void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
5334 {
5335 gen_intermediate_code_internal(tb, 1, env);
5336 }
5337
/* One-time registration of all TCG global variables backing SPARC CPU
   state (condition codes, pc/npc, %y, the global registers, FP registers
   and the SPARC64-only control registers).  Safe to call repeatedly; the
   'inited' flag makes every call after the first a no-op.  */
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1",
        "g2",
        "g3",
        "g4",
        "g5",
        "g6",
        "g7",
    };
    /* Names of the 64-bit double-precision register pairs; only the even
       single-precision register of each pair is named (f0, f2, ...).  */
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* init various static tables */
    if (!inited) {
        inited = 1;

        /* env pointer lives in a fixed host register; everything else is
           a TCG global aliased onto a field of CPUSPARCState.  */
        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
                                         "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
                                         "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
        cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                      "cond");
        cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
                                         "psr");
        cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                     "fsr");
        cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                    "pc");
        cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                     "npc");
        cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                     "tbr");
#endif
        /* %g0 is hardwired to zero, so no global is created for index 0.  */
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                              offsetof(CPUSPARCState, gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                                offsetof(CPUSPARCState, fpr[i]),
                                                fregnames[i]);
        }

        /* register helpers */

#define GEN_HELPER 2
#include "helper.h"
    }
}
5443
5444 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
5445 {
5446 target_ulong npc;
5447 env->pc = gen_opc_pc[pc_pos];
5448 npc = gen_opc_npc[pc_pos];
5449 if (npc == 1) {
5450 /* dynamic NPC: already stored */
5451 } else if (npc == 2) {
5452 /* jump PC: use 'cond' and the jump targets of the translation */
5453 if (env->cond) {
5454 env->npc = gen_opc_jump_pc[0];
5455 } else {
5456 env->npc = gen_opc_jump_pc[1];
5457 }
5458 } else {
5459 env->npc = npc;
5460 }
5461 }