1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC     2 /* dynamic pc value which takes only two values,
39                          jump_pc[0] or jump_pc[1], according to the condition */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond, cpu_dst;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0;
63 static TCGv_i64 cpu_tmp64;
64 /* Floating point registers */
65 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
66
67 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
68 static target_ulong gen_opc_jump_pc[2];
69
70 #include "gen-icount.h"
71
72 typedef struct DisasContext {
73 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
74 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
75 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
76 int is_br;
77 int mem_idx;
78 int fpu_enabled;
79 int address_mask_32bit;
80 int singlestep;
81 uint32_t cc_op; /* current CC operation */
82 struct TranslationBlock *tb;
83 sparc_def_t *def;
84 TCGv_i32 t32[3];
85 TCGv ttl[5];
86 int n_t32;
87 int n_ttl;
88 } DisasContext;
89
90 typedef struct {
91 TCGCond cond;
92 bool is_bool;
93 bool g1, g2;
94 TCGv c1, c2;
95 } DisasCompare;
96
97 // This macro uses non-native bit order, i.e. bit 0 is the MSB
98 #define GET_FIELD(X, FROM, TO) \
99 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
100
101 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
102 #define GET_FIELD_SP(X, FROM, TO) \
103 GET_FIELD(X, 31 - (TO), 31 - (FROM))
104
105 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
106 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
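
#if 0
/* Illustrative only -- a worked example of the field macros above.
   For the Bicc instruction word 0x30800004 ("ba,a +0x10"):             */
static void example_field_macros(void)
{
    unsigned int insn = 0x30800004;
    assert(GET_FIELD(insn, 3, 6) == 8);      /* cond<28:25> = 8, "always"    */
    assert(GET_FIELD_SP(insn, 25, 28) == 8); /* same field, manual bit order */
    assert(GET_FIELDs(insn, 10, 31) == 4);   /* disp22 = +4 instructions     */
}
#endif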
107
108 #ifdef TARGET_SPARC64
109 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
110 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
111 #else
112 #define DFPREG(r) (r & 0x1e)
113 #define QFPREG(r) (r & 0x1c)
114 #endif
115
116 #define UA2005_HTRAP_MASK 0xff
117 #define V8_TRAP_MASK 0x7f
118
119 static int sign_extend(int x, int len)
120 {
121 len = 32 - len;
122 return (x << len) >> len;
123 }
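
/* Worked example: sign_extend(0x1fff, 13) shifts the 13-bit value into
   the top of the word and arithmetic-shifts it back, yielding -1; this
   is how simm13 immediates become negative.  (Right-shifting a negative
   signed int is implementation-defined in C, but QEMU assumes an
   arithmetic shift throughout.) */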
124
125 #define IS_IMM (insn & (1<<13))
126
127 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
128 {
129 TCGv_i32 t;
130 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
131 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
132 return t;
133 }
134
135 static inline TCGv get_temp_tl(DisasContext *dc)
136 {
137 TCGv t;
138 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
139 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
140 return t;
141 }
142
143 static inline void gen_update_fprs_dirty(int rd)
144 {
145 #if defined(TARGET_SPARC64)
146 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
147 #endif
148 }
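
/* The constants above are the V9 FPRS.DL (dirty-lower, bit 0) and
   FPRS.DU (dirty-upper, bit 1) bits: writes to %f0-%f31 dirty the lower
   half of the register file, writes to %f32-%f62 the upper half. */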
149
150 /* floating point registers moves */
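/* cpu_fpr[] holds the register file as 64-bit double registers.  A
   single-precision register %fN is packed into cpu_fpr[N / 2]: the high
   32 bits when N is even, the low 32 bits when N is odd.  The accessors
   below hide this packing from the rest of the translator. */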
151 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
152 {
153 #if TCG_TARGET_REG_BITS == 32
154 if (src & 1) {
155 return TCGV_LOW(cpu_fpr[src / 2]);
156 } else {
157 return TCGV_HIGH(cpu_fpr[src / 2]);
158 }
159 #else
160 if (src & 1) {
161 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
162 } else {
163 TCGv_i32 ret = get_temp_i32(dc);
164 TCGv_i64 t = tcg_temp_new_i64();
165
166 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
167 tcg_gen_trunc_i64_i32(ret, t);
168 tcg_temp_free_i64(t);
169
170 return ret;
171 }
172 #endif
173 }
174
175 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
176 {
177 #if TCG_TARGET_REG_BITS == 32
178 if (dst & 1) {
179 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
180 } else {
181 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
182 }
183 #else
184 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
185 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
186 (dst & 1 ? 0 : 32), 32);
187 #endif
188 gen_update_fprs_dirty(dst);
189 }
190
191 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
192 {
193 return get_temp_i32(dc);
194 }
195
196 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
197 {
198 src = DFPREG(src);
199 return cpu_fpr[src / 2];
200 }
201
202 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
203 {
204 dst = DFPREG(dst);
205 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
206 gen_update_fprs_dirty(dst);
207 }
208
209 static TCGv_i64 gen_dest_fpr_D(void)
210 {
211 return cpu_tmp64;
212 }
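
/* Note that this hands out the single shared cpu_tmp64 temporary, so at
   most one double-precision destination can be live per instruction. */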
213
214 static void gen_op_load_fpr_QT0(unsigned int src)
215 {
216 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
217 offsetof(CPU_QuadU, ll.upper));
218 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
219 offsetof(CPU_QuadU, ll.lower));
220 }
221
222 static void gen_op_load_fpr_QT1(unsigned int src)
223 {
224 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
225 offsetof(CPU_QuadU, ll.upper));
226 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
227 offsetof(CPU_QuadU, ll.lower));
228 }
229
230 static void gen_op_store_QT0_fpr(unsigned int dst)
231 {
232 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
233 offsetof(CPU_QuadU, ll.upper));
234 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
235 offsetof(CPU_QuadU, ll.lower));
236 }
237
238 #ifdef TARGET_SPARC64
239 static void gen_move_Q(unsigned int rd, unsigned int rs)
240 {
241 rd = QFPREG(rd);
242 rs = QFPREG(rs);
243
244 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
245 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
246 gen_update_fprs_dirty(rd);
247 }
248 #endif
249
250 /* moves */
251 #ifdef CONFIG_USER_ONLY
252 #define supervisor(dc) 0
253 #ifdef TARGET_SPARC64
254 #define hypervisor(dc) 0
255 #endif
256 #else
257 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
258 #ifdef TARGET_SPARC64
259 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
260 #else
261 #endif
262 #endif
263
264 #ifdef TARGET_SPARC64
265 #ifndef TARGET_ABI32
266 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
267 #else
268 #define AM_CHECK(dc) (1)
269 #endif
270 #endif
271
272 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
273 {
274 #ifdef TARGET_SPARC64
275 if (AM_CHECK(dc))
276 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
277 #endif
278 }
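
/* With PSTATE.AM set (or unconditionally for a 32-bit ABI guest), V9
   ignores the upper 32 address bits, so e.g. 0xffffffff9abcdef0 is
   truncated to 0x9abcdef0 before the access. */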
279
280 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
281 {
282 if (reg == 0 || reg >= 8) {
283 TCGv t = get_temp_tl(dc);
284 if (reg == 0) {
285 tcg_gen_movi_tl(t, 0);
286 } else {
287 tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
288 }
289 return t;
290 } else {
291 return cpu_gregs[reg];
292 }
293 }
294
295 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
296 {
297 if (reg > 0) {
298 if (reg < 8) {
299 tcg_gen_mov_tl(cpu_gregs[reg], v);
300 } else {
301 tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
302 }
303 }
304 }
305
306 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
307 {
308 if (reg == 0 || reg >= 8) {
309 return get_temp_tl(dc);
310 } else {
311 return cpu_gregs[reg];
312 }
313 }
314
315 static inline void gen_goto_tb(DisasContext *s, int tb_num,
316 target_ulong pc, target_ulong npc)
317 {
318 TranslationBlock *tb;
319
320 tb = s->tb;
321 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
322 (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
323 !s->singlestep) {
324 /* jump to same page: we can use a direct jump */
325 tcg_gen_goto_tb(tb_num);
326 tcg_gen_movi_tl(cpu_pc, pc);
327 tcg_gen_movi_tl(cpu_npc, npc);
328 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
329 } else {
330 /* jump to another page: currently not optimized */
331 tcg_gen_movi_tl(cpu_pc, pc);
332 tcg_gen_movi_tl(cpu_npc, npc);
333 tcg_gen_exit_tb(0);
334 }
335 }
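
/* Direct TB chaining (tcg_gen_goto_tb) is only safe when both pc and
   npc stay on the same guest page as this TB and we are not
   single-stepping; page-crossing jumps must exit to the main loop with
   tcg_gen_exit_tb(0) so the next TB is looked up afresh. */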
336
337 // XXX suboptimal
338 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
339 {
340 tcg_gen_extu_i32_tl(reg, src);
341 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
342 tcg_gen_andi_tl(reg, reg, 0x1);
343 }
344
345 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
346 {
347 tcg_gen_extu_i32_tl(reg, src);
348 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
349 tcg_gen_andi_tl(reg, reg, 0x1);
350 }
351
352 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
353 {
354 tcg_gen_extu_i32_tl(reg, src);
355 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
356 tcg_gen_andi_tl(reg, reg, 0x1);
357 }
358
359 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
360 {
361 tcg_gen_extu_i32_tl(reg, src);
362 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
363 tcg_gen_andi_tl(reg, reg, 0x1);
364 }
365
366 static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
367 {
368 tcg_gen_mov_tl(cpu_cc_src, src1);
369 tcg_gen_movi_tl(cpu_cc_src2, src2);
370 tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
371 tcg_gen_mov_tl(dst, cpu_cc_dst);
372 }
373
374 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
375 {
376 tcg_gen_mov_tl(cpu_cc_src, src1);
377 tcg_gen_mov_tl(cpu_cc_src2, src2);
378 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
379 tcg_gen_mov_tl(dst, cpu_cc_dst);
380 }
381
382 static TCGv_i32 gen_add32_carry32(void)
383 {
384 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
385
386 /* Carry is computed from a previous add: (dst < src) */
387 #if TARGET_LONG_BITS == 64
388 cc_src1_32 = tcg_temp_new_i32();
389 cc_src2_32 = tcg_temp_new_i32();
390 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
391 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
392 #else
393 cc_src1_32 = cpu_cc_dst;
394 cc_src2_32 = cpu_cc_src;
395 #endif
396
397 carry_32 = tcg_temp_new_i32();
398 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
399
400 #if TARGET_LONG_BITS == 64
401 tcg_temp_free_i32(cc_src1_32);
402 tcg_temp_free_i32(cc_src2_32);
403 #endif
404
405 return carry_32;
406 }
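
/* Worked example: 0xffffffff + 2 wraps to 1, and (1 < 0xffffffff)
   recovers the carry-out of the 32-bit add without needing a 33rd
   bit. */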
407
408 static TCGv_i32 gen_sub32_carry32(void)
409 {
410 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
411
412 /* Carry is computed from a previous borrow: (src1 < src2) */
413 #if TARGET_LONG_BITS == 64
414 cc_src1_32 = tcg_temp_new_i32();
415 cc_src2_32 = tcg_temp_new_i32();
416 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
417 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
418 #else
419 cc_src1_32 = cpu_cc_src;
420 cc_src2_32 = cpu_cc_src2;
421 #endif
422
423 carry_32 = tcg_temp_new_i32();
424 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
425
426 #if TARGET_LONG_BITS == 64
427 tcg_temp_free_i32(cc_src1_32);
428 tcg_temp_free_i32(cc_src2_32);
429 #endif
430
431 return carry_32;
432 }
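
/* Worked example: after 1 - 2 the borrow is recovered as
   (cc_src = 1) < (cc_src2 = 2), again without a 33rd bit. */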
433
434 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
435 TCGv src2, int update_cc)
436 {
437 TCGv_i32 carry_32;
438 TCGv carry;
439
440 switch (dc->cc_op) {
441 case CC_OP_DIV:
442 case CC_OP_LOGIC:
443 /* Carry is known to be zero. Fall back to plain ADD. */
444 if (update_cc) {
445 gen_op_add_cc(dst, src1, src2);
446 } else {
447 tcg_gen_add_tl(dst, src1, src2);
448 }
449 return;
450
451 case CC_OP_ADD:
452 case CC_OP_TADD:
453 case CC_OP_TADDTV:
454 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
455 {
456 /* For 32-bit hosts, we can re-use the host's hardware carry
457 generation by using an ADD2 opcode. We discard the low
458 part of the output. Ideally we'd combine this operation
459 with the add that generated the carry in the first place. */
460 TCGv dst_low = tcg_temp_new();
461 tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
462 cpu_cc_src, src1, cpu_cc_src2, src2);
463 tcg_temp_free(dst_low);
464 goto add_done;
465 }
466 #endif
467 carry_32 = gen_add32_carry32();
468 break;
469
470 case CC_OP_SUB:
471 case CC_OP_TSUB:
472 case CC_OP_TSUBTV:
473 carry_32 = gen_sub32_carry32();
474 break;
475
476 default:
477 /* We need external help to produce the carry. */
478 carry_32 = tcg_temp_new_i32();
479 gen_helper_compute_C_icc(carry_32, cpu_env);
480 break;
481 }
482
483 #if TARGET_LONG_BITS == 64
484 carry = tcg_temp_new();
485 tcg_gen_extu_i32_i64(carry, carry_32);
486 #else
487 carry = carry_32;
488 #endif
489
490 tcg_gen_add_tl(dst, src1, src2);
491 tcg_gen_add_tl(dst, dst, carry);
492
493 tcg_temp_free_i32(carry_32);
494 #if TARGET_LONG_BITS == 64
495 tcg_temp_free(carry);
496 #endif
497
498 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
499 add_done:
500 #endif
501 if (update_cc) {
502 tcg_gen_mov_tl(cpu_cc_src, src1);
503 tcg_gen_mov_tl(cpu_cc_src2, src2);
504 tcg_gen_mov_tl(cpu_cc_dst, dst);
505 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
506 dc->cc_op = CC_OP_ADDX;
507 }
508 }
509
510 static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
511 {
512 tcg_gen_mov_tl(cpu_cc_src, src1);
513 tcg_gen_movi_tl(cpu_cc_src2, src2);
514 if (src2 == 0) {
515 tcg_gen_mov_tl(cpu_cc_dst, src1);
516 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
517 dc->cc_op = CC_OP_LOGIC;
518 } else {
519 tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
520 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
521 dc->cc_op = CC_OP_SUB;
522 }
523 tcg_gen_mov_tl(dst, cpu_cc_dst);
524 }
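
/* Subtracting an immediate of zero can neither borrow nor overflow, so
   the resulting flags (N/Z from the value, C = V = 0) are exactly what
   CC_OP_LOGIC computes from cc_dst -- a cheaper encoding than
   CC_OP_SUB. */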
525
526 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
527 {
528 tcg_gen_mov_tl(cpu_cc_src, src1);
529 tcg_gen_mov_tl(cpu_cc_src2, src2);
530 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
531 tcg_gen_mov_tl(dst, cpu_cc_dst);
532 }
533
534 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
535 TCGv src2, int update_cc)
536 {
537 TCGv_i32 carry_32;
538 TCGv carry;
539
540 switch (dc->cc_op) {
541 case CC_OP_DIV:
542 case CC_OP_LOGIC:
543 /* Carry is known to be zero. Fall back to plain SUB. */
544 if (update_cc) {
545 gen_op_sub_cc(dst, src1, src2);
546 } else {
547 tcg_gen_sub_tl(dst, src1, src2);
548 }
549 return;
550
551 case CC_OP_ADD:
552 case CC_OP_TADD:
553 case CC_OP_TADDTV:
554 carry_32 = gen_add32_carry32();
555 break;
556
557 case CC_OP_SUB:
558 case CC_OP_TSUB:
559 case CC_OP_TSUBTV:
560 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
561 {
562 /* For 32-bit hosts, we can re-use the host's hardware carry
563 generation by using a SUB2 opcode. We discard the low
564 part of the output. Ideally we'd combine this operation
565            with the subtraction that generated the borrow in the first place. */
566 TCGv dst_low = tcg_temp_new();
567 tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
568 cpu_cc_src, src1, cpu_cc_src2, src2);
569 tcg_temp_free(dst_low);
570 goto sub_done;
571 }
572 #endif
573 carry_32 = gen_sub32_carry32();
574 break;
575
576 default:
577 /* We need external help to produce the carry. */
578 carry_32 = tcg_temp_new_i32();
579 gen_helper_compute_C_icc(carry_32, cpu_env);
580 break;
581 }
582
583 #if TARGET_LONG_BITS == 64
584 carry = tcg_temp_new();
585 tcg_gen_extu_i32_i64(carry, carry_32);
586 #else
587 carry = carry_32;
588 #endif
589
590 tcg_gen_sub_tl(dst, src1, src2);
591 tcg_gen_sub_tl(dst, dst, carry);
592
593 tcg_temp_free_i32(carry_32);
594 #if TARGET_LONG_BITS == 64
595 tcg_temp_free(carry);
596 #endif
597
598 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
599 sub_done:
600 #endif
601 if (update_cc) {
602 tcg_gen_mov_tl(cpu_cc_src, src1);
603 tcg_gen_mov_tl(cpu_cc_src2, src2);
604 tcg_gen_mov_tl(cpu_cc_dst, dst);
605 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
606 dc->cc_op = CC_OP_SUBX;
607 }
608 }
609
610 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
611 {
612 TCGv r_temp, zero;
613
614 r_temp = tcg_temp_new();
615
616 /* old op:
617 if (!(env->y & 1))
618 T1 = 0;
619 */
620 zero = tcg_const_tl(0);
621 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
622 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
623 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
624 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
625 zero, cpu_cc_src2);
626 tcg_temp_free(zero);
627
628 // b2 = T0 & 1;
629 // env->y = (b2 << 31) | (env->y >> 1);
630 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
631 tcg_gen_shli_tl(r_temp, r_temp, 31);
632 tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
633 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
634 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
635 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
636
637 // b1 = N ^ V;
638 gen_mov_reg_N(cpu_tmp0, cpu_psr);
639 gen_mov_reg_V(r_temp, cpu_psr);
640 tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
641 tcg_temp_free(r_temp);
642
643 // T0 = (b1 << 31) | (T0 >> 1);
644 // src1 = T0;
645 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
646 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
647 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
648
649 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
650
651 tcg_gen_mov_tl(dst, cpu_cc_dst);
652 }
653
654 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
655 {
656 TCGv_i32 r_src1, r_src2;
657 TCGv_i64 r_temp, r_temp2;
658
659 r_src1 = tcg_temp_new_i32();
660 r_src2 = tcg_temp_new_i32();
661
662 tcg_gen_trunc_tl_i32(r_src1, src1);
663 tcg_gen_trunc_tl_i32(r_src2, src2);
664
665 r_temp = tcg_temp_new_i64();
666 r_temp2 = tcg_temp_new_i64();
667
668 if (sign_ext) {
669 tcg_gen_ext_i32_i64(r_temp, r_src2);
670 tcg_gen_ext_i32_i64(r_temp2, r_src1);
671 } else {
672 tcg_gen_extu_i32_i64(r_temp, r_src2);
673 tcg_gen_extu_i32_i64(r_temp2, r_src1);
674 }
675
676 tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
677
678 tcg_gen_shri_i64(r_temp, r_temp2, 32);
679 tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
680 tcg_temp_free_i64(r_temp);
681 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
682
683 tcg_gen_trunc_i64_tl(dst, r_temp2);
684
685 tcg_temp_free_i64(r_temp2);
686
687 tcg_temp_free_i32(r_src1);
688 tcg_temp_free_i32(r_src2);
689 }
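
/* Worked example: for UMUL, 0x80000000 * 2 = 0x100000000, so %y
   receives the high word 1 and the destination the low word 0 (the
   full 64-bit product on a V9 target).  For SMUL, -2 * 3 = -6 leaves
   %y = 0xffffffff. */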
690
691 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
692 {
693 /* zero-extend truncated operands before multiplication */
694 gen_op_multiply(dst, src1, src2, 0);
695 }
696
697 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
698 {
699 /* sign-extend truncated operands before multiplication */
700 gen_op_multiply(dst, src1, src2, 1);
701 }
702
703 // 1
704 static inline void gen_op_eval_ba(TCGv dst)
705 {
706 tcg_gen_movi_tl(dst, 1);
707 }
708
709 // Z
710 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
711 {
712 gen_mov_reg_Z(dst, src);
713 }
714
715 // Z | (N ^ V)
716 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
717 {
718 gen_mov_reg_N(cpu_tmp0, src);
719 gen_mov_reg_V(dst, src);
720 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
721 gen_mov_reg_Z(cpu_tmp0, src);
722 tcg_gen_or_tl(dst, dst, cpu_tmp0);
723 }
724
725 // N ^ V
726 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
727 {
728 gen_mov_reg_V(cpu_tmp0, src);
729 gen_mov_reg_N(dst, src);
730 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
731 }
732
733 // C | Z
734 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
735 {
736 gen_mov_reg_Z(cpu_tmp0, src);
737 gen_mov_reg_C(dst, src);
738 tcg_gen_or_tl(dst, dst, cpu_tmp0);
739 }
740
741 // C
742 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
743 {
744 gen_mov_reg_C(dst, src);
745 }
746
747 // V
748 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
749 {
750 gen_mov_reg_V(dst, src);
751 }
752
753 // 0
754 static inline void gen_op_eval_bn(TCGv dst)
755 {
756 tcg_gen_movi_tl(dst, 0);
757 }
758
759 // N
760 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
761 {
762 gen_mov_reg_N(dst, src);
763 }
764
765 // !Z
766 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
767 {
768 gen_mov_reg_Z(dst, src);
769 tcg_gen_xori_tl(dst, dst, 0x1);
770 }
771
772 // !(Z | (N ^ V))
773 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
774 {
775 gen_mov_reg_N(cpu_tmp0, src);
776 gen_mov_reg_V(dst, src);
777 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
778 gen_mov_reg_Z(cpu_tmp0, src);
779 tcg_gen_or_tl(dst, dst, cpu_tmp0);
780 tcg_gen_xori_tl(dst, dst, 0x1);
781 }
782
783 // !(N ^ V)
784 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
785 {
786 gen_mov_reg_V(cpu_tmp0, src);
787 gen_mov_reg_N(dst, src);
788 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
789 tcg_gen_xori_tl(dst, dst, 0x1);
790 }
791
792 // !(C | Z)
793 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
794 {
795 gen_mov_reg_Z(cpu_tmp0, src);
796 gen_mov_reg_C(dst, src);
797 tcg_gen_or_tl(dst, dst, cpu_tmp0);
798 tcg_gen_xori_tl(dst, dst, 0x1);
799 }
800
801 // !C
802 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
803 {
804 gen_mov_reg_C(dst, src);
805 tcg_gen_xori_tl(dst, dst, 0x1);
806 }
807
808 // !N
809 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
810 {
811 gen_mov_reg_N(dst, src);
812 tcg_gen_xori_tl(dst, dst, 0x1);
813 }
814
815 // !V
816 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
817 {
818 gen_mov_reg_V(dst, src);
819 tcg_gen_xori_tl(dst, dst, 0x1);
820 }
821
822 /*
823 FPSR bit field FCC1 | FCC0:
824 0 =
825 1 <
826 2 >
827 3 unordered
828 */
829 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
830 unsigned int fcc_offset)
831 {
832 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
833 tcg_gen_andi_tl(reg, reg, 0x1);
834 }
835
836 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
837 unsigned int fcc_offset)
838 {
839 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
840 tcg_gen_andi_tl(reg, reg, 0x1);
841 }
842
843 // !0: FCC0 | FCC1
844 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
845 unsigned int fcc_offset)
846 {
847 gen_mov_reg_FCC0(dst, src, fcc_offset);
848 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
849 tcg_gen_or_tl(dst, dst, cpu_tmp0);
850 }
851
852 // 1 or 2: FCC0 ^ FCC1
853 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
854 unsigned int fcc_offset)
855 {
856 gen_mov_reg_FCC0(dst, src, fcc_offset);
857 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
858 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
859 }
860
861 // 1 or 3: FCC0
862 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
863 unsigned int fcc_offset)
864 {
865 gen_mov_reg_FCC0(dst, src, fcc_offset);
866 }
867
868 // 1: FCC0 & !FCC1
869 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
870 unsigned int fcc_offset)
871 {
872 gen_mov_reg_FCC0(dst, src, fcc_offset);
873 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
874 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
875 tcg_gen_and_tl(dst, dst, cpu_tmp0);
876 }
877
878 // 2 or 3: FCC1
879 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
880 unsigned int fcc_offset)
881 {
882 gen_mov_reg_FCC1(dst, src, fcc_offset);
883 }
884
885 // 2: !FCC0 & FCC1
886 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
887 unsigned int fcc_offset)
888 {
889 gen_mov_reg_FCC0(dst, src, fcc_offset);
890 tcg_gen_xori_tl(dst, dst, 0x1);
891 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
892 tcg_gen_and_tl(dst, dst, cpu_tmp0);
893 }
894
895 // 3: FCC0 & FCC1
896 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
897 unsigned int fcc_offset)
898 {
899 gen_mov_reg_FCC0(dst, src, fcc_offset);
900 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
901 tcg_gen_and_tl(dst, dst, cpu_tmp0);
902 }
903
904 // 0: !(FCC0 | FCC1)
905 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
906 unsigned int fcc_offset)
907 {
908 gen_mov_reg_FCC0(dst, src, fcc_offset);
909 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
910 tcg_gen_or_tl(dst, dst, cpu_tmp0);
911 tcg_gen_xori_tl(dst, dst, 0x1);
912 }
913
914 // 0 or 3: !(FCC0 ^ FCC1)
915 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
916 unsigned int fcc_offset)
917 {
918 gen_mov_reg_FCC0(dst, src, fcc_offset);
919 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
920 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
921 tcg_gen_xori_tl(dst, dst, 0x1);
922 }
923
924 // 0 or 2: !FCC0
925 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
926 unsigned int fcc_offset)
927 {
928 gen_mov_reg_FCC0(dst, src, fcc_offset);
929 tcg_gen_xori_tl(dst, dst, 0x1);
930 }
931
932 // !1: !(FCC0 & !FCC1)
933 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
934 unsigned int fcc_offset)
935 {
936 gen_mov_reg_FCC0(dst, src, fcc_offset);
937 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
938 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
939 tcg_gen_and_tl(dst, dst, cpu_tmp0);
940 tcg_gen_xori_tl(dst, dst, 0x1);
941 }
942
943 // 0 or 1: !FCC1
944 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
945 unsigned int fcc_offset)
946 {
947 gen_mov_reg_FCC1(dst, src, fcc_offset);
948 tcg_gen_xori_tl(dst, dst, 0x1);
949 }
950
951 // !2: !(!FCC0 & FCC1)
952 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
953 unsigned int fcc_offset)
954 {
955 gen_mov_reg_FCC0(dst, src, fcc_offset);
956 tcg_gen_xori_tl(dst, dst, 0x1);
957 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
958 tcg_gen_and_tl(dst, dst, cpu_tmp0);
959 tcg_gen_xori_tl(dst, dst, 0x1);
960 }
961
962 // !3: !(FCC0 & FCC1)
963 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
964 unsigned int fcc_offset)
965 {
966 gen_mov_reg_FCC0(dst, src, fcc_offset);
967 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
968 tcg_gen_and_tl(dst, dst, cpu_tmp0);
969 tcg_gen_xori_tl(dst, dst, 0x1);
970 }
971
972 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
973 target_ulong pc2, TCGv r_cond)
974 {
975 int l1;
976
977 l1 = gen_new_label();
978
979 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
980
981 gen_goto_tb(dc, 0, pc1, pc1 + 4);
982
983 gen_set_label(l1);
984 gen_goto_tb(dc, 1, pc2, pc2 + 4);
985 }
986
987 static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
988 target_ulong pc2, TCGv r_cond)
989 {
990 int l1;
991
992 l1 = gen_new_label();
993
994 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
995
996 gen_goto_tb(dc, 0, pc2, pc1);
997
998 gen_set_label(l1);
999 gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
1000 }
1001
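/* Resolve a JUMP_PC npc: cpu_cond holds the evaluated branch condition,
   so a non-zero value selects jump_pc[0] (the branch target) and zero
   selects jump_pc[1] (the fall-through path). */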
1002 static inline void gen_generic_branch(DisasContext *dc)
1003 {
1004 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1005 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1006 TCGv zero = tcg_const_tl(0);
1007
1008 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1009
1010 tcg_temp_free(npc0);
1011 tcg_temp_free(npc1);
1012 tcg_temp_free(zero);
1013 }
1014
1015 /* Call this function before using the condition register, as it may
1016    still hold the condition of a pending conditional jump. */
1017 static inline void flush_cond(DisasContext *dc)
1018 {
1019 if (dc->npc == JUMP_PC) {
1020 gen_generic_branch(dc);
1021 dc->npc = DYNAMIC_PC;
1022 }
1023 }
1024
1025 static inline void save_npc(DisasContext *dc)
1026 {
1027 if (dc->npc == JUMP_PC) {
1028 gen_generic_branch(dc);
1029 dc->npc = DYNAMIC_PC;
1030 } else if (dc->npc != DYNAMIC_PC) {
1031 tcg_gen_movi_tl(cpu_npc, dc->npc);
1032 }
1033 }
1034
1035 static inline void update_psr(DisasContext *dc)
1036 {
1037 if (dc->cc_op != CC_OP_FLAGS) {
1038 dc->cc_op = CC_OP_FLAGS;
1039 gen_helper_compute_psr(cpu_env);
1040 }
1041 }
1042
1043 static inline void save_state(DisasContext *dc)
1044 {
1045 tcg_gen_movi_tl(cpu_pc, dc->pc);
1046 save_npc(dc);
1047 }
1048
1049 static inline void gen_mov_pc_npc(DisasContext *dc)
1050 {
1051 if (dc->npc == JUMP_PC) {
1052 gen_generic_branch(dc);
1053 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1054 dc->pc = DYNAMIC_PC;
1055 } else if (dc->npc == DYNAMIC_PC) {
1056 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1057 dc->pc = DYNAMIC_PC;
1058 } else {
1059 dc->pc = dc->npc;
1060 }
1061 }
1062
1063 static inline void gen_op_next_insn(void)
1064 {
1065 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1066 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1067 }
1068
1069 static void free_compare(DisasCompare *cmp)
1070 {
1071 if (!cmp->g1) {
1072 tcg_temp_free(cmp->c1);
1073 }
1074 if (!cmp->g2) {
1075 tcg_temp_free(cmp->c2);
1076 }
1077 }
1078
1079 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1080 DisasContext *dc)
1081 {
1082 static int subcc_cond[16] = {
1083 TCG_COND_NEVER,
1084 TCG_COND_EQ,
1085 TCG_COND_LE,
1086 TCG_COND_LT,
1087 TCG_COND_LEU,
1088 TCG_COND_LTU,
1089 -1, /* neg */
1090 -1, /* overflow */
1091 TCG_COND_ALWAYS,
1092 TCG_COND_NE,
1093 TCG_COND_GT,
1094 TCG_COND_GE,
1095 TCG_COND_GTU,
1096 TCG_COND_GEU,
1097 -1, /* pos */
1098 -1, /* no overflow */
1099 };
1100
1101 static int logic_cond[16] = {
1102 TCG_COND_NEVER,
1103 TCG_COND_EQ, /* eq: Z */
1104 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1105 TCG_COND_LT, /* lt: N ^ V -> N */
1106 TCG_COND_EQ, /* leu: C | Z -> Z */
1107 TCG_COND_NEVER, /* ltu: C -> 0 */
1108 TCG_COND_LT, /* neg: N */
1109 TCG_COND_NEVER, /* vs: V -> 0 */
1110 TCG_COND_ALWAYS,
1111 TCG_COND_NE, /* ne: !Z */
1112 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1113 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1114 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1115 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1116 TCG_COND_GE, /* pos: !N */
1117 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1118 };
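
    /* After a logic instruction both C and V are zero and N/Z reflect
       cc_dst, so every condition above collapses to a signed comparison
       of cc_dst against 0 (e.g. leu = C | Z reduces to Z, hence
       TCG_COND_EQ). */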
1119
1120 TCGv_i32 r_src;
1121 TCGv r_dst;
1122
1123 #ifdef TARGET_SPARC64
1124 if (xcc) {
1125 r_src = cpu_xcc;
1126 } else {
1127 r_src = cpu_psr;
1128 }
1129 #else
1130 r_src = cpu_psr;
1131 #endif
1132
1133 switch (dc->cc_op) {
1134 case CC_OP_LOGIC:
1135 cmp->cond = logic_cond[cond];
1136 do_compare_dst_0:
1137 cmp->is_bool = false;
1138 cmp->g2 = false;
1139 cmp->c2 = tcg_const_tl(0);
1140 #ifdef TARGET_SPARC64
1141 if (!xcc) {
1142 cmp->g1 = false;
1143 cmp->c1 = tcg_temp_new();
1144 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1145 break;
1146 }
1147 #endif
1148 cmp->g1 = true;
1149 cmp->c1 = cpu_cc_dst;
1150 break;
1151
1152 case CC_OP_SUB:
1153 switch (cond) {
1154 case 6: /* neg */
1155 case 14: /* pos */
1156 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1157 goto do_compare_dst_0;
1158
1159 case 7: /* overflow */
1160 case 15: /* !overflow */
1161 goto do_dynamic;
1162
1163 default:
1164 cmp->cond = subcc_cond[cond];
1165 cmp->is_bool = false;
1166 #ifdef TARGET_SPARC64
1167 if (!xcc) {
1168 /* Note that sign-extension works for unsigned compares as
1169 long as both operands are sign-extended. */
1170 cmp->g1 = cmp->g2 = false;
1171 cmp->c1 = tcg_temp_new();
1172 cmp->c2 = tcg_temp_new();
1173 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1174 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1175 break;
1176 }
1177 #endif
1178 cmp->g1 = cmp->g2 = true;
1179 cmp->c1 = cpu_cc_src;
1180 cmp->c2 = cpu_cc_src2;
1181 break;
1182 }
1183 break;
1184
1185 default:
1186 do_dynamic:
1187 gen_helper_compute_psr(cpu_env);
1188 dc->cc_op = CC_OP_FLAGS;
1189 /* FALLTHRU */
1190
1191 case CC_OP_FLAGS:
1192 /* We're going to generate a boolean result. */
1193 cmp->cond = TCG_COND_NE;
1194 cmp->is_bool = true;
1195 cmp->g1 = cmp->g2 = false;
1196 cmp->c1 = r_dst = tcg_temp_new();
1197 cmp->c2 = tcg_const_tl(0);
1198
1199 switch (cond) {
1200 case 0x0:
1201 gen_op_eval_bn(r_dst);
1202 break;
1203 case 0x1:
1204 gen_op_eval_be(r_dst, r_src);
1205 break;
1206 case 0x2:
1207 gen_op_eval_ble(r_dst, r_src);
1208 break;
1209 case 0x3:
1210 gen_op_eval_bl(r_dst, r_src);
1211 break;
1212 case 0x4:
1213 gen_op_eval_bleu(r_dst, r_src);
1214 break;
1215 case 0x5:
1216 gen_op_eval_bcs(r_dst, r_src);
1217 break;
1218 case 0x6:
1219 gen_op_eval_bneg(r_dst, r_src);
1220 break;
1221 case 0x7:
1222 gen_op_eval_bvs(r_dst, r_src);
1223 break;
1224 case 0x8:
1225 gen_op_eval_ba(r_dst);
1226 break;
1227 case 0x9:
1228 gen_op_eval_bne(r_dst, r_src);
1229 break;
1230 case 0xa:
1231 gen_op_eval_bg(r_dst, r_src);
1232 break;
1233 case 0xb:
1234 gen_op_eval_bge(r_dst, r_src);
1235 break;
1236 case 0xc:
1237 gen_op_eval_bgu(r_dst, r_src);
1238 break;
1239 case 0xd:
1240 gen_op_eval_bcc(r_dst, r_src);
1241 break;
1242 case 0xe:
1243 gen_op_eval_bpos(r_dst, r_src);
1244 break;
1245 case 0xf:
1246 gen_op_eval_bvc(r_dst, r_src);
1247 break;
1248 }
1249 break;
1250 }
1251 }
1252
1253 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1254 {
1255 unsigned int offset;
1256 TCGv r_dst;
1257
1258 /* For now we still generate a straight boolean result. */
1259 cmp->cond = TCG_COND_NE;
1260 cmp->is_bool = true;
1261 cmp->g1 = cmp->g2 = false;
1262 cmp->c1 = r_dst = tcg_temp_new();
1263 cmp->c2 = tcg_const_tl(0);
1264
1265 switch (cc) {
1266 default:
1267 case 0x0:
1268 offset = 0;
1269 break;
1270 case 0x1:
1271 offset = 32 - 10;
1272 break;
1273 case 0x2:
1274 offset = 34 - 10;
1275 break;
1276 case 0x3:
1277 offset = 36 - 10;
1278 break;
1279 }
1280
1281 switch (cond) {
1282 case 0x0:
1283 gen_op_eval_bn(r_dst);
1284 break;
1285 case 0x1:
1286 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1287 break;
1288 case 0x2:
1289 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1290 break;
1291 case 0x3:
1292 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1293 break;
1294 case 0x4:
1295 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1296 break;
1297 case 0x5:
1298 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1299 break;
1300 case 0x6:
1301 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1302 break;
1303 case 0x7:
1304 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1305 break;
1306 case 0x8:
1307 gen_op_eval_ba(r_dst);
1308 break;
1309 case 0x9:
1310 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1311 break;
1312 case 0xa:
1313 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1314 break;
1315 case 0xb:
1316 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1317 break;
1318 case 0xc:
1319 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1320 break;
1321 case 0xd:
1322 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1323 break;
1324 case 0xe:
1325 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1326 break;
1327 case 0xf:
1328 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1329 break;
1330 }
1331 }
1332
1333 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1334 DisasContext *dc)
1335 {
1336 DisasCompare cmp;
1337 gen_compare(&cmp, cc, cond, dc);
1338
1339 /* The interface is to return a boolean in r_dst. */
1340 if (cmp.is_bool) {
1341 tcg_gen_mov_tl(r_dst, cmp.c1);
1342 } else {
1343 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1344 }
1345
1346 free_compare(&cmp);
1347 }
1348
1349 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1350 {
1351 DisasCompare cmp;
1352 gen_fcompare(&cmp, cc, cond);
1353
1354 /* The interface is to return a boolean in r_dst. */
1355 if (cmp.is_bool) {
1356 tcg_gen_mov_tl(r_dst, cmp.c1);
1357 } else {
1358 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1359 }
1360
1361 free_compare(&cmp);
1362 }
1363
1364 #ifdef TARGET_SPARC64
1365 // Inverted logic: negated conditions, re-inverted by gen_compare_reg
1366 static const int gen_tcg_cond_reg[8] = {
1367 -1,
1368 TCG_COND_NE,
1369 TCG_COND_GT,
1370 TCG_COND_GE,
1371 -1,
1372 TCG_COND_EQ,
1373 TCG_COND_LE,
1374 TCG_COND_LT,
1375 };
1376
1377 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1378 {
1379 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1380 cmp->is_bool = false;
1381 cmp->g1 = true;
1382 cmp->g2 = false;
1383 cmp->c1 = r_src;
1384 cmp->c2 = tcg_const_tl(0);
1385 }
1386
1387 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1388 {
1389 DisasCompare cmp;
1390 gen_compare_reg(&cmp, cond, r_src);
1391
1392 /* The interface is to return a boolean in r_dst. */
1393 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1394
1395 free_compare(&cmp);
1396 }
1397 #endif
1398
1399 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1400 {
1401 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1402 target_ulong target = dc->pc + offset;
1403
1404 #ifdef TARGET_SPARC64
1405 if (unlikely(AM_CHECK(dc))) {
1406 target &= 0xffffffffULL;
1407 }
1408 #endif
1409 if (cond == 0x0) {
1410 /* unconditional not taken */
1411 if (a) {
1412 dc->pc = dc->npc + 4;
1413 dc->npc = dc->pc + 4;
1414 } else {
1415 dc->pc = dc->npc;
1416 dc->npc = dc->pc + 4;
1417 }
1418 } else if (cond == 0x8) {
1419 /* unconditional taken */
1420 if (a) {
1421 dc->pc = target;
1422 dc->npc = dc->pc + 4;
1423 } else {
1424 dc->pc = dc->npc;
1425 dc->npc = target;
1426 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1427 }
1428 } else {
1429 flush_cond(dc);
1430 gen_cond(cpu_cond, cc, cond, dc);
1431 if (a) {
1432 gen_branch_a(dc, target, dc->npc, cpu_cond);
1433 dc->is_br = 1;
1434 } else {
1435 dc->pc = dc->npc;
1436 dc->jump_pc[0] = target;
1437 if (unlikely(dc->npc == DYNAMIC_PC)) {
1438 dc->jump_pc[1] = DYNAMIC_PC;
1439 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1440 } else {
1441 dc->jump_pc[1] = dc->npc + 4;
1442 dc->npc = JUMP_PC;
1443 }
1444 }
1445 }
1446 }
1447
1448 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1449 {
1450 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1451 target_ulong target = dc->pc + offset;
1452
1453 #ifdef TARGET_SPARC64
1454 if (unlikely(AM_CHECK(dc))) {
1455 target &= 0xffffffffULL;
1456 }
1457 #endif
1458 if (cond == 0x0) {
1459 /* unconditional not taken */
1460 if (a) {
1461 dc->pc = dc->npc + 4;
1462 dc->npc = dc->pc + 4;
1463 } else {
1464 dc->pc = dc->npc;
1465 dc->npc = dc->pc + 4;
1466 }
1467 } else if (cond == 0x8) {
1468 /* unconditional taken */
1469 if (a) {
1470 dc->pc = target;
1471 dc->npc = dc->pc + 4;
1472 } else {
1473 dc->pc = dc->npc;
1474 dc->npc = target;
1475 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1476 }
1477 } else {
1478 flush_cond(dc);
1479 gen_fcond(cpu_cond, cc, cond);
1480 if (a) {
1481 gen_branch_a(dc, target, dc->npc, cpu_cond);
1482 dc->is_br = 1;
1483 } else {
1484 dc->pc = dc->npc;
1485 dc->jump_pc[0] = target;
1486 if (unlikely(dc->npc == DYNAMIC_PC)) {
1487 dc->jump_pc[1] = DYNAMIC_PC;
1488 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1489 } else {
1490 dc->jump_pc[1] = dc->npc + 4;
1491 dc->npc = JUMP_PC;
1492 }
1493 }
1494 }
1495 }
1496
1497 #ifdef TARGET_SPARC64
1498 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1499 TCGv r_reg)
1500 {
1501 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1502 target_ulong target = dc->pc + offset;
1503
1504 if (unlikely(AM_CHECK(dc))) {
1505 target &= 0xffffffffULL;
1506 }
1507 flush_cond(dc);
1508 gen_cond_reg(cpu_cond, cond, r_reg);
1509 if (a) {
1510 gen_branch_a(dc, target, dc->npc, cpu_cond);
1511 dc->is_br = 1;
1512 } else {
1513 dc->pc = dc->npc;
1514 dc->jump_pc[0] = target;
1515 if (unlikely(dc->npc == DYNAMIC_PC)) {
1516 dc->jump_pc[1] = DYNAMIC_PC;
1517 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1518 } else {
1519 dc->jump_pc[1] = dc->npc + 4;
1520 dc->npc = JUMP_PC;
1521 }
1522 }
1523 }
1524
1525 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1526 {
1527 switch (fccno) {
1528 case 0:
1529 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1530 break;
1531 case 1:
1532 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1533 break;
1534 case 2:
1535 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1536 break;
1537 case 3:
1538 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1539 break;
1540 }
1541 }
1542
1543 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1544 {
1545 switch (fccno) {
1546 case 0:
1547 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1548 break;
1549 case 1:
1550 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1551 break;
1552 case 2:
1553 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1554 break;
1555 case 3:
1556 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1557 break;
1558 }
1559 }
1560
1561 static inline void gen_op_fcmpq(int fccno)
1562 {
1563 switch (fccno) {
1564 case 0:
1565 gen_helper_fcmpq(cpu_env);
1566 break;
1567 case 1:
1568 gen_helper_fcmpq_fcc1(cpu_env);
1569 break;
1570 case 2:
1571 gen_helper_fcmpq_fcc2(cpu_env);
1572 break;
1573 case 3:
1574 gen_helper_fcmpq_fcc3(cpu_env);
1575 break;
1576 }
1577 }
1578
1579 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1580 {
1581 switch (fccno) {
1582 case 0:
1583 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1584 break;
1585 case 1:
1586 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1587 break;
1588 case 2:
1589 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1590 break;
1591 case 3:
1592 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1593 break;
1594 }
1595 }
1596
1597 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1598 {
1599 switch (fccno) {
1600 case 0:
1601 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1602 break;
1603 case 1:
1604 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1605 break;
1606 case 2:
1607 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1608 break;
1609 case 3:
1610 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1611 break;
1612 }
1613 }
1614
1615 static inline void gen_op_fcmpeq(int fccno)
1616 {
1617 switch (fccno) {
1618 case 0:
1619 gen_helper_fcmpeq(cpu_env);
1620 break;
1621 case 1:
1622 gen_helper_fcmpeq_fcc1(cpu_env);
1623 break;
1624 case 2:
1625 gen_helper_fcmpeq_fcc2(cpu_env);
1626 break;
1627 case 3:
1628 gen_helper_fcmpeq_fcc3(cpu_env);
1629 break;
1630 }
1631 }
1632
1633 #else
1634
1635 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1636 {
1637 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1638 }
1639
1640 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1641 {
1642 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1643 }
1644
1645 static inline void gen_op_fcmpq(int fccno)
1646 {
1647 gen_helper_fcmpq(cpu_env);
1648 }
1649
1650 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1651 {
1652 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1653 }
1654
1655 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1656 {
1657 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1658 }
1659
1660 static inline void gen_op_fcmpeq(int fccno)
1661 {
1662 gen_helper_fcmpeq(cpu_env);
1663 }
1664 #endif
1665
1666 static inline void gen_op_fpexception_im(int fsr_flags)
1667 {
1668 TCGv_i32 r_const;
1669
1670 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1671 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1672 r_const = tcg_const_i32(TT_FP_EXCP);
1673 gen_helper_raise_exception(cpu_env, r_const);
1674 tcg_temp_free_i32(r_const);
1675 }
1676
1677 static int gen_trap_ifnofpu(DisasContext *dc)
1678 {
1679 #if !defined(CONFIG_USER_ONLY)
1680 if (!dc->fpu_enabled) {
1681 TCGv_i32 r_const;
1682
1683 save_state(dc);
1684 r_const = tcg_const_i32(TT_NFPU_INSN);
1685 gen_helper_raise_exception(cpu_env, r_const);
1686 tcg_temp_free_i32(r_const);
1687 dc->is_br = 1;
1688 return 1;
1689 }
1690 #endif
1691 return 0;
1692 }
1693
1694 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1695 {
1696 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1697 }
1698
1699 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1700 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1701 {
1702 TCGv_i32 dst, src;
1703
1704 src = gen_load_fpr_F(dc, rs);
1705 dst = gen_dest_fpr_F(dc);
1706
1707 gen(dst, cpu_env, src);
1708
1709 gen_store_fpr_F(dc, rd, dst);
1710 }
1711
1712 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1713 void (*gen)(TCGv_i32, TCGv_i32))
1714 {
1715 TCGv_i32 dst, src;
1716
1717 src = gen_load_fpr_F(dc, rs);
1718 dst = gen_dest_fpr_F(dc);
1719
1720 gen(dst, src);
1721
1722 gen_store_fpr_F(dc, rd, dst);
1723 }
1724
1725 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1726 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1727 {
1728 TCGv_i32 dst, src1, src2;
1729
1730 src1 = gen_load_fpr_F(dc, rs1);
1731 src2 = gen_load_fpr_F(dc, rs2);
1732 dst = gen_dest_fpr_F(dc);
1733
1734 gen(dst, cpu_env, src1, src2);
1735
1736 gen_store_fpr_F(dc, rd, dst);
1737 }
1738
1739 #ifdef TARGET_SPARC64
1740 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1741 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1742 {
1743 TCGv_i32 dst, src1, src2;
1744
1745 src1 = gen_load_fpr_F(dc, rs1);
1746 src2 = gen_load_fpr_F(dc, rs2);
1747 dst = gen_dest_fpr_F(dc);
1748
1749 gen(dst, src1, src2);
1750
1751 gen_store_fpr_F(dc, rd, dst);
1752 }
1753 #endif
1754
1755 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1756 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1757 {
1758 TCGv_i64 dst, src;
1759
1760 src = gen_load_fpr_D(dc, rs);
1761 dst = gen_dest_fpr_D();
1762
1763 gen(dst, cpu_env, src);
1764
1765 gen_store_fpr_D(dc, rd, dst);
1766 }
1767
1768 #ifdef TARGET_SPARC64
1769 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1770 void (*gen)(TCGv_i64, TCGv_i64))
1771 {
1772 TCGv_i64 dst, src;
1773
1774 src = gen_load_fpr_D(dc, rs);
1775 dst = gen_dest_fpr_D();
1776
1777 gen(dst, src);
1778
1779 gen_store_fpr_D(dc, rd, dst);
1780 }
1781 #endif
1782
1783 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1784 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1785 {
1786 TCGv_i64 dst, src1, src2;
1787
1788 src1 = gen_load_fpr_D(dc, rs1);
1789 src2 = gen_load_fpr_D(dc, rs2);
1790 dst = gen_dest_fpr_D();
1791
1792 gen(dst, cpu_env, src1, src2);
1793
1794 gen_store_fpr_D(dc, rd, dst);
1795 }
1796
1797 #ifdef TARGET_SPARC64
1798 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1799 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1800 {
1801 TCGv_i64 dst, src1, src2;
1802
1803 src1 = gen_load_fpr_D(dc, rs1);
1804 src2 = gen_load_fpr_D(dc, rs2);
1805 dst = gen_dest_fpr_D();
1806
1807 gen(dst, src1, src2);
1808
1809 gen_store_fpr_D(dc, rd, dst);
1810 }
1811
1812 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1813 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1814 {
1815 TCGv_i64 dst, src1, src2;
1816
1817 src1 = gen_load_fpr_D(dc, rs1);
1818 src2 = gen_load_fpr_D(dc, rs2);
1819 dst = gen_dest_fpr_D();
1820
1821 gen(dst, cpu_gsr, src1, src2);
1822
1823 gen_store_fpr_D(dc, rd, dst);
1824 }
1825
1826 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1827 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1828 {
1829 TCGv_i64 dst, src0, src1, src2;
1830
1831 src1 = gen_load_fpr_D(dc, rs1);
1832 src2 = gen_load_fpr_D(dc, rs2);
1833 src0 = gen_load_fpr_D(dc, rd);
1834 dst = gen_dest_fpr_D();
1835
1836 gen(dst, src0, src1, src2);
1837
1838 gen_store_fpr_D(dc, rd, dst);
1839 }
1840 #endif
1841
1842 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1843 void (*gen)(TCGv_ptr))
1844 {
1845 gen_op_load_fpr_QT1(QFPREG(rs));
1846
1847 gen(cpu_env);
1848
1849 gen_op_store_QT0_fpr(QFPREG(rd));
1850 gen_update_fprs_dirty(QFPREG(rd));
1851 }
1852
1853 #ifdef TARGET_SPARC64
1854 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1855 void (*gen)(TCGv_ptr))
1856 {
1857 gen_op_load_fpr_QT1(QFPREG(rs));
1858
1859 gen(cpu_env);
1860
1861 gen_op_store_QT0_fpr(QFPREG(rd));
1862 gen_update_fprs_dirty(QFPREG(rd));
1863 }
1864 #endif
1865
1866 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1867 void (*gen)(TCGv_ptr))
1868 {
1869 gen_op_load_fpr_QT0(QFPREG(rs1));
1870 gen_op_load_fpr_QT1(QFPREG(rs2));
1871
1872 gen(cpu_env);
1873
1874 gen_op_store_QT0_fpr(QFPREG(rd));
1875 gen_update_fprs_dirty(QFPREG(rd));
1876 }
1877
1878 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1879 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1880 {
1881 TCGv_i64 dst;
1882 TCGv_i32 src1, src2;
1883
1884 src1 = gen_load_fpr_F(dc, rs1);
1885 src2 = gen_load_fpr_F(dc, rs2);
1886 dst = gen_dest_fpr_D();
1887
1888 gen(dst, cpu_env, src1, src2);
1889
1890 gen_store_fpr_D(dc, rd, dst);
1891 }
1892
1893 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1894 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1895 {
1896 TCGv_i64 src1, src2;
1897
1898 src1 = gen_load_fpr_D(dc, rs1);
1899 src2 = gen_load_fpr_D(dc, rs2);
1900
1901 gen(cpu_env, src1, src2);
1902
1903 gen_op_store_QT0_fpr(QFPREG(rd));
1904 gen_update_fprs_dirty(QFPREG(rd));
1905 }
1906
1907 #ifdef TARGET_SPARC64
1908 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1909 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1910 {
1911 TCGv_i64 dst;
1912 TCGv_i32 src;
1913
1914 src = gen_load_fpr_F(dc, rs);
1915 dst = gen_dest_fpr_D();
1916
1917 gen(dst, cpu_env, src);
1918
1919 gen_store_fpr_D(dc, rd, dst);
1920 }
1921 #endif
1922
1923 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1924 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1925 {
1926 TCGv_i64 dst;
1927 TCGv_i32 src;
1928
1929 src = gen_load_fpr_F(dc, rs);
1930 dst = gen_dest_fpr_D();
1931
1932 gen(dst, cpu_env, src);
1933
1934 gen_store_fpr_D(dc, rd, dst);
1935 }
1936
1937 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1938 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1939 {
1940 TCGv_i32 dst;
1941 TCGv_i64 src;
1942
1943 src = gen_load_fpr_D(dc, rs);
1944 dst = gen_dest_fpr_F(dc);
1945
1946 gen(dst, cpu_env, src);
1947
1948 gen_store_fpr_F(dc, rd, dst);
1949 }
1950
1951 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1952 void (*gen)(TCGv_i32, TCGv_ptr))
1953 {
1954 TCGv_i32 dst;
1955
1956 gen_op_load_fpr_QT1(QFPREG(rs));
1957 dst = gen_dest_fpr_F(dc);
1958
1959 gen(dst, cpu_env);
1960
1961 gen_store_fpr_F(dc, rd, dst);
1962 }
1963
1964 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1965 void (*gen)(TCGv_i64, TCGv_ptr))
1966 {
1967 TCGv_i64 dst;
1968
1969 gen_op_load_fpr_QT1(QFPREG(rs));
1970 dst = gen_dest_fpr_D();
1971
1972 gen(dst, cpu_env);
1973
1974 gen_store_fpr_D(dc, rd, dst);
1975 }
1976
1977 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1978 void (*gen)(TCGv_ptr, TCGv_i32))
1979 {
1980 TCGv_i32 src;
1981
1982 src = gen_load_fpr_F(dc, rs);
1983
1984 gen(cpu_env, src);
1985
1986 gen_op_store_QT0_fpr(QFPREG(rd));
1987 gen_update_fprs_dirty(QFPREG(rd));
1988 }
1989
1990 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1991 void (*gen)(TCGv_ptr, TCGv_i64))
1992 {
1993 TCGv_i64 src;
1994
1995 src = gen_load_fpr_D(dc, rs);
1996
1997 gen(cpu_env, src);
1998
1999 gen_op_store_QT0_fpr(QFPREG(rd));
2000 gen_update_fprs_dirty(QFPREG(rd));
2001 }
2002
2003 /* asi moves */
2004 #ifdef TARGET_SPARC64
2005 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
2006 {
2007 int asi;
2008 TCGv_i32 r_asi;
2009
2010 if (IS_IMM) {
2011 r_asi = tcg_temp_new_i32();
2012 tcg_gen_mov_i32(r_asi, cpu_asi);
2013 } else {
2014 asi = GET_FIELD(insn, 19, 26);
2015 r_asi = tcg_const_i32(asi);
2016 }
2017 return r_asi;
2018 }
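
/* In the immediate form the ASI comes from the %asi register; in the
   register form it is the 8-bit field at insn<12:5>, which
   GET_FIELD(insn, 19, 26) extracts in this file's inverted bit
   order. */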
2019
2020 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2021 int sign)
2022 {
2023 TCGv_i32 r_asi, r_size, r_sign;
2024
2025 r_asi = gen_get_asi(insn, addr);
2026 r_size = tcg_const_i32(size);
2027 r_sign = tcg_const_i32(sign);
2028 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
2029 tcg_temp_free_i32(r_sign);
2030 tcg_temp_free_i32(r_size);
2031 tcg_temp_free_i32(r_asi);
2032 }
2033
2034 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2035 {
2036 TCGv_i32 r_asi, r_size;
2037
2038 r_asi = gen_get_asi(insn, addr);
2039 r_size = tcg_const_i32(size);
2040 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2041 tcg_temp_free_i32(r_size);
2042 tcg_temp_free_i32(r_asi);
2043 }
2044
2045 static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
2046 {
2047 TCGv_i32 r_asi, r_size, r_rd;
2048
2049 r_asi = gen_get_asi(insn, addr);
2050 r_size = tcg_const_i32(size);
2051 r_rd = tcg_const_i32(rd);
2052 gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2053 tcg_temp_free_i32(r_rd);
2054 tcg_temp_free_i32(r_size);
2055 tcg_temp_free_i32(r_asi);
2056 }
2057
2058 static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
2059 {
2060 TCGv_i32 r_asi, r_size, r_rd;
2061
2062 r_asi = gen_get_asi(insn, addr);
2063 r_size = tcg_const_i32(size);
2064 r_rd = tcg_const_i32(rd);
2065 gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2066 tcg_temp_free_i32(r_rd);
2067 tcg_temp_free_i32(r_size);
2068 tcg_temp_free_i32(r_asi);
2069 }
2070
2071 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2072 {
2073 TCGv_i32 r_asi, r_size, r_sign;
2074
2075 r_asi = gen_get_asi(insn, addr);
2076 r_size = tcg_const_i32(4);
2077 r_sign = tcg_const_i32(0);
2078 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2079 tcg_temp_free_i32(r_sign);
2080 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2081 tcg_temp_free_i32(r_size);
2082 tcg_temp_free_i32(r_asi);
2083 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2084 }
2085
2086 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2087 int insn, int rd)
2088 {
2089 TCGv_i32 r_asi, r_rd;
2090
2091 r_asi = gen_get_asi(insn, addr);
2092 r_rd = tcg_const_i32(rd);
2093 gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
2094 tcg_temp_free_i32(r_rd);
2095 tcg_temp_free_i32(r_asi);
2096 }
2097
2098 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2099 int insn, int rd)
2100 {
2101 TCGv_i32 r_asi, r_size;
2102 TCGv lo = gen_load_gpr(dc, rd + 1);
2103
2104 tcg_gen_concat_tl_i64(cpu_tmp64, lo, hi);
2105 r_asi = gen_get_asi(insn, addr);
2106 r_size = tcg_const_i32(8);
2107 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2108 tcg_temp_free_i32(r_size);
2109 tcg_temp_free_i32(r_asi);
2110 }
2111
2112 static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
2113 TCGv val2, int insn, int rd)
2114 {
2115 TCGv val1 = gen_load_gpr(dc, rd);
2116 TCGv dst = gen_dest_gpr(dc, rd);
2117 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2118
2119 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2120 tcg_temp_free_i32(r_asi);
2121 gen_store_gpr(dc, rd, dst);
2122 }
2123
2124 static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
2125 TCGv val2, int insn, int rd)
2126 {
2127 TCGv val1 = gen_load_gpr(dc, rd);
2128 TCGv dst = gen_dest_gpr(dc, rd);
2129 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2130
2131 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2132 tcg_temp_free_i32(r_asi);
2133 gen_store_gpr(dc, rd, dst);
2134 }
2135
2136 #elif !defined(CONFIG_USER_ONLY)
2137
2138 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2139 int sign)
2140 {
2141 TCGv_i32 r_asi, r_size, r_sign;
2142
2143 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2144 r_size = tcg_const_i32(size);
2145 r_sign = tcg_const_i32(sign);
2146 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2147 tcg_temp_free(r_sign);
2148 tcg_temp_free(r_size);
2149 tcg_temp_free(r_asi);
2150 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2151 }
2152
2153 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2154 {
2155 TCGv_i32 r_asi, r_size;
2156
2157 tcg_gen_extu_tl_i64(cpu_tmp64, src);
2158 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2159 r_size = tcg_const_i32(size);
2160 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2161 tcg_temp_free(r_size);
2162 tcg_temp_free(r_asi);
2163 }
2164
2165 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2166 {
2167 TCGv_i32 r_asi, r_size, r_sign;
2168 TCGv_i64 r_val;
2169
2170 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2171 r_size = tcg_const_i32(4);
2172 r_sign = tcg_const_i32(0);
2173 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2174 tcg_temp_free(r_sign);
2175 r_val = tcg_temp_new_i64();
2176 tcg_gen_extu_tl_i64(r_val, src);
2177 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2178 tcg_temp_free_i64(r_val);
2179 tcg_temp_free(r_size);
2180 tcg_temp_free(r_asi);
2181 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2182 }
2183
2184 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2185 int insn, int rd)
2186 {
2187 TCGv_i32 r_asi, r_size, r_sign;
2188 TCGv t;
2189
2190 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2191 r_size = tcg_const_i32(8);
2192 r_sign = tcg_const_i32(0);
2193 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2194 tcg_temp_free(r_sign);
2195 tcg_temp_free(r_size);
2196 tcg_temp_free(r_asi);
2197
2198 t = gen_dest_gpr(dc, rd + 1);
2199 tcg_gen_trunc_i64_tl(t, cpu_tmp64);
2200 gen_store_gpr(dc, rd + 1, t);
2201
2202 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
2203 tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
2204 gen_store_gpr(dc, rd, hi);
2205 }
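/* Note on the split above: bits 63:32 of the loaded doubleword go to
   the even register rd and bits 31:0 to rd + 1, i.e. conceptually
       r[rd + 1] = (uint32_t)val;          // trunc_i64_tl
       r[rd]     = (uint32_t)(val >> 32);  // shri_i64 + trunc
   matching the big-endian LDD register-pair layout.  */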
2206
2207 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2208 int insn, int rd)
2209 {
2210 TCGv_i32 r_asi, r_size;
2211 TCGv lo = gen_load_gpr(dc, rd + 1);
2212
2213 tcg_gen_concat_tl_i64(cpu_tmp64, lo, hi);
2214 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2215 r_size = tcg_const_i32(8);
2216 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2217     tcg_temp_free_i32(r_size);
2218     tcg_temp_free_i32(r_asi);
2219 }
2220 #endif
2221
2222 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2223 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2224 {
2225 TCGv_i64 r_val;
2226 TCGv_i32 r_asi, r_size;
2227
2228 gen_ld_asi(dst, addr, insn, 1, 0);
2229
2230 r_val = tcg_const_i64(0xffULL);
2231 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2232 r_size = tcg_const_i32(1);
2233 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2234 tcg_temp_free_i32(r_size);
2235 tcg_temp_free_i32(r_asi);
2236 tcg_temp_free_i64(r_val);
2237 }
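/* LDSTUB is emitted as a plain byte load followed by an unconditional
   store of 0xff through the same ASI:
       dst = *(uint8_t *)addr;    // gen_ld_asi(..., 1, 0)
       *(uint8_t *)addr = 0xff;   // gen_helper_st_asi(..., size 1)
   which yields the architectural test-and-set behaviour under QEMU's
   serialized TCG execution.  */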
2238 #endif
2239
2240 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2241 {
2242 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2243 return gen_load_gpr(dc, rs1);
2244 }
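/* Worked example of the reversed bit numbering: GET_FIELD(insn, 13, 17)
   treats bit 0 as the MSB, so it extracts instruction bits [18:14] --
   the rs1 field of a format-3 instruction.  In manual-order notation
   the same field would be GET_FIELD_SP(insn, 14, 18).  */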
2245
2246 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2247 {
2248 if (IS_IMM) { /* immediate */
2249 target_long simm = GET_FIELDs(insn, 19, 31);
2250 TCGv t = get_temp_tl(dc);
2251 tcg_gen_movi_tl(t, simm);
2252 return t;
2253 } else { /* register */
2254 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2255 return gen_load_gpr(dc, rs2);
2256 }
2257 }
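/* GET_FIELDs(insn, 19, 31) pulls the 13-bit simm13 field and
   sign-extends it, so an encoded 0x1fff becomes the target_long -1.  */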
2258
2259 #ifdef TARGET_SPARC64
2260 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2261 {
2262 TCGv_i32 c32, zero, dst, s1, s2;
2263
2264 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2265 or fold the comparison down to 32 bits and use movcond_i32. Choose
2266        the latter.  */
2267 c32 = tcg_temp_new_i32();
2268 if (cmp->is_bool) {
2269 tcg_gen_trunc_i64_i32(c32, cmp->c1);
2270 } else {
2271 TCGv_i64 c64 = tcg_temp_new_i64();
2272 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2273 tcg_gen_trunc_i64_i32(c32, c64);
2274 tcg_temp_free_i64(c64);
2275 }
2276
2277 s1 = gen_load_fpr_F(dc, rs);
2278 s2 = gen_load_fpr_F(dc, rd);
2279 dst = gen_dest_fpr_F(dc);
2280 zero = tcg_const_i32(0);
2281
2282 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2283
2284 tcg_temp_free_i32(c32);
2285 tcg_temp_free_i32(zero);
2286 gen_store_fpr_F(dc, rd, dst);
2287 }
2288
2289 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2290 {
2291 TCGv_i64 dst = gen_dest_fpr_D();
2292 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2293 gen_load_fpr_D(dc, rs),
2294 gen_load_fpr_D(dc, rd));
2295 gen_store_fpr_D(dc, rd, dst);
2296 }
2297
2298 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2299 {
2300 int qd = QFPREG(rd);
2301 int qs = QFPREG(rs);
2302
2303 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2304 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2305 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2306 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2307
2308 gen_update_fprs_dirty(qd);
2309 }
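/* There is no 128-bit TCG value type, so the quad conditional move is
   performed as two independent movcond_i64 ops over the halves
   cpu_fpr[qd / 2] and cpu_fpr[qd / 2 + 1], with a single dirty-flag
   update for the destination quad register.  */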
2310
2311 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2312 {
2313 TCGv_i32 r_tl = tcg_temp_new_i32();
2314
2315 /* load env->tl into r_tl */
2316 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2317
2318 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2319 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2320
2321 /* calculate offset to current trap state from env->ts, reuse r_tl */
2322 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2323 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2324
2325 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2326 {
2327 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2328 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2329 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2330 tcg_temp_free_ptr(r_tl_tmp);
2331 }
2332
2333 tcg_temp_free_i32(r_tl);
2334 }
2335
2336 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2337 int width, bool cc, bool left)
2338 {
2339 TCGv lo1, lo2, t1, t2;
2340 uint64_t amask, tabl, tabr;
2341 int shift, imask, omask;
2342
2343 if (cc) {
2344 tcg_gen_mov_tl(cpu_cc_src, s1);
2345 tcg_gen_mov_tl(cpu_cc_src2, s2);
2346 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2347 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2348 dc->cc_op = CC_OP_SUB;
2349 }
2350
2351 /* Theory of operation: there are two tables, left and right (not to
2352 be confused with the left and right versions of the opcode). These
2353 are indexed by the low 3 bits of the inputs. To make things "easy",
2354 these tables are loaded into two constants, TABL and TABR below.
2355 The operation index = (input & imask) << shift calculates the index
2356 into the constant, while val = (table >> index) & omask calculates
2357 the value we're looking for. */
2358 switch (width) {
2359 case 8:
2360 imask = 0x7;
2361 shift = 3;
2362 omask = 0xff;
2363 if (left) {
2364 tabl = 0x80c0e0f0f8fcfeffULL;
2365 tabr = 0xff7f3f1f0f070301ULL;
2366 } else {
2367 tabl = 0x0103070f1f3f7fffULL;
2368 tabr = 0xfffefcf8f0e0c080ULL;
2369 }
2370 break;
2371 case 16:
2372 imask = 0x6;
2373 shift = 1;
2374 omask = 0xf;
2375 if (left) {
2376 tabl = 0x8cef;
2377 tabr = 0xf731;
2378 } else {
2379 tabl = 0x137f;
2380 tabr = 0xfec8;
2381 }
2382 break;
2383 case 32:
2384 imask = 0x4;
2385 shift = 0;
2386 omask = 0x3;
2387 if (left) {
2388 tabl = (2 << 2) | 3;
2389 tabr = (3 << 2) | 1;
2390 } else {
2391 tabl = (1 << 2) | 3;
2392 tabr = (3 << 2) | 2;
2393 }
2394 break;
2395 default:
2396 abort();
2397 }
2398
2399 lo1 = tcg_temp_new();
2400 lo2 = tcg_temp_new();
2401 tcg_gen_andi_tl(lo1, s1, imask);
2402 tcg_gen_andi_tl(lo2, s2, imask);
2403 tcg_gen_shli_tl(lo1, lo1, shift);
2404 tcg_gen_shli_tl(lo2, lo2, shift);
2405
2406 t1 = tcg_const_tl(tabl);
2407 t2 = tcg_const_tl(tabr);
2408 tcg_gen_shr_tl(lo1, t1, lo1);
2409 tcg_gen_shr_tl(lo2, t2, lo2);
2410 tcg_gen_andi_tl(dst, lo1, omask);
2411 tcg_gen_andi_tl(lo2, lo2, omask);
2412
2413 amask = -8;
2414 if (AM_CHECK(dc)) {
2415 amask &= 0xffffffffULL;
2416 }
2417 tcg_gen_andi_tl(s1, s1, amask);
2418 tcg_gen_andi_tl(s2, s2, amask);
2419
2420 /* We want to compute
2421 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2422 We've already done dst = lo1, so this reduces to
2423 dst &= (s1 == s2 ? -1 : lo2)
2424 Which we perform by
2425 lo2 |= -(s1 == s2)
2426 dst &= lo2
2427 */
2428 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2429 tcg_gen_neg_tl(t1, t1);
2430 tcg_gen_or_tl(lo2, lo2, t1);
2431 tcg_gen_and_tl(dst, dst, lo2);
2432
2433 tcg_temp_free(lo1);
2434 tcg_temp_free(lo2);
2435 tcg_temp_free(t1);
2436 tcg_temp_free(t2);
2437 }
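/* Hedged worked example for the table lookup above (edge8, left):
   with the low three bits of s1 equal to 3,
       index = (3 & 0x7) << 3 = 24
       mask  = (0x80c0e0f0f8fcfeffULL >> 24) & 0xff = 0xf8
   i.e. the left-edge mask for a start offset of 3 within the 8-byte
   group.  */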
2438
2439 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2440 {
2441 TCGv tmp = tcg_temp_new();
2442
2443 tcg_gen_add_tl(tmp, s1, s2);
2444 tcg_gen_andi_tl(dst, tmp, -8);
2445 if (left) {
2446 tcg_gen_neg_tl(tmp, tmp);
2447 }
2448 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2449
2450 tcg_temp_free(tmp);
2451 }
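/* Effect sketch, using the usual VIS naming:
       dst       = (s1 + s2) & ~7;        // aligned address
       GSR.align =  (s1 + s2) & 7;        // alignaddr
       GSR.align = -(s1 + s2) & 7;        // alignaddrl (left == true)
   where GSR.align is the 3-bit deposit into cpu_gsr above.  */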
2452
2453 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2454 {
2455 TCGv t1, t2, shift;
2456
2457 t1 = tcg_temp_new();
2458 t2 = tcg_temp_new();
2459 shift = tcg_temp_new();
2460
2461 tcg_gen_andi_tl(shift, gsr, 7);
2462 tcg_gen_shli_tl(shift, shift, 3);
2463 tcg_gen_shl_tl(t1, s1, shift);
2464
2465 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2466 shift of (up to 63) followed by a constant shift of 1. */
2467 tcg_gen_xori_tl(shift, shift, 63);
2468 tcg_gen_shr_tl(t2, s2, shift);
2469 tcg_gen_shri_tl(t2, t2, 1);
2470
2471 tcg_gen_or_tl(dst, t1, t2);
2472
2473 tcg_temp_free(t1);
2474 tcg_temp_free(t2);
2475 tcg_temp_free(shift);
2476 }
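/* Why xori by 63 works: the shift amount is a multiple of 8 in
   [0, 56], so its set bits are a subset of 63's and
       shift ^ 63 == 63 - shift.
   A right shift by (63 - shift) followed by a constant shift of 1 is
   thus exactly a shift by (64 - shift), without ever asking TCG for a
   shift count of 64, which would not produce 0.  */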
2477 #endif
2478
2479 #define CHECK_IU_FEATURE(dc, FEATURE) \
2480 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2481 goto illegal_insn;
2482 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2483 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2484 goto nfpu_insn;
2485
2486 /* before an instruction, dc->pc must be static */
2487 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2488 {
2489 unsigned int opc, rs1, rs2, rd;
2490 TCGv cpu_src1, cpu_src2;
2491 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2492 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2493 target_long simm;
2494
2495 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2496 tcg_gen_debug_insn_start(dc->pc);
2497 }
2498
2499 opc = GET_FIELD(insn, 0, 1);
2500
2501 rd = GET_FIELD(insn, 2, 6);
2502
2503 switch (opc) {
2504 case 0: /* branches/sethi */
2505 {
2506 unsigned int xop = GET_FIELD(insn, 7, 9);
2507 int32_t target;
2508 switch (xop) {
2509 #ifdef TARGET_SPARC64
2510 case 0x1: /* V9 BPcc */
2511 {
2512 int cc;
2513
2514 target = GET_FIELD_SP(insn, 0, 18);
2515 target = sign_extend(target, 19);
2516 target <<= 2;
2517 cc = GET_FIELD_SP(insn, 20, 21);
2518 if (cc == 0)
2519 do_branch(dc, target, insn, 0);
2520 else if (cc == 2)
2521 do_branch(dc, target, insn, 1);
2522 else
2523 goto illegal_insn;
2524 goto jmp_insn;
2525 }
2526 case 0x3: /* V9 BPr */
2527 {
2528 target = GET_FIELD_SP(insn, 0, 13) |
2529 (GET_FIELD_SP(insn, 20, 21) << 14);
2530 target = sign_extend(target, 16);
2531 target <<= 2;
2532 cpu_src1 = get_src1(dc, insn);
2533 do_branch_reg(dc, target, insn, cpu_src1);
2534 goto jmp_insn;
2535 }
2536 case 0x5: /* V9 FBPcc */
2537 {
2538 int cc = GET_FIELD_SP(insn, 20, 21);
2539 if (gen_trap_ifnofpu(dc)) {
2540 goto jmp_insn;
2541 }
2542 target = GET_FIELD_SP(insn, 0, 18);
2543 target = sign_extend(target, 19);
2544 target <<= 2;
2545 do_fbranch(dc, target, insn, cc);
2546 goto jmp_insn;
2547 }
2548 #else
2549 case 0x7: /* CBN+x */
2550 {
2551 goto ncp_insn;
2552 }
2553 #endif
2554 case 0x2: /* BN+x */
2555 {
2556 target = GET_FIELD(insn, 10, 31);
2557 target = sign_extend(target, 22);
2558 target <<= 2;
2559 do_branch(dc, target, insn, 0);
2560 goto jmp_insn;
2561 }
2562 case 0x6: /* FBN+x */
2563 {
2564 if (gen_trap_ifnofpu(dc)) {
2565 goto jmp_insn;
2566 }
2567 target = GET_FIELD(insn, 10, 31);
2568 target = sign_extend(target, 22);
2569 target <<= 2;
2570 do_fbranch(dc, target, insn, 0);
2571 goto jmp_insn;
2572 }
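            /* Illustration for the SETHI case below (operands
               hypothetical): "sethi %hi(0x12345400), %o0" encodes
               imm22 = 0x48d15 and stores 0x48d15 << 10 = 0x12345400
               into %o0; with rd == %g0 the encoding is the canonical
               nop and no code is emitted at all.  */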
2573 case 0x4: /* SETHI */
2574 /* Special-case %g0 because that's the canonical nop. */
2575 if (rd) {
2576 uint32_t value = GET_FIELD(insn, 10, 31);
2577 TCGv t = gen_dest_gpr(dc, rd);
2578 tcg_gen_movi_tl(t, value << 10);
2579 gen_store_gpr(dc, rd, t);
2580 }
2581 break;
2582 case 0x0: /* UNIMPL */
2583 default:
2584 goto illegal_insn;
2585 }
2586 break;
2587 }
2588 break;
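    /* Illustration for the CALL case below: disp30 is a word
       displacement, so target = pc + (sign-extended disp30 << 2);
       %o7 (r15) receives the address of the CALL itself, and control
       flows through npc so the delay slot still executes.  */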
2589     case 1:                     /* CALL */
2590 {
2591 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2592 TCGv o7 = gen_dest_gpr(dc, 15);
2593
2594 tcg_gen_movi_tl(o7, dc->pc);
2595 gen_store_gpr(dc, 15, o7);
2596 target += dc->pc;
2597 gen_mov_pc_npc(dc);
2598 #ifdef TARGET_SPARC64
2599 if (unlikely(AM_CHECK(dc))) {
2600 target &= 0xffffffffULL;
2601 }
2602 #endif
2603 dc->npc = target;
2604 }
2605 goto jmp_insn;
2606 case 2: /* FPU & Logical Operations */
2607 {
2608 unsigned int xop = GET_FIELD(insn, 7, 12);
2609 if (xop == 0x3a) { /* generate trap */
2610 int cond = GET_FIELD(insn, 3, 6);
2611 TCGv_i32 trap;
2612 int l1 = -1, mask;
2613
2614 if (cond == 0) {
2615 /* Trap never. */
2616 break;
2617 }
2618
2619 save_state(dc);
2620
2621 if (cond != 8) {
2622 /* Conditional trap. */
2623 DisasCompare cmp;
2624 #ifdef TARGET_SPARC64
2625 /* V9 icc/xcc */
2626 int cc = GET_FIELD_SP(insn, 11, 12);
2627 if (cc == 0) {
2628 gen_compare(&cmp, 0, cond, dc);
2629 } else if (cc == 2) {
2630 gen_compare(&cmp, 1, cond, dc);
2631 } else {
2632 goto illegal_insn;
2633 }
2634 #else
2635 gen_compare(&cmp, 0, cond, dc);
2636 #endif
2637 l1 = gen_new_label();
2638 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2639 cmp.c1, cmp.c2, l1);
2640 free_compare(&cmp);
2641 }
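                /* The inverted brcond above branches to l1 -- past the
                   raise_exception call -- precisely when the trap
                   condition does not hold, so a conditional trap falls
                   through to the next insn; l1 is bound by
                   gen_set_label at the end of this block.  */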
2642
2643 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2644 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2645
2646 /* Don't use the normal temporaries, as they may well have
2647 gone out of scope with the branch above. While we're
2648 doing that we might as well pre-truncate to 32-bit. */
2649 trap = tcg_temp_new_i32();
2650
2651 rs1 = GET_FIELD_SP(insn, 14, 18);
2652 if (IS_IMM) {
2653 rs2 = GET_FIELD_SP(insn, 0, 6);
2654 if (rs1 == 0) {
2655 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2656 /* Signal that the trap value is fully constant. */
2657 mask = 0;
2658 } else {
2659 TCGv t1 = gen_load_gpr(dc, rs1);
2660 tcg_gen_trunc_tl_i32(trap, t1);
2661 tcg_gen_addi_i32(trap, trap, rs2);
2662 }
2663 } else {
2664 TCGv t1, t2;
2665 rs2 = GET_FIELD_SP(insn, 0, 4);
2666 t1 = gen_load_gpr(dc, rs1);
2667 t2 = gen_load_gpr(dc, rs2);
2668 tcg_gen_add_tl(t1, t1, t2);
2669 tcg_gen_trunc_tl_i32(trap, t1);
2670 }
2671 if (mask != 0) {
2672 tcg_gen_andi_i32(trap, trap, mask);
2673 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2674 }
2675
2676 gen_helper_raise_exception(cpu_env, trap);
2677 tcg_temp_free_i32(trap);
2678
2679 if (cond == 8) {
2680 /* An unconditional trap ends the TB. */
2681 dc->is_br = 1;
2682 goto jmp_insn;
2683 } else {
2684 /* A conditional trap falls through to the next insn. */
2685 gen_set_label(l1);
2686 break;
2687 }
2688 } else if (xop == 0x28) {
2689 rs1 = GET_FIELD(insn, 13, 17);
2690 switch(rs1) {
2691 case 0: /* rdy */
2692 #ifndef TARGET_SPARC64
2693 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2694 manual, rdy on the microSPARC
2695 II */
2696 case 0x0f: /* stbar in the SPARCv8 manual,
2697 rdy on the microSPARC II */
2698 case 0x10 ... 0x1f: /* implementation-dependent in the
2699 SPARCv8 manual, rdy on the
2700 microSPARC II */
2701 /* Read Asr17 */
2702 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2703 TCGv t = gen_dest_gpr(dc, rd);
2704 /* Read Asr17 for a Leon3 monoprocessor */
2705 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2706 gen_store_gpr(dc, rd, t);
2707 break;
2708 }
2709 #endif
2710 gen_store_gpr(dc, rd, cpu_y);
2711 break;
2712 #ifdef TARGET_SPARC64
2713 case 0x2: /* V9 rdccr */
2714 update_psr(dc);
2715 gen_helper_rdccr(cpu_dst, cpu_env);
2716 gen_store_gpr(dc, rd, cpu_dst);
2717 break;
2718 case 0x3: /* V9 rdasi */
2719 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2720 gen_store_gpr(dc, rd, cpu_dst);
2721 break;
2722 case 0x4: /* V9 rdtick */
2723 {
2724 TCGv_ptr r_tickptr;
2725
2726 r_tickptr = tcg_temp_new_ptr();
2727 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2728 offsetof(CPUSPARCState, tick));
2729 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2730 tcg_temp_free_ptr(r_tickptr);
2731 gen_store_gpr(dc, rd, cpu_dst);
2732 }
2733 break;
2734 case 0x5: /* V9 rdpc */
2735 {
2736 TCGv t = gen_dest_gpr(dc, rd);
2737 if (unlikely(AM_CHECK(dc))) {
2738 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2739 } else {
2740 tcg_gen_movi_tl(t, dc->pc);
2741 }
2742 gen_store_gpr(dc, rd, t);
2743 }
2744 break;
2745 case 0x6: /* V9 rdfprs */
2746 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2747 gen_store_gpr(dc, rd, cpu_dst);
2748 break;
2749 case 0xf: /* V9 membar */
2750 break; /* no effect */
2751 case 0x13: /* Graphics Status */
2752 if (gen_trap_ifnofpu(dc)) {
2753 goto jmp_insn;
2754 }
2755 gen_store_gpr(dc, rd, cpu_gsr);
2756 break;
2757 case 0x16: /* Softint */
2758 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2759 gen_store_gpr(dc, rd, cpu_dst);
2760 break;
2761 case 0x17: /* Tick compare */
2762 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2763 break;
2764 case 0x18: /* System tick */
2765 {
2766 TCGv_ptr r_tickptr;
2767
2768 r_tickptr = tcg_temp_new_ptr();
2769 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2770 offsetof(CPUSPARCState, stick));
2771 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2772 tcg_temp_free_ptr(r_tickptr);
2773 gen_store_gpr(dc, rd, cpu_dst);
2774 }
2775 break;
2776 case 0x19: /* System tick compare */
2777 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2778 break;
2779 case 0x10: /* Performance Control */
2780 case 0x11: /* Performance Instrumentation Counter */
2781 case 0x12: /* Dispatch Control */
2782 case 0x14: /* Softint set, WO */
2783 case 0x15: /* Softint clear, WO */
2784 #endif
2785 default:
2786 goto illegal_insn;
2787 }
2788 #if !defined(CONFIG_USER_ONLY)
2789 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2790 #ifndef TARGET_SPARC64
2791 if (!supervisor(dc)) {
2792 goto priv_insn;
2793 }
2794 update_psr(dc);
2795 gen_helper_rdpsr(cpu_dst, cpu_env);
2796 #else
2797 CHECK_IU_FEATURE(dc, HYPV);
2798 if (!hypervisor(dc))
2799 goto priv_insn;
2800 rs1 = GET_FIELD(insn, 13, 17);
2801 switch (rs1) {
2802 case 0: // hpstate
2803 // gen_op_rdhpstate();
2804 break;
2805 case 1: // htstate
2806 // gen_op_rdhtstate();
2807 break;
2808 case 3: // hintp
2809 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2810 break;
2811 case 5: // htba
2812 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2813 break;
2814 case 6: // hver
2815 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2816 break;
2817 case 31: // hstick_cmpr
2818 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2819 break;
2820 default:
2821 goto illegal_insn;
2822 }
2823 #endif
2824 gen_store_gpr(dc, rd, cpu_dst);
2825 break;
2826 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2827 if (!supervisor(dc))
2828 goto priv_insn;
2829 #ifdef TARGET_SPARC64
2830 rs1 = GET_FIELD(insn, 13, 17);
2831 switch (rs1) {
2832 case 0: // tpc
2833 {
2834 TCGv_ptr r_tsptr;
2835
2836 r_tsptr = tcg_temp_new_ptr();
2837 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2838 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2839 offsetof(trap_state, tpc));
2840 tcg_temp_free_ptr(r_tsptr);
2841 }
2842 break;
2843 case 1: // tnpc
2844 {
2845 TCGv_ptr r_tsptr;
2846
2847 r_tsptr = tcg_temp_new_ptr();
2848 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2849 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2850 offsetof(trap_state, tnpc));
2851 tcg_temp_free_ptr(r_tsptr);
2852 }
2853 break;
2854 case 2: // tstate
2855 {
2856 TCGv_ptr r_tsptr;
2857
2858 r_tsptr = tcg_temp_new_ptr();
2859 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2860 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2861 offsetof(trap_state, tstate));
2862 tcg_temp_free_ptr(r_tsptr);
2863 }
2864 break;
2865 case 3: // tt
2866 {
2867 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2868
2869 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2870 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2871 offsetof(trap_state, tt));
2872 tcg_temp_free_ptr(r_tsptr);
2873 }
2874 break;
2875 case 4: // tick
2876 {
2877 TCGv_ptr r_tickptr;
2878
2879 r_tickptr = tcg_temp_new_ptr();
2880 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2881 offsetof(CPUSPARCState, tick));
2882 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2883 tcg_temp_free_ptr(r_tickptr);
2884 }
2885 break;
2886 case 5: // tba
2887 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2888 break;
2889 case 6: // pstate
2890 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2891 offsetof(CPUSPARCState, pstate));
2892 break;
2893 case 7: // tl
2894 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2895 offsetof(CPUSPARCState, tl));
2896 break;
2897 case 8: // pil
2898 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2899 offsetof(CPUSPARCState, psrpil));
2900 break;
2901 case 9: // cwp
2902 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2903 break;
2904 case 10: // cansave
2905 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2906 offsetof(CPUSPARCState, cansave));
2907 break;
2908 case 11: // canrestore
2909 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2910 offsetof(CPUSPARCState, canrestore));
2911 break;
2912 case 12: // cleanwin
2913 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2914 offsetof(CPUSPARCState, cleanwin));
2915 break;
2916 case 13: // otherwin
2917 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2918 offsetof(CPUSPARCState, otherwin));
2919 break;
2920 case 14: // wstate
2921 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2922 offsetof(CPUSPARCState, wstate));
2923 break;
2924 case 16: // UA2005 gl
2925 CHECK_IU_FEATURE(dc, GL);
2926 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2927 offsetof(CPUSPARCState, gl));
2928 break;
2929 case 26: // UA2005 strand status
2930 CHECK_IU_FEATURE(dc, HYPV);
2931 if (!hypervisor(dc))
2932 goto priv_insn;
2933 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2934 break;
2935 case 31: // ver
2936 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2937 break;
2938 case 15: // fq
2939 default:
2940 goto illegal_insn;
2941 }
2942 #else
2943 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2944 #endif
2945 gen_store_gpr(dc, rd, cpu_tmp0);
2946 break;
2947 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2948 #ifdef TARGET_SPARC64
2949 save_state(dc);
2950 gen_helper_flushw(cpu_env);
2951 #else
2952 if (!supervisor(dc))
2953 goto priv_insn;
2954 gen_store_gpr(dc, rd, cpu_tbr);
2955 #endif
2956 break;
2957 #endif
2958             } else if (xop == 0x34) {   /* FPop1: FPU operations */
2959 if (gen_trap_ifnofpu(dc)) {
2960 goto jmp_insn;
2961 }
2962 gen_op_clear_ieee_excp_and_FTT();
2963 rs1 = GET_FIELD(insn, 13, 17);
2964 rs2 = GET_FIELD(insn, 27, 31);
2965 xop = GET_FIELD(insn, 18, 26);
2966 save_state(dc);
2967 switch (xop) {
2968 case 0x1: /* fmovs */
2969 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2970 gen_store_fpr_F(dc, rd, cpu_src1_32);
2971 break;
2972 case 0x5: /* fnegs */
2973 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2974 break;
2975 case 0x9: /* fabss */
2976 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2977 break;
2978 case 0x29: /* fsqrts */
2979 CHECK_FPU_FEATURE(dc, FSQRT);
2980 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2981 break;
2982 case 0x2a: /* fsqrtd */
2983 CHECK_FPU_FEATURE(dc, FSQRT);
2984 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2985 break;
2986 case 0x2b: /* fsqrtq */
2987 CHECK_FPU_FEATURE(dc, FLOAT128);
2988 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2989 break;
2990 case 0x41: /* fadds */
2991 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2992 break;
2993 case 0x42: /* faddd */
2994 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2995 break;
2996 case 0x43: /* faddq */
2997 CHECK_FPU_FEATURE(dc, FLOAT128);
2998 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2999 break;
3000 case 0x45: /* fsubs */
3001 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3002 break;
3003 case 0x46: /* fsubd */
3004 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3005 break;
3006 case 0x47: /* fsubq */
3007 CHECK_FPU_FEATURE(dc, FLOAT128);
3008 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3009 break;
3010 case 0x49: /* fmuls */
3011 CHECK_FPU_FEATURE(dc, FMUL);
3012 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3013 break;
3014 case 0x4a: /* fmuld */
3015 CHECK_FPU_FEATURE(dc, FMUL);
3016 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3017 break;
3018 case 0x4b: /* fmulq */
3019 CHECK_FPU_FEATURE(dc, FLOAT128);
3020 CHECK_FPU_FEATURE(dc, FMUL);
3021 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3022 break;
3023 case 0x4d: /* fdivs */
3024 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3025 break;
3026 case 0x4e: /* fdivd */
3027 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3028 break;
3029 case 0x4f: /* fdivq */
3030 CHECK_FPU_FEATURE(dc, FLOAT128);
3031 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3032 break;
3033 case 0x69: /* fsmuld */
3034 CHECK_FPU_FEATURE(dc, FSMULD);
3035 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3036 break;
3037 case 0x6e: /* fdmulq */
3038 CHECK_FPU_FEATURE(dc, FLOAT128);
3039 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3040 break;
3041 case 0xc4: /* fitos */
3042 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3043 break;
3044 case 0xc6: /* fdtos */
3045 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3046 break;
3047 case 0xc7: /* fqtos */
3048 CHECK_FPU_FEATURE(dc, FLOAT128);
3049 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3050 break;
3051 case 0xc8: /* fitod */
3052 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3053 break;
3054 case 0xc9: /* fstod */
3055 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3056 break;
3057 case 0xcb: /* fqtod */
3058 CHECK_FPU_FEATURE(dc, FLOAT128);
3059 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3060 break;
3061 case 0xcc: /* fitoq */
3062 CHECK_FPU_FEATURE(dc, FLOAT128);
3063 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3064 break;
3065 case 0xcd: /* fstoq */
3066 CHECK_FPU_FEATURE(dc, FLOAT128);
3067 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3068 break;
3069 case 0xce: /* fdtoq */
3070 CHECK_FPU_FEATURE(dc, FLOAT128);
3071 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3072 break;
3073 case 0xd1: /* fstoi */
3074 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3075 break;
3076 case 0xd2: /* fdtoi */
3077 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3078 break;
3079 case 0xd3: /* fqtoi */
3080 CHECK_FPU_FEATURE(dc, FLOAT128);
3081 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3082 break;
3083 #ifdef TARGET_SPARC64
3084 case 0x2: /* V9 fmovd */
3085 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3086 gen_store_fpr_D(dc, rd, cpu_src1_64);
3087 break;
3088 case 0x3: /* V9 fmovq */
3089 CHECK_FPU_FEATURE(dc, FLOAT128);
3090 gen_move_Q(rd, rs2);
3091 break;
3092 case 0x6: /* V9 fnegd */
3093 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3094 break;
3095 case 0x7: /* V9 fnegq */
3096 CHECK_FPU_FEATURE(dc, FLOAT128);
3097 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3098 break;
3099 case 0xa: /* V9 fabsd */
3100 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3101 break;
3102 case 0xb: /* V9 fabsq */
3103 CHECK_FPU_FEATURE(dc, FLOAT128);
3104 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3105 break;
3106 case 0x81: /* V9 fstox */
3107 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3108 break;
3109 case 0x82: /* V9 fdtox */
3110 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3111 break;
3112 case 0x83: /* V9 fqtox */
3113 CHECK_FPU_FEATURE(dc, FLOAT128);
3114 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3115 break;
3116 case 0x84: /* V9 fxtos */
3117 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3118 break;
3119 case 0x88: /* V9 fxtod */
3120 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3121 break;
3122 case 0x8c: /* V9 fxtoq */
3123 CHECK_FPU_FEATURE(dc, FLOAT128);
3124 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3125 break;
3126 #endif
3127 default:
3128 goto illegal_insn;
3129 }
3130             } else if (xop == 0x35) {   /* FPop2: FP compares and conditional moves */
3131 #ifdef TARGET_SPARC64
3132 int cond;
3133 #endif
3134 if (gen_trap_ifnofpu(dc)) {
3135 goto jmp_insn;
3136 }
3137 gen_op_clear_ieee_excp_and_FTT();
3138 rs1 = GET_FIELD(insn, 13, 17);
3139 rs2 = GET_FIELD(insn, 27, 31);
3140 xop = GET_FIELD(insn, 18, 26);
3141 save_state(dc);
3142
3143 #ifdef TARGET_SPARC64
3144 #define FMOVR(sz) \
3145 do { \
3146 DisasCompare cmp; \
3147 cond = GET_FIELD_SP(insn, 14, 17); \
3148 cpu_src1 = get_src1(dc, insn); \
3149 gen_compare_reg(&cmp, cond, cpu_src1); \
3150 gen_fmov##sz(dc, &cmp, rd, rs2); \
3151 free_compare(&cmp); \
3152 } while (0)
3153
3154 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3155 FMOVR(s);
3156 break;
3157 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3158 FMOVR(d);
3159 break;
3160 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3161 CHECK_FPU_FEATURE(dc, FLOAT128);
3162 FMOVR(q);
3163 break;
3164 }
3165 #undef FMOVR
3166 #endif
3167 switch (xop) {
3168 #ifdef TARGET_SPARC64
3169 #define FMOVCC(fcc, sz) \
3170 do { \
3171 DisasCompare cmp; \
3172 cond = GET_FIELD_SP(insn, 14, 17); \
3173 gen_fcompare(&cmp, fcc, cond); \
3174 gen_fmov##sz(dc, &cmp, rd, rs2); \
3175 free_compare(&cmp); \
3176 } while (0)
3177
3178 case 0x001: /* V9 fmovscc %fcc0 */
3179 FMOVCC(0, s);
3180 break;
3181 case 0x002: /* V9 fmovdcc %fcc0 */
3182 FMOVCC(0, d);
3183 break;
3184 case 0x003: /* V9 fmovqcc %fcc0 */
3185 CHECK_FPU_FEATURE(dc, FLOAT128);
3186 FMOVCC(0, q);
3187 break;
3188 case 0x041: /* V9 fmovscc %fcc1 */
3189 FMOVCC(1, s);
3190 break;
3191 case 0x042: /* V9 fmovdcc %fcc1 */
3192 FMOVCC(1, d);
3193 break;
3194 case 0x043: /* V9 fmovqcc %fcc1 */
3195 CHECK_FPU_FEATURE(dc, FLOAT128);
3196 FMOVCC(1, q);
3197 break;
3198 case 0x081: /* V9 fmovscc %fcc2 */
3199 FMOVCC(2, s);
3200 break;
3201 case 0x082: /* V9 fmovdcc %fcc2 */
3202 FMOVCC(2, d);
3203 break;
3204 case 0x083: /* V9 fmovqcc %fcc2 */
3205 CHECK_FPU_FEATURE(dc, FLOAT128);
3206 FMOVCC(2, q);
3207 break;
3208 case 0x0c1: /* V9 fmovscc %fcc3 */
3209 FMOVCC(3, s);
3210 break;
3211 case 0x0c2: /* V9 fmovdcc %fcc3 */
3212 FMOVCC(3, d);
3213 break;
3214 case 0x0c3: /* V9 fmovqcc %fcc3 */
3215 CHECK_FPU_FEATURE(dc, FLOAT128);
3216 FMOVCC(3, q);
3217 break;
3218 #undef FMOVCC
3219 #define FMOVCC(xcc, sz) \
3220 do { \
3221 DisasCompare cmp; \
3222 cond = GET_FIELD_SP(insn, 14, 17); \
3223 gen_compare(&cmp, xcc, cond, dc); \
3224 gen_fmov##sz(dc, &cmp, rd, rs2); \
3225 free_compare(&cmp); \
3226 } while (0)
3227
3228 case 0x101: /* V9 fmovscc %icc */
3229 FMOVCC(0, s);
3230 break;
3231 case 0x102: /* V9 fmovdcc %icc */
3232 FMOVCC(0, d);
3233 break;
3234 case 0x103: /* V9 fmovqcc %icc */
3235 CHECK_FPU_FEATURE(dc, FLOAT128);
3236 FMOVCC(0, q);
3237 break;
3238 case 0x181: /* V9 fmovscc %xcc */
3239 FMOVCC(1, s);
3240 break;
3241 case 0x182: /* V9 fmovdcc %xcc */
3242 FMOVCC(1, d);
3243 break;
3244 case 0x183: /* V9 fmovqcc %xcc */
3245 CHECK_FPU_FEATURE(dc, FLOAT128);
3246 FMOVCC(1, q);
3247 break;
3248 #undef FMOVCC
3249 #endif
3250 case 0x51: /* fcmps, V9 %fcc */
3251 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3252 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3253 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3254 break;
3255 case 0x52: /* fcmpd, V9 %fcc */
3256 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3257 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3258 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3259 break;
3260 case 0x53: /* fcmpq, V9 %fcc */
3261 CHECK_FPU_FEATURE(dc, FLOAT128);
3262 gen_op_load_fpr_QT0(QFPREG(rs1));
3263 gen_op_load_fpr_QT1(QFPREG(rs2));
3264 gen_op_fcmpq(rd & 3);
3265 break;
3266 case 0x55: /* fcmpes, V9 %fcc */
3267 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3268 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3269 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3270 break;
3271 case 0x56: /* fcmped, V9 %fcc */
3272 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3273 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3274 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3275 break;
3276 case 0x57: /* fcmpeq, V9 %fcc */
3277 CHECK_FPU_FEATURE(dc, FLOAT128);
3278 gen_op_load_fpr_QT0(QFPREG(rs1));
3279 gen_op_load_fpr_QT1(QFPREG(rs2));
3280 gen_op_fcmpeq(rd & 3);
3281 break;
3282 default:
3283 goto illegal_insn;
3284 }
3285 } else if (xop == 0x2) {
3286 TCGv dst = gen_dest_gpr(dc, rd);
3287 rs1 = GET_FIELD(insn, 13, 17);
3288 if (rs1 == 0) {
3289 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3290 if (IS_IMM) { /* immediate */
3291 simm = GET_FIELDs(insn, 19, 31);
3292 tcg_gen_movi_tl(dst, simm);
3293 gen_store_gpr(dc, rd, dst);
3294 } else { /* register */
3295 rs2 = GET_FIELD(insn, 27, 31);
3296 if (rs2 == 0) {
3297 tcg_gen_movi_tl(dst, 0);
3298 gen_store_gpr(dc, rd, dst);
3299 } else {
3300 cpu_src2 = gen_load_gpr(dc, rs2);
3301 gen_store_gpr(dc, rd, cpu_src2);
3302 }
3303 }
3304 } else {
3305 cpu_src1 = get_src1(dc, insn);
3306 if (IS_IMM) { /* immediate */
3307 simm = GET_FIELDs(insn, 19, 31);
3308 tcg_gen_ori_tl(dst, cpu_src1, simm);
3309 gen_store_gpr(dc, rd, dst);
3310 } else { /* register */
3311 rs2 = GET_FIELD(insn, 27, 31);
3312 if (rs2 == 0) {
3313 /* mov shortcut: or x, %g0, y -> mov x, y */
3314 gen_store_gpr(dc, rd, cpu_src1);
3315 } else {
3316 cpu_src2 = gen_load_gpr(dc, rs2);
3317 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3318 gen_store_gpr(dc, rd, dst);
3319 }
3320 }
3321 }
3322 #ifdef TARGET_SPARC64
3323 } else if (xop == 0x25) { /* sll, V9 sllx */
3324 cpu_src1 = get_src1(dc, insn);
3325 if (IS_IMM) { /* immediate */
3326 simm = GET_FIELDs(insn, 20, 31);
3327 if (insn & (1 << 12)) {
3328 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3329 } else {
3330 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3331 }
3332 } else { /* register */
3333 rs2 = GET_FIELD(insn, 27, 31);
3334 cpu_src2 = gen_load_gpr(dc, rs2);
3335 if (insn & (1 << 12)) {
3336 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3337 } else {
3338 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3339 }
3340 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3341 }
3342 gen_store_gpr(dc, rd, cpu_dst);
3343 } else if (xop == 0x26) { /* srl, V9 srlx */
3344 cpu_src1 = get_src1(dc, insn);
3345 if (IS_IMM) { /* immediate */
3346 simm = GET_FIELDs(insn, 20, 31);
3347 if (insn & (1 << 12)) {
3348 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3349 } else {
3350 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3351 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3352 }
3353 } else { /* register */
3354 rs2 = GET_FIELD(insn, 27, 31);
3355 cpu_src2 = gen_load_gpr(dc, rs2);
3356 if (insn & (1 << 12)) {
3357 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3358 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3359 } else {
3360 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3361 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3362 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3363 }
3364 }
3365 gen_store_gpr(dc, rd, cpu_dst);
3366 } else if (xop == 0x27) { /* sra, V9 srax */
3367 cpu_src1 = get_src1(dc, insn);
3368 if (IS_IMM) { /* immediate */
3369 simm = GET_FIELDs(insn, 20, 31);
3370 if (insn & (1 << 12)) {
3371 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3372 } else {
3373 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3374 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3375 }
3376 } else { /* register */
3377 rs2 = GET_FIELD(insn, 27, 31);
3378 cpu_src2 = gen_load_gpr(dc, rs2);
3379 if (insn & (1 << 12)) {
3380 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3381 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3382 } else {
3383 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3384 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3385 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3386 }
3387 }
3388 gen_store_gpr(dc, rd, cpu_dst);
3389 #endif
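            /* For the three V9 shift forms above, insn bit 12 selects
               the 64-bit shift (6-bit count) versus the 32-bit shift
               (5-bit count, with the source first zero- or
               sign-extended from 32 bits); e.g. "srl %g1, 0, %g2" is
               the classic idiom that clears the upper 32 bits of a
               register.  */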
3390 } else if (xop < 0x36) {
3391 if (xop < 0x20) {
3392 cpu_src1 = get_src1(dc, insn);
3393 cpu_src2 = get_src2(dc, insn);
3394 switch (xop & ~0x10) {
3395 case 0x0: /* add */
3396 if (xop & 0x10) {
3397 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3398 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3399 dc->cc_op = CC_OP_ADD;
3400 } else {
3401 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3402 }
3403 break;
3404 case 0x1: /* and */
3405 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3406 if (xop & 0x10) {
3407 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3408 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3409 dc->cc_op = CC_OP_LOGIC;
3410 }
3411 break;
3412 case 0x2: /* or */
3413 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3414 if (xop & 0x10) {
3415 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3416 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3417 dc->cc_op = CC_OP_LOGIC;
3418 }
3419 break;
3420 case 0x3: /* xor */
3421 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3422 if (xop & 0x10) {
3423 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3424 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3425 dc->cc_op = CC_OP_LOGIC;
3426 }
3427 break;
3428 case 0x4: /* sub */
3429 if (xop & 0x10) {
3430 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3431 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3432 dc->cc_op = CC_OP_SUB;
3433 } else {
3434 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3435 }
3436 break;
3437 case 0x5: /* andn */
3438 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3439 if (xop & 0x10) {
3440 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3441 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3442 dc->cc_op = CC_OP_LOGIC;
3443 }
3444 break;
3445 case 0x6: /* orn */
3446 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3447 if (xop & 0x10) {
3448 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3449 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3450 dc->cc_op = CC_OP_LOGIC;
3451 }
3452 break;
3453 case 0x7: /* xorn */
3454 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3455 if (xop & 0x10) {
3456 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3457 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3458 dc->cc_op = CC_OP_LOGIC;
3459 }
3460 break;
3461 case 0x8: /* addx, V9 addc */
3462 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3463 (xop & 0x10));
3464 break;
3465 #ifdef TARGET_SPARC64
3466 case 0x9: /* V9 mulx */
3467 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3468 break;
3469 #endif
3470 case 0xa: /* umul */
3471 CHECK_IU_FEATURE(dc, MUL);
3472 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3473 if (xop & 0x10) {
3474 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3475 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3476 dc->cc_op = CC_OP_LOGIC;
3477 }
3478 break;
3479 case 0xb: /* smul */
3480 CHECK_IU_FEATURE(dc, MUL);
3481 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3482 if (xop & 0x10) {
3483 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3484 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3485 dc->cc_op = CC_OP_LOGIC;
3486 }
3487 break;
3488 case 0xc: /* subx, V9 subc */
3489 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3490 (xop & 0x10));
3491 break;
3492 #ifdef TARGET_SPARC64
3493 case 0xd: /* V9 udivx */
3494 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3495 break;
3496 #endif
3497 case 0xe: /* udiv */
3498 CHECK_IU_FEATURE(dc, DIV);
3499 if (xop & 0x10) {
3500 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3501 cpu_src2);
3502 dc->cc_op = CC_OP_DIV;
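                            /* No movi to cpu_cc_op here: udiv_cc (and
                               sdiv_cc below) are expected to update
                               env->cc_op from within the helper, once
                               a division trap has been ruled out.  */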
3503 } else {
3504 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3505 cpu_src2);
3506 }
3507 break;
3508 case 0xf: /* sdiv */
3509 CHECK_IU_FEATURE(dc, DIV);
3510 if (xop & 0x10) {
3511 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3512 cpu_src2);
3513 dc->cc_op = CC_OP_DIV;
3514 } else {
3515 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3516 cpu_src2);
3517 }
3518 break;
3519 default:
3520 goto illegal_insn;
3521 }
3522 gen_store_gpr(dc, rd, cpu_dst);
3523 } else {
3524 cpu_src1 = get_src1(dc, insn);
3525 cpu_src2 = get_src2(dc, insn);
3526 switch (xop) {
3527 case 0x20: /* taddcc */
3528 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3529 gen_store_gpr(dc, rd, cpu_dst);
3530 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3531 dc->cc_op = CC_OP_TADD;
3532 break;
3533 case 0x21: /* tsubcc */
3534 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3535 gen_store_gpr(dc, rd, cpu_dst);
3536 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3537 dc->cc_op = CC_OP_TSUB;
3538 break;
3539 case 0x22: /* taddcctv */
3540 gen_helper_taddcctv(cpu_dst, cpu_env,
3541 cpu_src1, cpu_src2);
3542 gen_store_gpr(dc, rd, cpu_dst);
3543 dc->cc_op = CC_OP_TADDTV;
3544 break;
3545 case 0x23: /* tsubcctv */
3546 gen_helper_tsubcctv(cpu_dst, cpu_env,
3547 cpu_src1, cpu_src2);
3548 gen_store_gpr(dc, rd, cpu_dst);
3549 dc->cc_op = CC_OP_TSUBTV;
3550 break;
3551 case 0x24: /* mulscc */
3552 update_psr(dc);
3553 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3554 gen_store_gpr(dc, rd, cpu_dst);
3555 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3556 dc->cc_op = CC_OP_ADD;
3557 break;
3558 #ifndef TARGET_SPARC64
3559 case 0x25: /* sll */
3560 if (IS_IMM) { /* immediate */
3561 simm = GET_FIELDs(insn, 20, 31);
3562 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3563 } else { /* register */
3564 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3565 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3566 }
3567 gen_store_gpr(dc, rd, cpu_dst);
3568 break;
3569 case 0x26: /* srl */
3570 if (IS_IMM) { /* immediate */
3571 simm = GET_FIELDs(insn, 20, 31);
3572 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3573 } else { /* register */
3574 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3575 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3576 }
3577 gen_store_gpr(dc, rd, cpu_dst);
3578 break;
3579 case 0x27: /* sra */
3580 if (IS_IMM) { /* immediate */
3581 simm = GET_FIELDs(insn, 20, 31);
3582 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3583 } else { /* register */
3584 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3585 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3586 }
3587 gen_store_gpr(dc, rd, cpu_dst);
3588 break;
3589 #endif
3590                 case 0x30:      /* wrasr (wry when rd == 0) */
3591 {
3592 switch(rd) {
3593 case 0: /* wry */
3594 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3595 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3596 break;
3597 #ifndef TARGET_SPARC64
3598 case 0x01 ... 0x0f: /* undefined in the
3599 SPARCv8 manual, nop
3600 on the microSPARC
3601 II */
3602 case 0x10 ... 0x1f: /* implementation-dependent
3603 in the SPARCv8
3604 manual, nop on the
3605 microSPARC II */
3606 break;
3607 #else
3608 case 0x2: /* V9 wrccr */
3609 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3610 gen_helper_wrccr(cpu_env, cpu_dst);
3611 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3612 dc->cc_op = CC_OP_FLAGS;
3613 break;
3614 case 0x3: /* V9 wrasi */
3615 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3616 tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
3617 tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
3618 break;
3619 case 0x6: /* V9 wrfprs */
3620 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3621 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
3622 save_state(dc);
3623 gen_op_next_insn();
3624 tcg_gen_exit_tb(0);
3625 dc->is_br = 1;
3626 break;
3627 case 0xf: /* V9 sir, nop if user */
3628 #if !defined(CONFIG_USER_ONLY)
3629 if (supervisor(dc)) {
3630 ; // XXX
3631 }
3632 #endif
3633 break;
3634 case 0x13: /* Graphics Status */
3635 if (gen_trap_ifnofpu(dc)) {
3636 goto jmp_insn;
3637 }
3638 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3639 break;
3640 case 0x14: /* Softint set */
3641 if (!supervisor(dc))
3642 goto illegal_insn;
3643 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3644 gen_helper_set_softint(cpu_env, cpu_tmp64);
3645 break;
3646 case 0x15: /* Softint clear */
3647 if (!supervisor(dc))
3648 goto illegal_insn;
3649 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3650 gen_helper_clear_softint(cpu_env, cpu_tmp64);
3651 break;
3652 case 0x16: /* Softint write */
3653 if (!supervisor(dc))
3654 goto illegal_insn;
3655 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3656 gen_helper_write_softint(cpu_env, cpu_tmp64);
3657 break;
3658 case 0x17: /* Tick compare */
3659 #if !defined(CONFIG_USER_ONLY)
3660 if (!supervisor(dc))
3661 goto illegal_insn;
3662 #endif
3663 {
3664 TCGv_ptr r_tickptr;
3665
3666 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3667 cpu_src2);
3668 r_tickptr = tcg_temp_new_ptr();
3669 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3670 offsetof(CPUSPARCState, tick));
3671 gen_helper_tick_set_limit(r_tickptr,
3672 cpu_tick_cmpr);
3673 tcg_temp_free_ptr(r_tickptr);
3674 }
3675 break;
3676 case 0x18: /* System tick */
3677 #if !defined(CONFIG_USER_ONLY)
3678 if (!supervisor(dc))
3679 goto illegal_insn;
3680 #endif
3681 {
3682 TCGv_ptr r_tickptr;
3683
3684 tcg_gen_xor_tl(cpu_dst, cpu_src1,
3685 cpu_src2);
3686 r_tickptr = tcg_temp_new_ptr();
3687 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3688 offsetof(CPUSPARCState, stick));
3689 gen_helper_tick_set_count(r_tickptr,
3690 cpu_dst);
3691 tcg_temp_free_ptr(r_tickptr);
3692 }
3693 break;
3694 case 0x19: /* System tick compare */
3695 #if !defined(CONFIG_USER_ONLY)
3696 if (!supervisor(dc))
3697 goto illegal_insn;
3698 #endif
3699 {
3700 TCGv_ptr r_tickptr;
3701
3702 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3703 cpu_src2);
3704 r_tickptr = tcg_temp_new_ptr();
3705 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3706 offsetof(CPUSPARCState, stick));
3707 gen_helper_tick_set_limit(r_tickptr,
3708 cpu_stick_cmpr);
3709 tcg_temp_free_ptr(r_tickptr);
3710 }
3711 break;
3712
3713 case 0x10: /* Performance Control */
3714 case 0x11: /* Performance Instrumentation
3715 Counter */
3716 case 0x12: /* Dispatch Control */
3717 #endif
3718 default:
3719 goto illegal_insn;
3720 }
3721 }
3722 break;
3723 #if !defined(CONFIG_USER_ONLY)
3724 case 0x31: /* wrpsr, V9 saved, restored */
3725 {
3726 if (!supervisor(dc))
3727 goto priv_insn;
3728 #ifdef TARGET_SPARC64
3729 switch (rd) {
3730 case 0:
3731 gen_helper_saved(cpu_env);
3732 break;
3733 case 1:
3734 gen_helper_restored(cpu_env);
3735 break;
3736 case 2: /* UA2005 allclean */
3737 case 3: /* UA2005 otherw */
3738 case 4: /* UA2005 normalw */
3739 case 5: /* UA2005 invalw */
3740 // XXX
3741 default:
3742 goto illegal_insn;
3743 }
3744 #else
3745 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3746 gen_helper_wrpsr(cpu_env, cpu_dst);
3747 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3748 dc->cc_op = CC_OP_FLAGS;
3749 save_state(dc);
3750 gen_op_next_insn();
3751 tcg_gen_exit_tb(0);
3752 dc->is_br = 1;
3753 #endif
3754 }
3755 break;
3756 case 0x32: /* wrwim, V9 wrpr */
3757 {
3758 if (!supervisor(dc))
3759 goto priv_insn;
3760 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3761 #ifdef TARGET_SPARC64
3762 switch (rd) {
3763 case 0: // tpc
3764 {
3765 TCGv_ptr r_tsptr;
3766
3767 r_tsptr = tcg_temp_new_ptr();
3768 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3769 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3770 offsetof(trap_state, tpc));
3771 tcg_temp_free_ptr(r_tsptr);
3772 }
3773 break;
3774 case 1: // tnpc
3775 {
3776 TCGv_ptr r_tsptr;
3777
3778 r_tsptr = tcg_temp_new_ptr();
3779 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3780 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3781 offsetof(trap_state, tnpc));
3782 tcg_temp_free_ptr(r_tsptr);
3783 }
3784 break;
3785 case 2: // tstate
3786 {
3787 TCGv_ptr r_tsptr;
3788
3789 r_tsptr = tcg_temp_new_ptr();
3790 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3791 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3792 offsetof(trap_state,
3793 tstate));
3794 tcg_temp_free_ptr(r_tsptr);
3795 }
3796 break;
3797 case 3: // tt
3798 {
3799 TCGv_ptr r_tsptr;
3800
3801 r_tsptr = tcg_temp_new_ptr();
3802 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3803 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3804 offsetof(trap_state, tt));
3805 tcg_temp_free_ptr(r_tsptr);
3806 }
3807 break;
3808 case 4: // tick
3809 {
3810 TCGv_ptr r_tickptr;
3811
3812 r_tickptr = tcg_temp_new_ptr();
3813 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3814 offsetof(CPUSPARCState, tick));
3815 gen_helper_tick_set_count(r_tickptr,
3816 cpu_tmp0);
3817 tcg_temp_free_ptr(r_tickptr);
3818 }
3819 break;
3820 case 5: // tba
3821 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3822 break;
3823 case 6: // pstate
3824 save_state(dc);
3825 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3826 dc->npc = DYNAMIC_PC;
3827 break;
3828 case 7: // tl
3829 save_state(dc);
3830 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3831 offsetof(CPUSPARCState, tl));
3832 dc->npc = DYNAMIC_PC;
3833 break;
3834 case 8: // pil
3835 gen_helper_wrpil(cpu_env, cpu_tmp0);
3836 break;
3837 case 9: // cwp
3838 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3839 break;
3840 case 10: // cansave
3841 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3842 offsetof(CPUSPARCState,
3843 cansave));
3844 break;
3845 case 11: // canrestore
3846 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3847 offsetof(CPUSPARCState,
3848 canrestore));
3849 break;
3850 case 12: // cleanwin
3851 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3852 offsetof(CPUSPARCState,
3853 cleanwin));
3854 break;
3855 case 13: // otherwin
3856 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3857 offsetof(CPUSPARCState,
3858 otherwin));
3859 break;
3860 case 14: // wstate
3861 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3862 offsetof(CPUSPARCState,
3863 wstate));
3864 break;
3865 case 16: // UA2005 gl
3866 CHECK_IU_FEATURE(dc, GL);
3867 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3868 offsetof(CPUSPARCState, gl));
3869 break;
3870 case 26: // UA2005 strand status
3871 CHECK_IU_FEATURE(dc, HYPV);
3872 if (!hypervisor(dc))
3873 goto priv_insn;
3874 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3875 break;
3876 default:
3877 goto illegal_insn;
3878 }
3879 #else
3880 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3881 if (dc->def->nwindows != 32) {
3882 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3883 (1 << dc->def->nwindows) - 1);
3884 }
3885 #endif
3886 }
3887 break;
3888 case 0x33: /* wrtbr, UA2005 wrhpr */
3889 {
3890 #ifndef TARGET_SPARC64
3891 if (!supervisor(dc))
3892 goto priv_insn;
3893 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3894 #else
3895 CHECK_IU_FEATURE(dc, HYPV);
3896 if (!hypervisor(dc))
3897 goto priv_insn;
3898 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3899 switch (rd) {
3900 case 0: // hpstate
3901 // XXX gen_op_wrhpstate();
3902 save_state(dc);
3903 gen_op_next_insn();
3904 tcg_gen_exit_tb(0);
3905 dc->is_br = 1;
3906 break;
3907 case 1: // htstate
3908 // XXX gen_op_wrhtstate();
3909 break;
3910 case 3: // hintp
3911 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3912 break;
3913 case 5: // htba
3914 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3915 break;
3916 case 31: // hstick_cmpr
3917 {
3918 TCGv_ptr r_tickptr;
3919
3920 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3921 r_tickptr = tcg_temp_new_ptr();
3922 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3923 offsetof(CPUSPARCState, hstick));
3924 gen_helper_tick_set_limit(r_tickptr,
3925 cpu_hstick_cmpr);
3926 tcg_temp_free_ptr(r_tickptr);
3927 }
3928 break;
3929 case 6: // hver readonly
3930 default:
3931 goto illegal_insn;
3932 }
3933 #endif
3934 }
3935 break;
3936 #endif
3937 #ifdef TARGET_SPARC64
3938 case 0x2c: /* V9 movcc */
3939 {
3940 int cc = GET_FIELD_SP(insn, 11, 12);
3941 int cond = GET_FIELD_SP(insn, 14, 17);
3942 DisasCompare cmp;
3943 TCGv dst;
3944
3945 if (insn & (1 << 18)) {
3946 if (cc == 0) {
3947 gen_compare(&cmp, 0, cond, dc);
3948 } else if (cc == 2) {
3949 gen_compare(&cmp, 1, cond, dc);
3950 } else {
3951 goto illegal_insn;
3952 }
3953 } else {
3954 gen_fcompare(&cmp, cc, cond);
3955 }
3956
3957 /* The get_src2 above loaded the normal 13-bit
3958 immediate field, not the 11-bit field we have
3959 in movcc. But it did handle the reg case. */
3960 if (IS_IMM) {
3961 simm = GET_FIELD_SPs(insn, 0, 10);
3962 tcg_gen_movi_tl(cpu_src2, simm);
3963 }
3964
3965 dst = gen_load_gpr(dc, rd);
3966 tcg_gen_movcond_tl(cmp.cond, dst,
3967 cmp.c1, cmp.c2,
3968 cpu_src2, dst);
3969 free_compare(&cmp);
3970 gen_store_gpr(dc, rd, dst);
3971 break;
3972 }
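                /* Design note: movcond implements "rd = cond ? src2 : rd"
                   in one op, which is why rd is pre-loaded with
                   gen_load_gpr rather than gen_dest_gpr -- the old value
                   must survive when the condition is false.  The movr
                   case below follows the same pattern.  */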
3973 case 0x2d: /* V9 sdivx */
3974 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3975 gen_store_gpr(dc, rd, cpu_dst);
3976 break;
3977 case 0x2e: /* V9 popc */
3978 gen_helper_popc(cpu_dst, cpu_src2);
3979 gen_store_gpr(dc, rd, cpu_dst);
3980 break;
3981 case 0x2f: /* V9 movr */
3982 {
3983 int cond = GET_FIELD_SP(insn, 10, 12);
3984 DisasCompare cmp;
3985 TCGv dst;
3986
3987 gen_compare_reg(&cmp, cond, cpu_src1);
3988
3989 /* The get_src2 above loaded the normal 13-bit
3990 immediate field, not the 10-bit field we have
3991 in movr. But it did handle the reg case. */
3992 if (IS_IMM) {
3993 simm = GET_FIELD_SPs(insn, 0, 9);
3994 tcg_gen_movi_tl(cpu_src2, simm);
3995 }
3996
3997 dst = gen_load_gpr(dc, rd);
3998 tcg_gen_movcond_tl(cmp.cond, dst,
3999 cmp.c1, cmp.c2,
4000 cpu_src2, dst);
4001 free_compare(&cmp);
4002 gen_store_gpr(dc, rd, dst);
4003 break;
4004 }
4005 #endif
4006 default:
4007 goto illegal_insn;
4008 }
4009 }
4010 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4011 #ifdef TARGET_SPARC64
4012 int opf = GET_FIELD_SP(insn, 5, 13);
4013 rs1 = GET_FIELD(insn, 13, 17);
4014 rs2 = GET_FIELD(insn, 27, 31);
4015 if (gen_trap_ifnofpu(dc)) {
4016 goto jmp_insn;
4017 }
4018
4019 switch (opf) {
4020 case 0x000: /* VIS I edge8cc */
4021 CHECK_FPU_FEATURE(dc, VIS1);
4022 cpu_src1 = gen_load_gpr(dc, rs1);
4023 cpu_src2 = gen_load_gpr(dc, rs2);
4024 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4025 gen_store_gpr(dc, rd, cpu_dst);
4026 break;
4027 case 0x001: /* VIS II edge8n */
4028 CHECK_FPU_FEATURE(dc, VIS2);
4029 cpu_src1 = gen_load_gpr(dc, rs1);
4030 cpu_src2 = gen_load_gpr(dc, rs2);
4031 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4032 gen_store_gpr(dc, rd, cpu_dst);
4033 break;
4034 case 0x002: /* VIS I edge8lcc */
4035 CHECK_FPU_FEATURE(dc, VIS1);
4036 cpu_src1 = gen_load_gpr(dc, rs1);
4037 cpu_src2 = gen_load_gpr(dc, rs2);
4038 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4039 gen_store_gpr(dc, rd, cpu_dst);
4040 break;
4041 case 0x003: /* VIS II edge8ln */
4042 CHECK_FPU_FEATURE(dc, VIS2);
4043 cpu_src1 = gen_load_gpr(dc, rs1);
4044 cpu_src2 = gen_load_gpr(dc, rs2);
4045 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4046 gen_store_gpr(dc, rd, cpu_dst);
4047 break;
4048 case 0x004: /* VIS I edge16cc */
4049 CHECK_FPU_FEATURE(dc, VIS1);
4050 cpu_src1 = gen_load_gpr(dc, rs1);
4051 cpu_src2 = gen_load_gpr(dc, rs2);
4052 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4053 gen_store_gpr(dc, rd, cpu_dst);
4054 break;
4055 case 0x005: /* VIS II edge16n */
4056 CHECK_FPU_FEATURE(dc, VIS2);
4057 cpu_src1 = gen_load_gpr(dc, rs1);
4058 cpu_src2 = gen_load_gpr(dc, rs2);
4059 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4060 gen_store_gpr(dc, rd, cpu_dst);
4061 break;
4062 case 0x006: /* VIS I edge16lcc */
4063 CHECK_FPU_FEATURE(dc, VIS1);
4064 cpu_src1 = gen_load_gpr(dc, rs1);
4065 cpu_src2 = gen_load_gpr(dc, rs2);
4066 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4067 gen_store_gpr(dc, rd, cpu_dst);
4068 break;
4069 case 0x007: /* VIS II edge16ln */
4070 CHECK_FPU_FEATURE(dc, VIS2);
4071 cpu_src1 = gen_load_gpr(dc, rs1);
4072 cpu_src2 = gen_load_gpr(dc, rs2);
4073 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4074 gen_store_gpr(dc, rd, cpu_dst);
4075 break;
4076 case 0x008: /* VIS I edge32cc */
4077 CHECK_FPU_FEATURE(dc, VIS1);
4078 cpu_src1 = gen_load_gpr(dc, rs1);
4079 cpu_src2 = gen_load_gpr(dc, rs2);
4080 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4081 gen_store_gpr(dc, rd, cpu_dst);
4082 break;
4083 case 0x009: /* VIS II edge32n */
4084 CHECK_FPU_FEATURE(dc, VIS2);
4085 cpu_src1 = gen_load_gpr(dc, rs1);
4086 cpu_src2 = gen_load_gpr(dc, rs2);
4087 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4088 gen_store_gpr(dc, rd, cpu_dst);
4089 break;
4090 case 0x00a: /* VIS I edge32lcc */
4091 CHECK_FPU_FEATURE(dc, VIS1);
4092 cpu_src1 = gen_load_gpr(dc, rs1);
4093 cpu_src2 = gen_load_gpr(dc, rs2);
4094 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4095 gen_store_gpr(dc, rd, cpu_dst);
4096 break;
4097 case 0x00b: /* VIS II edge32ln */
4098 CHECK_FPU_FEATURE(dc, VIS2);
4099 cpu_src1 = gen_load_gpr(dc, rs1);
4100 cpu_src2 = gen_load_gpr(dc, rs2);
4101 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4102 gen_store_gpr(dc, rd, cpu_dst);
4103 break;
4104 case 0x010: /* VIS I array8 */
4105 CHECK_FPU_FEATURE(dc, VIS1);
4106 cpu_src1 = gen_load_gpr(dc, rs1);
4107 cpu_src2 = gen_load_gpr(dc, rs2);
4108 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4109 gen_store_gpr(dc, rd, cpu_dst);
4110 break;
4111 case 0x012: /* VIS I array16 */
4112 CHECK_FPU_FEATURE(dc, VIS1);
4113 cpu_src1 = gen_load_gpr(dc, rs1);
4114 cpu_src2 = gen_load_gpr(dc, rs2);
4115 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4116 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4117 gen_store_gpr(dc, rd, cpu_dst);
4118 break;
4119 case 0x014: /* VIS I array32 */
4120 CHECK_FPU_FEATURE(dc, VIS1);
4121 cpu_src1 = gen_load_gpr(dc, rs1);
4122 cpu_src2 = gen_load_gpr(dc, rs2);
4123 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4124 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4125 gen_store_gpr(dc, rd, cpu_dst);
4126 break;
4127 case 0x018: /* VIS I alignaddr */
4128 CHECK_FPU_FEATURE(dc, VIS1);
4129 cpu_src1 = gen_load_gpr(dc, rs1);
4130 cpu_src2 = gen_load_gpr(dc, rs2);
4131 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4132 gen_store_gpr(dc, rd, cpu_dst);
4133 break;
4134 case 0x01a: /* VIS I alignaddrl */
4135 CHECK_FPU_FEATURE(dc, VIS1);
4136 cpu_src1 = gen_load_gpr(dc, rs1);
4137 cpu_src2 = gen_load_gpr(dc, rs2);
4138 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4139 gen_store_gpr(dc, rd, cpu_dst);
4140 break;
4141 case 0x019: /* VIS II bmask */
4142 CHECK_FPU_FEATURE(dc, VIS2);
4143 cpu_src1 = gen_load_gpr(dc, rs1);
4144 cpu_src2 = gen_load_gpr(dc, rs2);
4145 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4146 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4147 gen_store_gpr(dc, rd, cpu_dst);
4148 break;
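                /* bmask writes the sum both to rd and to GSR.mask
                   (bits 63:32 of %gsr, via the 32-bit deposit above);
                   a subsequent bshuffle reads that field as its
                   byte-selector.  */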
4149 case 0x020: /* VIS I fcmple16 */
4150 CHECK_FPU_FEATURE(dc, VIS1);
4151 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4152 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4153 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4154 gen_store_gpr(dc, rd, cpu_dst);
4155 break;
4156 case 0x022: /* VIS I fcmpne16 */
4157 CHECK_FPU_FEATURE(dc, VIS1);
4158 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4159 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4160 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4161 gen_store_gpr(dc, rd, cpu_dst);
4162 break;
4163 case 0x024: /* VIS I fcmple32 */
4164 CHECK_FPU_FEATURE(dc, VIS1);
4165 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4166 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4167 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4168 gen_store_gpr(dc, rd, cpu_dst);
4169 break;
4170 case 0x026: /* VIS I fcmpne32 */
4171 CHECK_FPU_FEATURE(dc, VIS1);
4172 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4173 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4174 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4175 gen_store_gpr(dc, rd, cpu_dst);
4176 break;
4177 case 0x028: /* VIS I fcmpgt16 */
4178 CHECK_FPU_FEATURE(dc, VIS1);
4179 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4180 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4181 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4182 gen_store_gpr(dc, rd, cpu_dst);
4183 break;
4184 case 0x02a: /* VIS I fcmpeq16 */
4185 CHECK_FPU_FEATURE(dc, VIS1);
4186 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4187 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4188 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4189 gen_store_gpr(dc, rd, cpu_dst);
4190 break;
4191 case 0x02c: /* VIS I fcmpgt32 */
4192 CHECK_FPU_FEATURE(dc, VIS1);
4193 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4194 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4195 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4196 gen_store_gpr(dc, rd, cpu_dst);
4197 break;
4198 case 0x02e: /* VIS I fcmpeq32 */
4199 CHECK_FPU_FEATURE(dc, VIS1);
4200 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4201 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4202 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4203 gen_store_gpr(dc, rd, cpu_dst);
4204 break;
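/* Partitioned multiplies: broadly, these multiply 8-bit pixel lanes
   against signed 16-bit coefficients and keep a rounded 16-bit result
   per lane; the au/al forms reuse a single coefficient for all lanes,
   and the fmuld forms widen two lanes to full 32-bit products. */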
4205 case 0x031: /* VIS I fmul8x16 */
4206 CHECK_FPU_FEATURE(dc, VIS1);
4207 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4208 break;
4209 case 0x033: /* VIS I fmul8x16au */
4210 CHECK_FPU_FEATURE(dc, VIS1);
4211 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4212 break;
4213 case 0x035: /* VIS I fmul8x16al */
4214 CHECK_FPU_FEATURE(dc, VIS1);
4215 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4216 break;
4217 case 0x036: /* VIS I fmul8sux16 */
4218 CHECK_FPU_FEATURE(dc, VIS1);
4219 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4220 break;
4221 case 0x037: /* VIS I fmul8ulx16 */
4222 CHECK_FPU_FEATURE(dc, VIS1);
4223 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4224 break;
4225 case 0x038: /* VIS I fmuld8sux16 */
4226 CHECK_FPU_FEATURE(dc, VIS1);
4227 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4228 break;
4229 case 0x039: /* VIS I fmuld8ulx16 */
4230 CHECK_FPU_FEATURE(dc, VIS1);
4231 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4232 break;
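/* fpack32/fpack16/fpackfix narrow fixed-point lanes with saturation.
   The shift count comes from GSR.scale, which is why these helpers
   receive cpu_gsr as an extra argument, either implicitly through
   gen_gsr_fop_DDD or explicitly below. */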
4233 case 0x03a: /* VIS I fpack32 */
4234 CHECK_FPU_FEATURE(dc, VIS1);
4235 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4236 break;
4237 case 0x03b: /* VIS I fpack16 */
4238 CHECK_FPU_FEATURE(dc, VIS1);
4239 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4240 cpu_dst_32 = gen_dest_fpr_F(dc);
4241 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4242 gen_store_fpr_F(dc, rd, cpu_dst_32);
4243 break;
4244 case 0x03d: /* VIS I fpackfix */
4245 CHECK_FPU_FEATURE(dc, VIS1);
4246 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4247 cpu_dst_32 = gen_dest_fpr_F(dc);
4248 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4249 gen_store_fpr_F(dc, rd, cpu_dst_32);
4250 break;
4251 case 0x03e: /* VIS I pdist */
4252 CHECK_FPU_FEATURE(dc, VIS1);
4253 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4254 break;
4255 case 0x048: /* VIS I faligndata */
4256 CHECK_FPU_FEATURE(dc, VIS1);
4257 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4258 break;
4259 case 0x04b: /* VIS I fpmerge */
4260 CHECK_FPU_FEATURE(dc, VIS1);
4261 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4262 break;
4263 case 0x04c: /* VIS II bshuffle */
4264 CHECK_FPU_FEATURE(dc, VIS2);
4265 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4266 break;
4267 case 0x04d: /* VIS I fexpand */
4268 CHECK_FPU_FEATURE(dc, VIS1);
4269 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4270 break;
4271 case 0x050: /* VIS I fpadd16 */
4272 CHECK_FPU_FEATURE(dc, VIS1);
4273 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4274 break;
4275 case 0x051: /* VIS I fpadd16s */
4276 CHECK_FPU_FEATURE(dc, VIS1);
4277 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4278 break;
4279 case 0x052: /* VIS I fpadd32 */
4280 CHECK_FPU_FEATURE(dc, VIS1);
4281 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4282 break;
4283 case 0x053: /* VIS I fpadd32s */
4284 CHECK_FPU_FEATURE(dc, VIS1);
4285 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4286 break;
4287 case 0x054: /* VIS I fpsub16 */
4288 CHECK_FPU_FEATURE(dc, VIS1);
4289 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4290 break;
4291 case 0x055: /* VIS I fpsub16s */
4292 CHECK_FPU_FEATURE(dc, VIS1);
4293 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4294 break;
4295 case 0x056: /* VIS I fpsub32 */
4296 CHECK_FPU_FEATURE(dc, VIS1);
4297 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4298 break;
4299 case 0x057: /* VIS I fpsub32s */
4300 CHECK_FPU_FEATURE(dc, VIS1);
4301 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4302 break;
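/* The VIS logical operations below need no helpers: they map directly
   onto TCG's 64-bit bitwise ops for the double forms and the 32-bit
   ops for the "s"-suffixed single forms, via the _DDD/_FFF wrappers. */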
4303 case 0x060: /* VIS I fzero */
4304 CHECK_FPU_FEATURE(dc, VIS1);
4305 cpu_dst_64 = gen_dest_fpr_D();
4306 tcg_gen_movi_i64(cpu_dst_64, 0);
4307 gen_store_fpr_D(dc, rd, cpu_dst_64);
4308 break;
4309 case 0x061: /* VIS I fzeros */
4310 CHECK_FPU_FEATURE(dc, VIS1);
4311 cpu_dst_32 = gen_dest_fpr_F(dc);
4312 tcg_gen_movi_i32(cpu_dst_32, 0);
4313 gen_store_fpr_F(dc, rd, cpu_dst_32);
4314 break;
4315 case 0x062: /* VIS I fnor */
4316 CHECK_FPU_FEATURE(dc, VIS1);
4317 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4318 break;
4319 case 0x063: /* VIS I fnors */
4320 CHECK_FPU_FEATURE(dc, VIS1);
4321 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4322 break;
4323 case 0x064: /* VIS I fandnot2 */
4324 CHECK_FPU_FEATURE(dc, VIS1);
4325 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4326 break;
4327 case 0x065: /* VIS I fandnot2s */
4328 CHECK_FPU_FEATURE(dc, VIS1);
4329 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4330 break;
4331 case 0x066: /* VIS I fnot2 */
4332 CHECK_FPU_FEATURE(dc, VIS1);
4333 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4334 break;
4335 case 0x067: /* VIS I fnot2s */
4336 CHECK_FPU_FEATURE(dc, VIS1);
4337 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4338 break;
4339 case 0x068: /* VIS I fandnot1 */
4340 CHECK_FPU_FEATURE(dc, VIS1);
4341 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4342 break;
4343 case 0x069: /* VIS I fandnot1s */
4344 CHECK_FPU_FEATURE(dc, VIS1);
4345 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4346 break;
4347 case 0x06a: /* VIS I fnot1 */
4348 CHECK_FPU_FEATURE(dc, VIS1);
4349 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4350 break;
4351 case 0x06b: /* VIS I fnot1s */
4352 CHECK_FPU_FEATURE(dc, VIS1);
4353 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4354 break;
4355 case 0x06c: /* VIS I fxor */
4356 CHECK_FPU_FEATURE(dc, VIS1);
4357 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4358 break;
4359 case 0x06d: /* VIS I fxors */
4360 CHECK_FPU_FEATURE(dc, VIS1);
4361 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4362 break;
4363 case 0x06e: /* VIS I fnand */
4364 CHECK_FPU_FEATURE(dc, VIS1);
4365 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4366 break;
4367 case 0x06f: /* VIS I fnands */
4368 CHECK_FPU_FEATURE(dc, VIS1);
4369 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4370 break;
4371 case 0x070: /* VIS I fand */
4372 CHECK_FPU_FEATURE(dc, VIS1);
4373 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4374 break;
4375 case 0x071: /* VIS I fands */
4376 CHECK_FPU_FEATURE(dc, VIS1);
4377 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4378 break;
4379 case 0x072: /* VIS I fxnor */
4380 CHECK_FPU_FEATURE(dc, VIS1);
4381 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4382 break;
4383 case 0x073: /* VIS I fxnors */
4384 CHECK_FPU_FEATURE(dc, VIS1);
4385 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4386 break;
4387 case 0x074: /* VIS I fsrc1 */
4388 CHECK_FPU_FEATURE(dc, VIS1);
4389 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4390 gen_store_fpr_D(dc, rd, cpu_src1_64);
4391 break;
4392 case 0x075: /* VIS I fsrc1s */
4393 CHECK_FPU_FEATURE(dc, VIS1);
4394 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4395 gen_store_fpr_F(dc, rd, cpu_src1_32);
4396 break;
4397 case 0x076: /* VIS I fornot2 */
4398 CHECK_FPU_FEATURE(dc, VIS1);
4399 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4400 break;
4401 case 0x077: /* VIS I fornot2s */
4402 CHECK_FPU_FEATURE(dc, VIS1);
4403 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4404 break;
4405 case 0x078: /* VIS I fsrc2 */
4406 CHECK_FPU_FEATURE(dc, VIS1);
4407 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4408 gen_store_fpr_D(dc, rd, cpu_src1_64);
4409 break;
4410 case 0x079: /* VIS I fsrc2s */
4411 CHECK_FPU_FEATURE(dc, VIS1);
4412 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4413 gen_store_fpr_F(dc, rd, cpu_src1_32);
4414 break;
4415 case 0x07a: /* VIS I fornot1 */
4416 CHECK_FPU_FEATURE(dc, VIS1);
4417 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4418 break;
4419 case 0x07b: /* VIS I fornot1s */
4420 CHECK_FPU_FEATURE(dc, VIS1);
4421 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4422 break;
4423 case 0x07c: /* VIS I for */
4424 CHECK_FPU_FEATURE(dc, VIS1);
4425 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4426 break;
4427 case 0x07d: /* VIS I fors */
4428 CHECK_FPU_FEATURE(dc, VIS1);
4429 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4430 break;
4431 case 0x07e: /* VIS I fone */
4432 CHECK_FPU_FEATURE(dc, VIS1);
4433 cpu_dst_64 = gen_dest_fpr_D();
4434 tcg_gen_movi_i64(cpu_dst_64, -1);
4435 gen_store_fpr_D(dc, rd, cpu_dst_64);
4436 break;
4437 case 0x07f: /* VIS I fones */
4438 CHECK_FPU_FEATURE(dc, VIS1);
4439 cpu_dst_32 = gen_dest_fpr_F(dc);
4440 tcg_gen_movi_i32(cpu_dst_32, -1);
4441 gen_store_fpr_F(dc, rd, cpu_dst_32);
4442 break;
4443 case 0x080: /* VIS I shutdown */
4444 case 0x081: /* VIS II siam */
4445 /* XXX: shutdown/siam not implemented */
4446 goto illegal_insn;
4447 default:
4448 goto illegal_insn;
4449 }
4450 #else
4451 goto ncp_insn;
4452 #endif
4453 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4454 #ifdef TARGET_SPARC64
4455 goto illegal_insn;
4456 #else
4457 goto ncp_insn;
4458 #endif
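/* V9 return: restores the caller's register window like RESTORE and
   then jumps to rs1 + rs2/simm13, trapping if the target is not
   4-byte aligned (hence the check_align mask of 3 below). */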
4459 #ifdef TARGET_SPARC64
4460 } else if (xop == 0x39) { /* V9 return */
4461 TCGv_i32 r_const;
4462
4463 save_state(dc);
4464 cpu_src1 = get_src1(dc, insn);
4465 if (IS_IMM) { /* immediate */
4466 simm = GET_FIELDs(insn, 19, 31);
4467 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4468 } else { /* register */
4469 rs2 = GET_FIELD(insn, 27, 31);
4470 if (rs2) {
4471 cpu_src2 = gen_load_gpr(dc, rs2);
4472 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4473 } else {
4474 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4475 }
4476 }
4477 gen_helper_restore(cpu_env);
4478 gen_mov_pc_npc(dc);
4479 r_const = tcg_const_i32(3);
4480 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4481 tcg_temp_free_i32(r_const);
4482 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4483 dc->npc = DYNAMIC_PC;
4484 goto jmp_insn;
4485 #endif
4486 } else {
4487 cpu_src1 = get_src1(dc, insn);
4488 if (IS_IMM) { /* immediate */
4489 simm = GET_FIELDs(insn, 19, 31);
4490 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4491 } else { /* register */
4492 rs2 = GET_FIELD(insn, 27, 31);
4493 if (rs2) {
4494 cpu_src2 = gen_load_gpr(dc, rs2);
4495 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4496 } else {
4497 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4498 }
4499 }
4500 switch (xop) {
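/* jmpl: rd receives the address of the jmpl instruction itself, and
   npc becomes the computed target, so the translator must fall back
   to a dynamic npc here. */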
4501 case 0x38: /* jmpl */
4502 {
4503 TCGv t;
4504 TCGv_i32 r_const;
4505
4506 t = gen_dest_gpr(dc, rd);
4507 tcg_gen_movi_tl(t, dc->pc);
4508 gen_store_gpr(dc, rd, t);
4509 gen_mov_pc_npc(dc);
4510 r_const = tcg_const_i32(3);
4511 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4512 tcg_temp_free_i32(r_const);
4513 gen_address_mask(dc, cpu_dst);
4514 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4515 dc->npc = DYNAMIC_PC;
4516 }
4517 goto jmp_insn;
4518 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4519 case 0x39: /* rett, V9 return */
4520 {
4521 TCGv_i32 r_const;
4522
4523 if (!supervisor(dc))
4524 goto priv_insn;
4525 gen_mov_pc_npc(dc);
4526 r_const = tcg_const_i32(3);
4527 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4528 tcg_temp_free_i32(r_const);
4529 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4530 dc->npc = DYNAMIC_PC;
4531 gen_helper_rett(cpu_env);
4532 }
4533 goto jmp_insn;
4534 #endif
4535 case 0x3b: /* flush */
4536 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4537 goto unimp_flush;
4538 /* nop */
4539 break;
4540 case 0x3c: /* save */
4541 save_state(dc);
4542 gen_helper_save(cpu_env);
4543 gen_store_gpr(dc, rd, cpu_dst);
4544 break;
4545 case 0x3d: /* restore */
4546 save_state(dc);
4547 gen_helper_restore(cpu_env);
4548 gen_store_gpr(dc, rd, cpu_dst);
4549 break;
4550 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4551 case 0x3e: /* V9 done/retry */
4552 {
4553 switch (rd) {
4554 case 0:
4555 if (!supervisor(dc))
4556 goto priv_insn;
4557 dc->npc = DYNAMIC_PC;
4558 dc->pc = DYNAMIC_PC;
4559 gen_helper_done(cpu_env);
4560 goto jmp_insn;
4561 case 1:
4562 if (!supervisor(dc))
4563 goto priv_insn;
4564 dc->npc = DYNAMIC_PC;
4565 dc->pc = DYNAMIC_PC;
4566 gen_helper_retry(cpu_env);
4567 goto jmp_insn;
4568 default:
4569 goto illegal_insn;
4570 }
4571 }
4572 break;
4573 #endif
4574 default:
4575 goto illegal_insn;
4576 }
4577 }
4578 break;
4579 }
4580 break;
4581 case 3: /* load/store instructions */
4582 {
4583 unsigned int xop = GET_FIELD(insn, 7, 12);
4584 /* ??? gen_address_mask prevents us from using a source
4585 register directly. Always generate a temporary. */
4586 TCGv cpu_addr = get_temp_tl(dc);
4587
4588 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4589 if (xop == 0x3c || xop == 0x3e) {
4590 /* V9 casa/casxa : no offset */
4591 } else if (IS_IMM) { /* immediate */
4592 simm = GET_FIELDs(insn, 19, 31);
4593 if (simm != 0) {
4594 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4595 }
4596 } else { /* register */
4597 rs2 = GET_FIELD(insn, 27, 31);
4598 if (rs2 != 0) {
4599 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4600 }
4601 }
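/* The xop tests below carve the load/store opcode space into groups:
   the various load forms (including alternate-space and, on V9, the
   FP-from-ASI loads), the FP/FSR loads (0x20-0x23), the integer
   stores, the FP/FSR stores (0x24-0x27), and finally the ASI
   store/cas group (0x34-0x3e). */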
4602 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4603 (xop > 0x17 && xop <= 0x1d ) ||
4604 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4605 TCGv cpu_val = gen_dest_gpr(dc, rd);
4606
4607 switch (xop) {
4608 case 0x0: /* ld, V9 lduw, load unsigned word */
4609 gen_address_mask(dc, cpu_addr);
4610 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4611 break;
4612 case 0x1: /* ldub, load unsigned byte */
4613 gen_address_mask(dc, cpu_addr);
4614 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4615 break;
4616 case 0x2: /* lduh, load unsigned halfword */
4617 gen_address_mask(dc, cpu_addr);
4618 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4619 break;
4620 case 0x3: /* ldd, load double word */
4621 if (rd & 1)
4622 goto illegal_insn;
4623 else {
4624 TCGv_i32 r_const;
4625
4626 save_state(dc);
4627 r_const = tcg_const_i32(7);
4628 /* XXX remove alignment check */
4629 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4630 tcg_temp_free_i32(r_const);
4631 gen_address_mask(dc, cpu_addr);
4632 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4633 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
4634 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
4635 gen_store_gpr(dc, rd + 1, cpu_tmp0);
4636 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
4637 tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
4638 tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
4639 }
4640 break;
4641 case 0x9: /* ldsb, load signed byte */
4642 gen_address_mask(dc, cpu_addr);
4643 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4644 break;
4645 case 0xa: /* ldsh, load signed halfword */
4646 gen_address_mask(dc, cpu_addr);
4647 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4648 break;
4649 case 0xd: /* ldstub -- XXX: should be atomic */
4650 {
4651 TCGv r_const;
4652
4653 gen_address_mask(dc, cpu_addr);
4654 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx); /* ldstub zero-fills rd */
4655 r_const = tcg_const_tl(0xff);
4656 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4657 tcg_temp_free(r_const);
4658 }
4659 break;
4660 case 0x0f: /* swap, swap register with memory -- XXX:
4661 should be atomic */
4662 CHECK_IU_FEATURE(dc, SWAP);
4663 cpu_src1 = gen_load_gpr(dc, rd);
4664 gen_address_mask(dc, cpu_addr);
4665 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4666 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4667 tcg_gen_mov_tl(cpu_val, cpu_tmp0);
4668 break;
4669 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4670 case 0x10: /* lda, V9 lduwa, load word alternate */
4671 #ifndef TARGET_SPARC64
4672 if (IS_IMM)
4673 goto illegal_insn;
4674 if (!supervisor(dc))
4675 goto priv_insn;
4676 #endif
4677 save_state(dc);
4678 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4679 break;
4680 case 0x11: /* lduba, load unsigned byte alternate */
4681 #ifndef TARGET_SPARC64
4682 if (IS_IMM)
4683 goto illegal_insn;
4684 if (!supervisor(dc))
4685 goto priv_insn;
4686 #endif
4687 save_state(dc);
4688 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4689 break;
4690 case 0x12: /* lduha, load unsigned halfword alternate */
4691 #ifndef TARGET_SPARC64
4692 if (IS_IMM)
4693 goto illegal_insn;
4694 if (!supervisor(dc))
4695 goto priv_insn;
4696 #endif
4697 save_state(dc);
4698 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4699 break;
4700 case 0x13: /* ldda, load double word alternate */
4701 #ifndef TARGET_SPARC64
4702 if (IS_IMM)
4703 goto illegal_insn;
4704 if (!supervisor(dc))
4705 goto priv_insn;
4706 #endif
4707 if (rd & 1)
4708 goto illegal_insn;
4709 save_state(dc);
4710 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4711 goto skip_move;
4712 case 0x19: /* ldsba, load signed byte alternate */
4713 #ifndef TARGET_SPARC64
4714 if (IS_IMM)
4715 goto illegal_insn;
4716 if (!supervisor(dc))
4717 goto priv_insn;
4718 #endif
4719 save_state(dc);
4720 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4721 break;
4722 case 0x1a: /* ldsha, load signed halfword alternate */
4723 #ifndef TARGET_SPARC64
4724 if (IS_IMM)
4725 goto illegal_insn;
4726 if (!supervisor(dc))
4727 goto priv_insn;
4728 #endif
4729 save_state(dc);
4730 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4731 break;
4732 case 0x1d: /* ldstuba -- XXX: should be atomic */
4733 #ifndef TARGET_SPARC64
4734 if (IS_IMM)
4735 goto illegal_insn;
4736 if (!supervisor(dc))
4737 goto priv_insn;
4738 #endif
4739 save_state(dc);
4740 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4741 break;
4742 case 0x1f: /* swapa, swap reg with alt. memory -- XXX:
4743 should be atomic */
4744 CHECK_IU_FEATURE(dc, SWAP);
4745 #ifndef TARGET_SPARC64
4746 if (IS_IMM)
4747 goto illegal_insn;
4748 if (!supervisor(dc))
4749 goto priv_insn;
4750 #endif
4751 save_state(dc);
4752 cpu_src1 = gen_load_gpr(dc, rd);
4753 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4754 break;
4755
4756 #ifndef TARGET_SPARC64
4757 case 0x30: /* ldc */
4758 case 0x31: /* ldcsr */
4759 case 0x33: /* lddc */
4760 goto ncp_insn;
4761 #endif
4762 #endif
4763 #ifdef TARGET_SPARC64
4764 case 0x08: /* V9 ldsw */
4765 gen_address_mask(dc, cpu_addr);
4766 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4767 break;
4768 case 0x0b: /* V9 ldx */
4769 gen_address_mask(dc, cpu_addr);
4770 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4771 break;
4772 case 0x18: /* V9 ldswa */
4773 save_state(dc);
4774 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4775 break;
4776 case 0x1b: /* V9 ldxa */
4777 save_state(dc);
4778 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4779 break;
4780 case 0x2d: /* V9 prefetch, no effect */
4781 goto skip_move;
4782 case 0x30: /* V9 ldfa */
4783 if (gen_trap_ifnofpu(dc)) {
4784 goto jmp_insn;
4785 }
4786 save_state(dc);
4787 gen_ldf_asi(cpu_addr, insn, 4, rd);
4788 gen_update_fprs_dirty(rd);
4789 goto skip_move;
4790 case 0x33: /* V9 lddfa */
4791 if (gen_trap_ifnofpu(dc)) {
4792 goto jmp_insn;
4793 }
4794 save_state(dc);
4795 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4796 gen_update_fprs_dirty(DFPREG(rd));
4797 goto skip_move;
4798 case 0x3d: /* V9 prefetcha, no effect */
4799 goto skip_move;
4800 case 0x32: /* V9 ldqfa */
4801 CHECK_FPU_FEATURE(dc, FLOAT128);
4802 if (gen_trap_ifnofpu(dc)) {
4803 goto jmp_insn;
4804 }
4805 save_state(dc);
4806 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4807 gen_update_fprs_dirty(QFPREG(rd));
4808 goto skip_move;
4809 #endif
4810 default:
4811 goto illegal_insn;
4812 }
4813 gen_store_gpr(dc, rd, cpu_val);
4814 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4815 skip_move: ;
4816 #endif
4817 } else if (xop >= 0x20 && xop < 0x24) {
4818 if (gen_trap_ifnofpu(dc)) {
4819 goto jmp_insn;
4820 }
4821 save_state(dc);
4822 switch (xop) {
4823 case 0x20: /* ldf, load fpreg */
4824 gen_address_mask(dc, cpu_addr);
4825 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4826 cpu_dst_32 = gen_dest_fpr_F(dc);
4827 tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
4828 gen_store_fpr_F(dc, rd, cpu_dst_32);
4829 break;
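/* ldfsr loads only the low 32 bits of the FSR; the V9 ldxfsr form
   (rd == 1) loads all 64 bits and so skips the 32-bit truncation
   done in the common path below. */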
4830 case 0x21: /* ldfsr, V9 ldxfsr */
4831 #ifdef TARGET_SPARC64
4832 gen_address_mask(dc, cpu_addr);
4833 if (rd == 1) {
4834 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4835 gen_helper_ldxfsr(cpu_env, cpu_tmp64);
4836 break;
4837 }
4838 #endif
4839 {
4840 TCGv_i32 t32 = get_temp_i32(dc);
4841 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4842 tcg_gen_trunc_tl_i32(t32, cpu_tmp0);
4843 gen_helper_ldfsr(cpu_env, t32);
4844 }
4845 break;
4846 case 0x22: /* ldqf, load quad fpreg */
4847 {
4848 TCGv_i32 r_const;
4849
4850 CHECK_FPU_FEATURE(dc, FLOAT128);
4851 r_const = tcg_const_i32(dc->mem_idx);
4852 gen_address_mask(dc, cpu_addr);
4853 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4854 tcg_temp_free_i32(r_const);
4855 gen_op_store_QT0_fpr(QFPREG(rd));
4856 gen_update_fprs_dirty(QFPREG(rd));
4857 }
4858 break;
4859 case 0x23: /* lddf, load double fpreg */
4860 gen_address_mask(dc, cpu_addr);
4861 cpu_dst_64 = gen_dest_fpr_D();
4862 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4863 gen_store_fpr_D(dc, rd, cpu_dst_64);
4864 break;
4865 default:
4866 goto illegal_insn;
4867 }
4868 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4869 xop == 0xe || xop == 0x1e) {
4870 TCGv cpu_val = gen_load_gpr(dc, rd);
4871
4872 switch (xop) {
4873 case 0x4: /* st, store word */
4874 gen_address_mask(dc, cpu_addr);
4875 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4876 break;
4877 case 0x5: /* stb, store byte */
4878 gen_address_mask(dc, cpu_addr);
4879 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4880 break;
4881 case 0x6: /* sth, store halfword */
4882 gen_address_mask(dc, cpu_addr);
4883 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4884 break;
4885 case 0x7: /* std, store double word */
4886 if (rd & 1)
4887 goto illegal_insn;
4888 else {
4889 TCGv_i32 r_const;
4890 TCGv lo;
4891
4892 save_state(dc);
4893 gen_address_mask(dc, cpu_addr);
4894 r_const = tcg_const_i32(7);
4895 /* XXX remove alignment check */
4896 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4897 tcg_temp_free_i32(r_const);
4898 lo = gen_load_gpr(dc, rd + 1);
4899 tcg_gen_concat_tl_i64(cpu_tmp64, lo, cpu_val);
4900 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
4901 }
4902 break;
4903 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4904 case 0x14: /* sta, V9 stwa, store word alternate */
4905 #ifndef TARGET_SPARC64
4906 if (IS_IMM)
4907 goto illegal_insn;
4908 if (!supervisor(dc))
4909 goto priv_insn;
4910 #endif
4911 save_state(dc);
4912 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4913 dc->npc = DYNAMIC_PC;
4914 break;
4915 case 0x15: /* stba, store byte alternate */
4916 #ifndef TARGET_SPARC64
4917 if (IS_IMM)
4918 goto illegal_insn;
4919 if (!supervisor(dc))
4920 goto priv_insn;
4921 #endif
4922 save_state(dc);
4923 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4924 dc->npc = DYNAMIC_PC;
4925 break;
4926 case 0x16: /* stha, store halfword alternate */
4927 #ifndef TARGET_SPARC64
4928 if (IS_IMM)
4929 goto illegal_insn;
4930 if (!supervisor(dc))
4931 goto priv_insn;
4932 #endif
4933 save_state(dc);
4934 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4935 dc->npc = DYNAMIC_PC;
4936 break;
4937 case 0x17: /* stda, store double word alternate */
4938 #ifndef TARGET_SPARC64
4939 if (IS_IMM)
4940 goto illegal_insn;
4941 if (!supervisor(dc))
4942 goto priv_insn;
4943 #endif
4944 if (rd & 1)
4945 goto illegal_insn;
4946 else {
4947 save_state(dc);
4948 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4949 }
4950 break;
4951 #endif
4952 #ifdef TARGET_SPARC64
4953 case 0x0e: /* V9 stx */
4954 gen_address_mask(dc, cpu_addr);
4955 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4956 break;
4957 case 0x1e: /* V9 stxa */
4958 save_state(dc);
4959 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4960 dc->npc = DYNAMIC_PC;
4961 break;
4962 #endif
4963 default:
4964 goto illegal_insn;
4965 }
4966 } else if (xop > 0x23 && xop < 0x28) {
4967 if (gen_trap_ifnofpu(dc)) {
4968 goto jmp_insn;
4969 }
4970 save_state(dc);
4971 switch (xop) {
4972 case 0x24: /* stf, store fpreg */
4973 gen_address_mask(dc, cpu_addr);
4974 cpu_src1_32 = gen_load_fpr_F(dc, rd);
4975 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
4976 tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
4977 break;
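/* stfsr stores the low 32 bits of the FSR; V9 stxfsr (rd == 1)
   stores the full 64-bit register.  Both read env->fsr directly
   rather than going through a helper. */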
4978 case 0x25: /* stfsr, V9 stxfsr */
4979 {
4980 TCGv t = get_temp_tl(dc);
4981
4982 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
4983 #ifdef TARGET_SPARC64
4984 gen_address_mask(dc, cpu_addr);
4985 if (rd == 1) {
4986 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
4987 break;
4988 }
4989 #endif
4990 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
4991 }
4992 break;
4993 case 0x26:
4994 #ifdef TARGET_SPARC64
4995 /* V9 stqf, store quad fpreg */
4996 {
4997 TCGv_i32 r_const;
4998
4999 CHECK_FPU_FEATURE(dc, FLOAT128);
5000 gen_op_load_fpr_QT0(QFPREG(rd));
5001 r_const = tcg_const_i32(dc->mem_idx);
5002 gen_address_mask(dc, cpu_addr);
5003 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5004 tcg_temp_free_i32(r_const);
5005 }
5006 break;
5007 #else /* !TARGET_SPARC64 */
5008 /* stdfq, store floating point queue */
5009 #if defined(CONFIG_USER_ONLY)
5010 goto illegal_insn;
5011 #else
5012 if (!supervisor(dc))
5013 goto priv_insn;
5014 if (gen_trap_ifnofpu(dc)) {
5015 goto jmp_insn;
5016 }
5017 goto nfq_insn;
5018 #endif
5019 #endif
5020 case 0x27: /* stdf, store double fpreg */
5021 gen_address_mask(dc, cpu_addr);
5022 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5023 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5024 break;
5025 default:
5026 goto illegal_insn;
5027 }
5028 } else if (xop > 0x33 && xop < 0x3f) {
5029 save_state(dc);
5030 switch (xop) {
5031 #ifdef TARGET_SPARC64
5032 case 0x34: /* V9 stfa */
5033 if (gen_trap_ifnofpu(dc)) {
5034 goto jmp_insn;
5035 }
5036 gen_stf_asi(cpu_addr, insn, 4, rd);
5037 break;
5038 case 0x36: /* V9 stqfa */
5039 {
5040 TCGv_i32 r_const;
5041
5042 CHECK_FPU_FEATURE(dc, FLOAT128);
5043 if (gen_trap_ifnofpu(dc)) {
5044 goto jmp_insn;
5045 }
5046 r_const = tcg_const_i32(7);
5047 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5048 tcg_temp_free_i32(r_const);
5049 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5050 }
5051 break;
5052 case 0x37: /* V9 stdfa */
5053 if (gen_trap_ifnofpu(dc)) {
5054 goto jmp_insn;
5055 }
5056 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5057 break;
5058 case 0x3c: /* V9 casa */
5059 rs2 = GET_FIELD(insn, 27, 31);
5060 cpu_src2 = gen_load_gpr(dc, rs2);
5061 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5062 break;
5063 case 0x3e: /* V9 casxa */
5064 rs2 = GET_FIELD(insn, 27, 31);
5065 cpu_src2 = gen_load_gpr(dc, rs2);
5066 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5067 break;
5068 #else
5069 case 0x34: /* stc */
5070 case 0x35: /* stcsr */
5071 case 0x36: /* stdcq */
5072 case 0x37: /* stdc */
5073 goto ncp_insn;
5074 #endif
5075 default:
5076 goto illegal_insn;
5077 }
5078 } else {
5079 goto illegal_insn;
5080 }
5081 }
5082 break;
5083 }
5084 /* default case for non-jump instructions */
5085 if (dc->npc == DYNAMIC_PC) {
5086 dc->pc = DYNAMIC_PC;
5087 gen_op_next_insn();
5088 } else if (dc->npc == JUMP_PC) {
5089 /* we can do a static jump */
5090 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5091 dc->is_br = 1;
5092 } else {
5093 dc->pc = dc->npc;
5094 dc->npc = dc->npc + 4;
5095 }
5096 jmp_insn:
5097 goto egress;
5098 illegal_insn:
5099 {
5100 TCGv_i32 r_const;
5101
5102 save_state(dc);
5103 r_const = tcg_const_i32(TT_ILL_INSN);
5104 gen_helper_raise_exception(cpu_env, r_const);
5105 tcg_temp_free_i32(r_const);
5106 dc->is_br = 1;
5107 }
5108 goto egress;
5109 unimp_flush:
5110 {
5111 TCGv_i32 r_const;
5112
5113 save_state(dc);
5114 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5115 gen_helper_raise_exception(cpu_env, r_const);
5116 tcg_temp_free_i32(r_const);
5117 dc->is_br = 1;
5118 }
5119 goto egress;
5120 #if !defined(CONFIG_USER_ONLY)
5121 priv_insn:
5122 {
5123 TCGv_i32 r_const;
5124
5125 save_state(dc);
5126 r_const = tcg_const_i32(TT_PRIV_INSN);
5127 gen_helper_raise_exception(cpu_env, r_const);
5128 tcg_temp_free_i32(r_const);
5129 dc->is_br = 1;
5130 }
5131 goto egress;
5132 #endif
5133 nfpu_insn:
5134 save_state(dc);
5135 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5136 dc->is_br = 1;
5137 goto egress;
5138 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5139 nfq_insn:
5140 save_state(dc);
5141 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5142 dc->is_br = 1;
5143 goto egress;
5144 #endif
5145 #ifndef TARGET_SPARC64
5146 ncp_insn:
5147 {
5148 TCGv_i32 r_const;
5149
5150 save_state(dc);
5151 r_const = tcg_const_i32(TT_NCP_INSN);
5152 gen_helper_raise_exception(cpu_env, r_const);
5153 tcg_temp_free_i32(r_const);
5154 dc->is_br = 1;
5155 }
5156 goto egress;
5157 #endif
5158 egress:
5159 if (dc->n_t32 != 0) {
5160 int i;
5161 for (i = dc->n_t32 - 1; i >= 0; --i) {
5162 tcg_temp_free_i32(dc->t32[i]);
5163 }
5164 dc->n_t32 = 0;
5165 }
5166 if (dc->n_ttl != 0) {
5167 int i;
5168 for (i = dc->n_ttl - 1; i >= 0; --i) {
5169 tcg_temp_free(dc->ttl[i]);
5170 }
5171 dc->n_ttl = 0;
5172 }
5173 }
5174
5175 static inline void gen_intermediate_code_internal(TranslationBlock * tb,
5176 int spc, CPUSPARCState *env)
5177 {
5178 target_ulong pc_start, last_pc;
5179 uint16_t *gen_opc_end;
5180 DisasContext dc1, *dc = &dc1;
5181 CPUBreakpoint *bp;
5182 int j, lj = -1;
5183 int num_insns;
5184 int max_insns;
5185 unsigned int insn;
5186
5187 memset(dc, 0, sizeof(DisasContext));
5188 dc->tb = tb;
5189 pc_start = tb->pc;
5190 dc->pc = pc_start;
5191 last_pc = dc->pc;
5192 dc->npc = (target_ulong) tb->cs_base;
5193 dc->cc_op = CC_OP_DYNAMIC;
5194 dc->mem_idx = cpu_mmu_index(env);
5195 dc->def = env->def;
5196 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5197 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5198 dc->singlestep = (env->singlestep_enabled || singlestep);
5199 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
5200
5201 num_insns = 0;
5202 max_insns = tb->cflags & CF_COUNT_MASK;
5203 if (max_insns == 0)
5204 max_insns = CF_COUNT_MASK;
5205 gen_icount_start();
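/* Translate one instruction at a time until the TB must end: on a
   branch (is_br), when the PC stops advancing sequentially, at a
   page boundary (so a TT_TFAULT PC stays in the right page), in
   single-step mode, or when the opcode buffer or icount budget
   runs out. */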
5206 do {
5207 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
5208 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5209 if (bp->pc == dc->pc) {
5210 if (dc->pc != pc_start)
5211 save_state(dc);
5212 gen_helper_debug(cpu_env);
5213 tcg_gen_exit_tb(0);
5214 dc->is_br = 1;
5215 goto exit_gen_loop;
5216 }
5217 }
5218 }
5219 if (spc) {
5220 qemu_log("Search PC...\n");
5221 j = gen_opc_ptr - gen_opc_buf;
5222 if (lj < j) {
5223 lj++;
5224 while (lj < j)
5225 gen_opc_instr_start[lj++] = 0;
5226 gen_opc_pc[lj] = dc->pc;
5227 gen_opc_npc[lj] = dc->npc;
5228 gen_opc_instr_start[lj] = 1;
5229 gen_opc_icount[lj] = num_insns;
5230 }
5231 }
5232 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
5233 gen_io_start();
5234 last_pc = dc->pc;
5235 insn = cpu_ldl_code(env, dc->pc);
5236
5237 cpu_tmp0 = tcg_temp_new();
5238 cpu_tmp64 = tcg_temp_new_i64();
5239 cpu_dst = tcg_temp_new();
5240
5241 disas_sparc_insn(dc, insn);
5242 num_insns++;
5243
5244 tcg_temp_free(cpu_dst);
5245 tcg_temp_free_i64(cpu_tmp64);
5246 tcg_temp_free(cpu_tmp0);
5247
5248 if (dc->is_br)
5249 break;
5250 /* if the next PC is different, we abort now */
5251 if (dc->pc != (last_pc + 4))
5252 break;
5253 /* if we reach a page boundary, we stop generation so that the
5254 PC of a TT_TFAULT exception is always in the right page */
5255 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5256 break;
5257 /* if single step mode, we generate only one instruction and
5258 generate an exception */
5259 if (dc->singlestep) {
5260 break;
5261 }
5262 } while ((gen_opc_ptr < gen_opc_end) &&
5263 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5264 num_insns < max_insns);
5265
5266 exit_gen_loop:
5267 if (tb->cflags & CF_LAST_IO) {
5268 gen_io_end();
5269 }
5270 if (!dc->is_br) {
5271 if (dc->pc != DYNAMIC_PC &&
5272 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5273 /* static PC and NPC: we can use direct chaining */
5274 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5275 } else {
5276 if (dc->pc != DYNAMIC_PC) {
5277 tcg_gen_movi_tl(cpu_pc, dc->pc);
5278 }
5279 save_npc(dc);
5280 tcg_gen_exit_tb(0);
5281 }
5282 }
5283 gen_icount_end(tb, num_insns);
5284 *gen_opc_ptr = INDEX_op_end;
5285 if (spc) {
5286 j = gen_opc_ptr - gen_opc_buf;
5287 lj++;
5288 while (lj <= j)
5289 gen_opc_instr_start[lj++] = 0;
5290 #if 0
5291 log_page_dump();
5292 #endif
5293 gen_opc_jump_pc[0] = dc->jump_pc[0];
5294 gen_opc_jump_pc[1] = dc->jump_pc[1];
5295 } else {
5296 tb->size = last_pc + 4 - pc_start;
5297 tb->icount = num_insns;
5298 }
5299 #ifdef DEBUG_DISAS
5300 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5301 qemu_log("--------------\n");
5302 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5303 log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
5304 qemu_log("\n");
5305 }
5306 #endif
5307 }
5308
5309 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5310 {
5311 gen_intermediate_code_internal(tb, 0, env);
5312 }
5313
5314 void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
5315 {
5316 gen_intermediate_code_internal(tb, 1, env);
5317 }
5318
5319 void gen_intermediate_code_init(CPUSPARCState *env)
5320 {
5321 unsigned int i;
5322 static int inited;
5323 static const char * const gregnames[8] = {
5324 NULL, // g0 not used
5325 "g1",
5326 "g2",
5327 "g3",
5328 "g4",
5329 "g5",
5330 "g6",
5331 "g7",
5332 };
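/* Each fpr[] global is a 64-bit double-register pair, so only the
   even-numbered %f names appear below; odd single-precision registers
   are accessed as one half of the containing double. */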
5333 static const char * const fregnames[32] = {
5334 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5335 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5336 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5337 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5338 };
5339
5340 /* init various static tables */
5341 if (!inited) {
5342 inited = 1;
5343
5344 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5345 cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
5346 offsetof(CPUSPARCState, regwptr),
5347 "regwptr");
5348 #ifdef TARGET_SPARC64
5349 cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
5350 "xcc");
5351 cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
5352 "asi");
5353 cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
5354 "fprs");
5355 cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
5356 "gsr");
5357 cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
5358 offsetof(CPUSPARCState, tick_cmpr),
5359 "tick_cmpr");
5360 cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
5361 offsetof(CPUSPARCState, stick_cmpr),
5362 "stick_cmpr");
5363 cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
5364 offsetof(CPUSPARCState, hstick_cmpr),
5365 "hstick_cmpr");
5366 cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
5367 "hintp");
5368 cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
5369 "htba");
5370 cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
5371 "hver");
5372 cpu_ssr = tcg_global_mem_new(TCG_AREG0,
5373 offsetof(CPUSPARCState, ssr), "ssr");
5374 cpu_ver = tcg_global_mem_new(TCG_AREG0,
5375 offsetof(CPUSPARCState, version), "ver");
5376 cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
5377 offsetof(CPUSPARCState, softint),
5378 "softint");
5379 #else
5380 cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
5381 "wim");
5382 #endif
5383 cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
5384 "cond");
5385 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
5386 "cc_src");
5387 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
5388 offsetof(CPUSPARCState, cc_src2),
5389 "cc_src2");
5390 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
5391 "cc_dst");
5392 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
5393 "cc_op");
5394 cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
5395 "psr");
5396 cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
5397 "fsr");
5398 cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
5399 "pc");
5400 cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
5401 "npc");
5402 cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
5403 #ifndef CONFIG_USER_ONLY
5404 cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
5405 "tbr");
5406 #endif
5407 for (i = 1; i < 8; i++) {
5408 cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
5409 offsetof(CPUSPARCState, gregs[i]),
5410 gregnames[i]);
5411 }
5412 for (i = 0; i < TARGET_DPREGS; i++) {
5413 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
5414 offsetof(CPUSPARCState, fpr[i]),
5415 fregnames[i]);
5416 }
5417
5418 /* register helpers */
5419
5420 #define GEN_HELPER 2
5421 #include "helper.h"
5422 }
5423 }
5424
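/* Called after a fault inside a TB to recover the interrupted pc/npc
   pair from the gen_opc_* side tables; npc may be a real address or
   one of the DYNAMIC_PC/JUMP_PC markers handled below. */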
5425 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
5426 {
5427 target_ulong npc;
5428 env->pc = gen_opc_pc[pc_pos];
5429 npc = gen_opc_npc[pc_pos];
5430 if (npc == DYNAMIC_PC) {
5431 /* dynamic NPC: already stored */
5432 } else if (npc == JUMP_PC) {
5433 /* jump PC: use 'cond' and the jump targets of the translation */
5434 if (env->cond) {
5435 env->npc = gen_opc_jump_pc[0];
5436 } else {
5437 env->npc = gen_opc_jump_pc[1];
5438 }
5439 } else {
5440 env->npc = npc;
5441 }
5442 }