]> git.proxmox.com Git - mirror_qemu.git/blob - target-sparc/translate.c
target-sparc: Avoid cpu_tmp32 in Read Priv Register
[mirror_qemu.git] / target-sparc / translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond, cpu_dst;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0;
63 static TCGv_i32 cpu_tmp32;
64 static TCGv_i64 cpu_tmp64;
65 /* Floating point registers */
66 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67
68 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
69 static target_ulong gen_opc_jump_pc[2];
70
71 #include "gen-icount.h"
72
73 typedef struct DisasContext {
74 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
75 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
76 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
77 int is_br;
78 int mem_idx;
79 int fpu_enabled;
80 int address_mask_32bit;
81 int singlestep;
82 uint32_t cc_op; /* current CC operation */
83 struct TranslationBlock *tb;
84 sparc_def_t *def;
85 TCGv_i32 t32[3];
86 TCGv ttl[5];
87 int n_t32;
88 int n_ttl;
89 } DisasContext;
90
91 typedef struct {
92 TCGCond cond;
93 bool is_bool;
94 bool g1, g2;
95 TCGv c1, c2;
96 } DisasCompare;
97
98 // This function uses non-native bit order
99 #define GET_FIELD(X, FROM, TO) \
100 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
101
102 // This function uses the order in the manuals, i.e. bit 0 is 2^0
103 #define GET_FIELD_SP(X, FROM, TO) \
104 GET_FIELD(X, 31 - (TO), 31 - (FROM))
105
106 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
107 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
108
109 #ifdef TARGET_SPARC64
110 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
111 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
112 #else
113 #define DFPREG(r) (r & 0x1e)
114 #define QFPREG(r) (r & 0x1c)
115 #endif
116
117 #define UA2005_HTRAP_MASK 0xff
118 #define V8_TRAP_MASK 0x7f
119
120 static int sign_extend(int x, int len)
121 {
122 len = 32 - len;
123 return (x << len) >> len;
124 }
125
126 #define IS_IMM (insn & (1<<13))
127
128 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
129 {
130 TCGv_i32 t;
131 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
132 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
133 return t;
134 }
135
136 static inline TCGv get_temp_tl(DisasContext *dc)
137 {
138 TCGv t;
139 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
140 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
141 return t;
142 }
143
144 static inline void gen_update_fprs_dirty(int rd)
145 {
146 #if defined(TARGET_SPARC64)
147 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
148 #endif
149 }
150
151 /* floating point registers moves */
152 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
153 {
154 #if TCG_TARGET_REG_BITS == 32
155 if (src & 1) {
156 return TCGV_LOW(cpu_fpr[src / 2]);
157 } else {
158 return TCGV_HIGH(cpu_fpr[src / 2]);
159 }
160 #else
161 if (src & 1) {
162 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
163 } else {
164 TCGv_i32 ret = get_temp_i32(dc);
165 TCGv_i64 t = tcg_temp_new_i64();
166
167 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
168 tcg_gen_trunc_i64_i32(ret, t);
169 tcg_temp_free_i64(t);
170
171 return ret;
172 }
173 #endif
174 }
175
176 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
177 {
178 #if TCG_TARGET_REG_BITS == 32
179 if (dst & 1) {
180 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
181 } else {
182 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
183 }
184 #else
185 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
186 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
187 (dst & 1 ? 0 : 32), 32);
188 #endif
189 gen_update_fprs_dirty(dst);
190 }
191
192 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
193 {
194 return get_temp_i32(dc);
195 }
196
197 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
198 {
199 src = DFPREG(src);
200 return cpu_fpr[src / 2];
201 }
202
203 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
204 {
205 dst = DFPREG(dst);
206 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
207 gen_update_fprs_dirty(dst);
208 }
209
210 static TCGv_i64 gen_dest_fpr_D(void)
211 {
212 return cpu_tmp64;
213 }
214
215 static void gen_op_load_fpr_QT0(unsigned int src)
216 {
217 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
218 offsetof(CPU_QuadU, ll.upper));
219 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
220 offsetof(CPU_QuadU, ll.lower));
221 }
222
223 static void gen_op_load_fpr_QT1(unsigned int src)
224 {
225 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
226 offsetof(CPU_QuadU, ll.upper));
227 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
228 offsetof(CPU_QuadU, ll.lower));
229 }
230
231 static void gen_op_store_QT0_fpr(unsigned int dst)
232 {
233 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
234 offsetof(CPU_QuadU, ll.upper));
235 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
236 offsetof(CPU_QuadU, ll.lower));
237 }
238
239 #ifdef TARGET_SPARC64
240 static void gen_move_Q(unsigned int rd, unsigned int rs)
241 {
242 rd = QFPREG(rd);
243 rs = QFPREG(rs);
244
245 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
246 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
247 gen_update_fprs_dirty(rd);
248 }
249 #endif
250
251 /* moves */
252 #ifdef CONFIG_USER_ONLY
253 #define supervisor(dc) 0
254 #ifdef TARGET_SPARC64
255 #define hypervisor(dc) 0
256 #endif
257 #else
258 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
259 #ifdef TARGET_SPARC64
260 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
261 #else
262 #endif
263 #endif
264
265 #ifdef TARGET_SPARC64
266 #ifndef TARGET_ABI32
267 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
268 #else
269 #define AM_CHECK(dc) (1)
270 #endif
271 #endif
272
273 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
274 {
275 #ifdef TARGET_SPARC64
276 if (AM_CHECK(dc))
277 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
278 #endif
279 }
280
281 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
282 {
283 if (reg == 0 || reg >= 8) {
284 TCGv t = get_temp_tl(dc);
285 if (reg == 0) {
286 tcg_gen_movi_tl(t, 0);
287 } else {
288 tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
289 }
290 return t;
291 } else {
292 return cpu_gregs[reg];
293 }
294 }
295
296 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
297 {
298 if (reg > 0) {
299 if (reg < 8) {
300 tcg_gen_mov_tl(cpu_gregs[reg], v);
301 } else {
302 tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
303 }
304 }
305 }
306
307 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
308 {
309 if (reg == 0 || reg >= 8) {
310 return get_temp_tl(dc);
311 } else {
312 return cpu_gregs[reg];
313 }
314 }
315
316 static inline void gen_goto_tb(DisasContext *s, int tb_num,
317 target_ulong pc, target_ulong npc)
318 {
319 TranslationBlock *tb;
320
321 tb = s->tb;
322 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
323 (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
324 !s->singlestep) {
325 /* jump to same page: we can use a direct jump */
326 tcg_gen_goto_tb(tb_num);
327 tcg_gen_movi_tl(cpu_pc, pc);
328 tcg_gen_movi_tl(cpu_npc, npc);
329 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
330 } else {
331 /* jump to another page: currently not optimized */
332 tcg_gen_movi_tl(cpu_pc, pc);
333 tcg_gen_movi_tl(cpu_npc, npc);
334 tcg_gen_exit_tb(0);
335 }
336 }
337
338 // XXX suboptimal
339 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
340 {
341 tcg_gen_extu_i32_tl(reg, src);
342 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
343 tcg_gen_andi_tl(reg, reg, 0x1);
344 }
345
346 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
347 {
348 tcg_gen_extu_i32_tl(reg, src);
349 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
350 tcg_gen_andi_tl(reg, reg, 0x1);
351 }
352
353 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
354 {
355 tcg_gen_extu_i32_tl(reg, src);
356 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
357 tcg_gen_andi_tl(reg, reg, 0x1);
358 }
359
360 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
361 {
362 tcg_gen_extu_i32_tl(reg, src);
363 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
364 tcg_gen_andi_tl(reg, reg, 0x1);
365 }
366
367 static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
368 {
369 tcg_gen_mov_tl(cpu_cc_src, src1);
370 tcg_gen_movi_tl(cpu_cc_src2, src2);
371 tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
372 tcg_gen_mov_tl(dst, cpu_cc_dst);
373 }
374
375 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
376 {
377 tcg_gen_mov_tl(cpu_cc_src, src1);
378 tcg_gen_mov_tl(cpu_cc_src2, src2);
379 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
380 tcg_gen_mov_tl(dst, cpu_cc_dst);
381 }
382
383 static TCGv_i32 gen_add32_carry32(void)
384 {
385 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
386
387 /* Carry is computed from a previous add: (dst < src) */
388 #if TARGET_LONG_BITS == 64
389 cc_src1_32 = tcg_temp_new_i32();
390 cc_src2_32 = tcg_temp_new_i32();
391 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
392 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
393 #else
394 cc_src1_32 = cpu_cc_dst;
395 cc_src2_32 = cpu_cc_src;
396 #endif
397
398 carry_32 = tcg_temp_new_i32();
399 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
400
401 #if TARGET_LONG_BITS == 64
402 tcg_temp_free_i32(cc_src1_32);
403 tcg_temp_free_i32(cc_src2_32);
404 #endif
405
406 return carry_32;
407 }
408
409 static TCGv_i32 gen_sub32_carry32(void)
410 {
411 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
412
413 /* Carry is computed from a previous borrow: (src1 < src2) */
414 #if TARGET_LONG_BITS == 64
415 cc_src1_32 = tcg_temp_new_i32();
416 cc_src2_32 = tcg_temp_new_i32();
417 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
418 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
419 #else
420 cc_src1_32 = cpu_cc_src;
421 cc_src2_32 = cpu_cc_src2;
422 #endif
423
424 carry_32 = tcg_temp_new_i32();
425 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
426
427 #if TARGET_LONG_BITS == 64
428 tcg_temp_free_i32(cc_src1_32);
429 tcg_temp_free_i32(cc_src2_32);
430 #endif
431
432 return carry_32;
433 }
434
435 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
436 TCGv src2, int update_cc)
437 {
438 TCGv_i32 carry_32;
439 TCGv carry;
440
441 switch (dc->cc_op) {
442 case CC_OP_DIV:
443 case CC_OP_LOGIC:
444 /* Carry is known to be zero. Fall back to plain ADD. */
445 if (update_cc) {
446 gen_op_add_cc(dst, src1, src2);
447 } else {
448 tcg_gen_add_tl(dst, src1, src2);
449 }
450 return;
451
452 case CC_OP_ADD:
453 case CC_OP_TADD:
454 case CC_OP_TADDTV:
455 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
456 {
457 /* For 32-bit hosts, we can re-use the host's hardware carry
458 generation by using an ADD2 opcode. We discard the low
459 part of the output. Ideally we'd combine this operation
460 with the add that generated the carry in the first place. */
461 TCGv dst_low = tcg_temp_new();
462 tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
463 cpu_cc_src, src1, cpu_cc_src2, src2);
464 tcg_temp_free(dst_low);
465 goto add_done;
466 }
467 #endif
468 carry_32 = gen_add32_carry32();
469 break;
470
471 case CC_OP_SUB:
472 case CC_OP_TSUB:
473 case CC_OP_TSUBTV:
474 carry_32 = gen_sub32_carry32();
475 break;
476
477 default:
478 /* We need external help to produce the carry. */
479 carry_32 = tcg_temp_new_i32();
480 gen_helper_compute_C_icc(carry_32, cpu_env);
481 break;
482 }
483
484 #if TARGET_LONG_BITS == 64
485 carry = tcg_temp_new();
486 tcg_gen_extu_i32_i64(carry, carry_32);
487 #else
488 carry = carry_32;
489 #endif
490
491 tcg_gen_add_tl(dst, src1, src2);
492 tcg_gen_add_tl(dst, dst, carry);
493
494 tcg_temp_free_i32(carry_32);
495 #if TARGET_LONG_BITS == 64
496 tcg_temp_free(carry);
497 #endif
498
499 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
500 add_done:
501 #endif
502 if (update_cc) {
503 tcg_gen_mov_tl(cpu_cc_src, src1);
504 tcg_gen_mov_tl(cpu_cc_src2, src2);
505 tcg_gen_mov_tl(cpu_cc_dst, dst);
506 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
507 dc->cc_op = CC_OP_ADDX;
508 }
509 }
510
511 static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
512 {
513 tcg_gen_mov_tl(cpu_cc_src, src1);
514 tcg_gen_movi_tl(cpu_cc_src2, src2);
515 if (src2 == 0) {
516 tcg_gen_mov_tl(cpu_cc_dst, src1);
517 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
518 dc->cc_op = CC_OP_LOGIC;
519 } else {
520 tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
521 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
522 dc->cc_op = CC_OP_SUB;
523 }
524 tcg_gen_mov_tl(dst, cpu_cc_dst);
525 }
526
527 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
528 {
529 tcg_gen_mov_tl(cpu_cc_src, src1);
530 tcg_gen_mov_tl(cpu_cc_src2, src2);
531 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
532 tcg_gen_mov_tl(dst, cpu_cc_dst);
533 }
534
535 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
536 TCGv src2, int update_cc)
537 {
538 TCGv_i32 carry_32;
539 TCGv carry;
540
541 switch (dc->cc_op) {
542 case CC_OP_DIV:
543 case CC_OP_LOGIC:
544 /* Carry is known to be zero. Fall back to plain SUB. */
545 if (update_cc) {
546 gen_op_sub_cc(dst, src1, src2);
547 } else {
548 tcg_gen_sub_tl(dst, src1, src2);
549 }
550 return;
551
552 case CC_OP_ADD:
553 case CC_OP_TADD:
554 case CC_OP_TADDTV:
555 carry_32 = gen_add32_carry32();
556 break;
557
558 case CC_OP_SUB:
559 case CC_OP_TSUB:
560 case CC_OP_TSUBTV:
561 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
562 {
563 /* For 32-bit hosts, we can re-use the host's hardware carry
564 generation by using a SUB2 opcode. We discard the low
565 part of the output. Ideally we'd combine this operation
566 with the add that generated the carry in the first place. */
567 TCGv dst_low = tcg_temp_new();
568 tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
569 cpu_cc_src, src1, cpu_cc_src2, src2);
570 tcg_temp_free(dst_low);
571 goto sub_done;
572 }
573 #endif
574 carry_32 = gen_sub32_carry32();
575 break;
576
577 default:
578 /* We need external help to produce the carry. */
579 carry_32 = tcg_temp_new_i32();
580 gen_helper_compute_C_icc(carry_32, cpu_env);
581 break;
582 }
583
584 #if TARGET_LONG_BITS == 64
585 carry = tcg_temp_new();
586 tcg_gen_extu_i32_i64(carry, carry_32);
587 #else
588 carry = carry_32;
589 #endif
590
591 tcg_gen_sub_tl(dst, src1, src2);
592 tcg_gen_sub_tl(dst, dst, carry);
593
594 tcg_temp_free_i32(carry_32);
595 #if TARGET_LONG_BITS == 64
596 tcg_temp_free(carry);
597 #endif
598
599 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
600 sub_done:
601 #endif
602 if (update_cc) {
603 tcg_gen_mov_tl(cpu_cc_src, src1);
604 tcg_gen_mov_tl(cpu_cc_src2, src2);
605 tcg_gen_mov_tl(cpu_cc_dst, dst);
606 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
607 dc->cc_op = CC_OP_SUBX;
608 }
609 }
610
611 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
612 {
613 TCGv r_temp, zero;
614
615 r_temp = tcg_temp_new();
616
617 /* old op:
618 if (!(env->y & 1))
619 T1 = 0;
620 */
621 zero = tcg_const_tl(0);
622 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
623 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
624 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
625 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
626 zero, cpu_cc_src2);
627 tcg_temp_free(zero);
628
629 // b2 = T0 & 1;
630 // env->y = (b2 << 31) | (env->y >> 1);
631 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
632 tcg_gen_shli_tl(r_temp, r_temp, 31);
633 tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
634 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
635 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
636 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
637
638 // b1 = N ^ V;
639 gen_mov_reg_N(cpu_tmp0, cpu_psr);
640 gen_mov_reg_V(r_temp, cpu_psr);
641 tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
642 tcg_temp_free(r_temp);
643
644 // T0 = (b1 << 31) | (T0 >> 1);
645 // src1 = T0;
646 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
647 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
648 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
649
650 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
651
652 tcg_gen_mov_tl(dst, cpu_cc_dst);
653 }
654
655 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
656 {
657 TCGv_i32 r_src1, r_src2;
658 TCGv_i64 r_temp, r_temp2;
659
660 r_src1 = tcg_temp_new_i32();
661 r_src2 = tcg_temp_new_i32();
662
663 tcg_gen_trunc_tl_i32(r_src1, src1);
664 tcg_gen_trunc_tl_i32(r_src2, src2);
665
666 r_temp = tcg_temp_new_i64();
667 r_temp2 = tcg_temp_new_i64();
668
669 if (sign_ext) {
670 tcg_gen_ext_i32_i64(r_temp, r_src2);
671 tcg_gen_ext_i32_i64(r_temp2, r_src1);
672 } else {
673 tcg_gen_extu_i32_i64(r_temp, r_src2);
674 tcg_gen_extu_i32_i64(r_temp2, r_src1);
675 }
676
677 tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
678
679 tcg_gen_shri_i64(r_temp, r_temp2, 32);
680 tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
681 tcg_temp_free_i64(r_temp);
682 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
683
684 tcg_gen_trunc_i64_tl(dst, r_temp2);
685
686 tcg_temp_free_i64(r_temp2);
687
688 tcg_temp_free_i32(r_src1);
689 tcg_temp_free_i32(r_src2);
690 }
691
692 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
693 {
694 /* zero-extend truncated operands before multiplication */
695 gen_op_multiply(dst, src1, src2, 0);
696 }
697
698 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
699 {
700 /* sign-extend truncated operands before multiplication */
701 gen_op_multiply(dst, src1, src2, 1);
702 }
703
704 // 1
705 static inline void gen_op_eval_ba(TCGv dst)
706 {
707 tcg_gen_movi_tl(dst, 1);
708 }
709
710 // Z
711 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
712 {
713 gen_mov_reg_Z(dst, src);
714 }
715
716 // Z | (N ^ V)
717 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
718 {
719 gen_mov_reg_N(cpu_tmp0, src);
720 gen_mov_reg_V(dst, src);
721 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
722 gen_mov_reg_Z(cpu_tmp0, src);
723 tcg_gen_or_tl(dst, dst, cpu_tmp0);
724 }
725
726 // N ^ V
727 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
728 {
729 gen_mov_reg_V(cpu_tmp0, src);
730 gen_mov_reg_N(dst, src);
731 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
732 }
733
734 // C | Z
735 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
736 {
737 gen_mov_reg_Z(cpu_tmp0, src);
738 gen_mov_reg_C(dst, src);
739 tcg_gen_or_tl(dst, dst, cpu_tmp0);
740 }
741
742 // C
743 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
744 {
745 gen_mov_reg_C(dst, src);
746 }
747
748 // V
749 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
750 {
751 gen_mov_reg_V(dst, src);
752 }
753
754 // 0
755 static inline void gen_op_eval_bn(TCGv dst)
756 {
757 tcg_gen_movi_tl(dst, 0);
758 }
759
760 // N
761 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
762 {
763 gen_mov_reg_N(dst, src);
764 }
765
766 // !Z
767 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
768 {
769 gen_mov_reg_Z(dst, src);
770 tcg_gen_xori_tl(dst, dst, 0x1);
771 }
772
773 // !(Z | (N ^ V))
774 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
775 {
776 gen_mov_reg_N(cpu_tmp0, src);
777 gen_mov_reg_V(dst, src);
778 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
779 gen_mov_reg_Z(cpu_tmp0, src);
780 tcg_gen_or_tl(dst, dst, cpu_tmp0);
781 tcg_gen_xori_tl(dst, dst, 0x1);
782 }
783
784 // !(N ^ V)
785 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
786 {
787 gen_mov_reg_V(cpu_tmp0, src);
788 gen_mov_reg_N(dst, src);
789 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
790 tcg_gen_xori_tl(dst, dst, 0x1);
791 }
792
793 // !(C | Z)
794 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
795 {
796 gen_mov_reg_Z(cpu_tmp0, src);
797 gen_mov_reg_C(dst, src);
798 tcg_gen_or_tl(dst, dst, cpu_tmp0);
799 tcg_gen_xori_tl(dst, dst, 0x1);
800 }
801
802 // !C
803 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
804 {
805 gen_mov_reg_C(dst, src);
806 tcg_gen_xori_tl(dst, dst, 0x1);
807 }
808
809 // !N
810 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
811 {
812 gen_mov_reg_N(dst, src);
813 tcg_gen_xori_tl(dst, dst, 0x1);
814 }
815
816 // !V
817 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
818 {
819 gen_mov_reg_V(dst, src);
820 tcg_gen_xori_tl(dst, dst, 0x1);
821 }
822
823 /*
824 FPSR bit field FCC1 | FCC0:
825 0 =
826 1 <
827 2 >
828 3 unordered
829 */
830 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
831 unsigned int fcc_offset)
832 {
833 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
834 tcg_gen_andi_tl(reg, reg, 0x1);
835 }
836
837 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
838 unsigned int fcc_offset)
839 {
840 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
841 tcg_gen_andi_tl(reg, reg, 0x1);
842 }
843
844 // !0: FCC0 | FCC1
845 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
846 unsigned int fcc_offset)
847 {
848 gen_mov_reg_FCC0(dst, src, fcc_offset);
849 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
850 tcg_gen_or_tl(dst, dst, cpu_tmp0);
851 }
852
853 // 1 or 2: FCC0 ^ FCC1
854 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
855 unsigned int fcc_offset)
856 {
857 gen_mov_reg_FCC0(dst, src, fcc_offset);
858 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
859 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
860 }
861
862 // 1 or 3: FCC0
863 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
864 unsigned int fcc_offset)
865 {
866 gen_mov_reg_FCC0(dst, src, fcc_offset);
867 }
868
869 // 1: FCC0 & !FCC1
870 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
871 unsigned int fcc_offset)
872 {
873 gen_mov_reg_FCC0(dst, src, fcc_offset);
874 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
875 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
876 tcg_gen_and_tl(dst, dst, cpu_tmp0);
877 }
878
879 // 2 or 3: FCC1
880 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
881 unsigned int fcc_offset)
882 {
883 gen_mov_reg_FCC1(dst, src, fcc_offset);
884 }
885
886 // 2: !FCC0 & FCC1
887 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
888 unsigned int fcc_offset)
889 {
890 gen_mov_reg_FCC0(dst, src, fcc_offset);
891 tcg_gen_xori_tl(dst, dst, 0x1);
892 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
893 tcg_gen_and_tl(dst, dst, cpu_tmp0);
894 }
895
896 // 3: FCC0 & FCC1
897 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
898 unsigned int fcc_offset)
899 {
900 gen_mov_reg_FCC0(dst, src, fcc_offset);
901 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
902 tcg_gen_and_tl(dst, dst, cpu_tmp0);
903 }
904
905 // 0: !(FCC0 | FCC1)
906 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
907 unsigned int fcc_offset)
908 {
909 gen_mov_reg_FCC0(dst, src, fcc_offset);
910 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
911 tcg_gen_or_tl(dst, dst, cpu_tmp0);
912 tcg_gen_xori_tl(dst, dst, 0x1);
913 }
914
915 // 0 or 3: !(FCC0 ^ FCC1)
916 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
917 unsigned int fcc_offset)
918 {
919 gen_mov_reg_FCC0(dst, src, fcc_offset);
920 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
921 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
922 tcg_gen_xori_tl(dst, dst, 0x1);
923 }
924
925 // 0 or 2: !FCC0
926 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
927 unsigned int fcc_offset)
928 {
929 gen_mov_reg_FCC0(dst, src, fcc_offset);
930 tcg_gen_xori_tl(dst, dst, 0x1);
931 }
932
933 // !1: !(FCC0 & !FCC1)
934 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
935 unsigned int fcc_offset)
936 {
937 gen_mov_reg_FCC0(dst, src, fcc_offset);
938 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
939 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
940 tcg_gen_and_tl(dst, dst, cpu_tmp0);
941 tcg_gen_xori_tl(dst, dst, 0x1);
942 }
943
944 // 0 or 1: !FCC1
945 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
946 unsigned int fcc_offset)
947 {
948 gen_mov_reg_FCC1(dst, src, fcc_offset);
949 tcg_gen_xori_tl(dst, dst, 0x1);
950 }
951
952 // !2: !(!FCC0 & FCC1)
953 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
954 unsigned int fcc_offset)
955 {
956 gen_mov_reg_FCC0(dst, src, fcc_offset);
957 tcg_gen_xori_tl(dst, dst, 0x1);
958 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
959 tcg_gen_and_tl(dst, dst, cpu_tmp0);
960 tcg_gen_xori_tl(dst, dst, 0x1);
961 }
962
963 // !3: !(FCC0 & FCC1)
964 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
965 unsigned int fcc_offset)
966 {
967 gen_mov_reg_FCC0(dst, src, fcc_offset);
968 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
969 tcg_gen_and_tl(dst, dst, cpu_tmp0);
970 tcg_gen_xori_tl(dst, dst, 0x1);
971 }
972
973 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
974 target_ulong pc2, TCGv r_cond)
975 {
976 int l1;
977
978 l1 = gen_new_label();
979
980 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
981
982 gen_goto_tb(dc, 0, pc1, pc1 + 4);
983
984 gen_set_label(l1);
985 gen_goto_tb(dc, 1, pc2, pc2 + 4);
986 }
987
988 static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
989 target_ulong pc2, TCGv r_cond)
990 {
991 int l1;
992
993 l1 = gen_new_label();
994
995 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
996
997 gen_goto_tb(dc, 0, pc2, pc1);
998
999 gen_set_label(l1);
1000 gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
1001 }
1002
1003 static inline void gen_generic_branch(DisasContext *dc)
1004 {
1005 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1006 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1007 TCGv zero = tcg_const_tl(0);
1008
1009 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1010
1011 tcg_temp_free(npc0);
1012 tcg_temp_free(npc1);
1013 tcg_temp_free(zero);
1014 }
1015
1016 /* call this function before using the condition register as it may
1017 have been set for a jump */
1018 static inline void flush_cond(DisasContext *dc)
1019 {
1020 if (dc->npc == JUMP_PC) {
1021 gen_generic_branch(dc);
1022 dc->npc = DYNAMIC_PC;
1023 }
1024 }
1025
1026 static inline void save_npc(DisasContext *dc)
1027 {
1028 if (dc->npc == JUMP_PC) {
1029 gen_generic_branch(dc);
1030 dc->npc = DYNAMIC_PC;
1031 } else if (dc->npc != DYNAMIC_PC) {
1032 tcg_gen_movi_tl(cpu_npc, dc->npc);
1033 }
1034 }
1035
1036 static inline void update_psr(DisasContext *dc)
1037 {
1038 if (dc->cc_op != CC_OP_FLAGS) {
1039 dc->cc_op = CC_OP_FLAGS;
1040 gen_helper_compute_psr(cpu_env);
1041 }
1042 }
1043
1044 static inline void save_state(DisasContext *dc)
1045 {
1046 tcg_gen_movi_tl(cpu_pc, dc->pc);
1047 save_npc(dc);
1048 }
1049
1050 static inline void gen_mov_pc_npc(DisasContext *dc)
1051 {
1052 if (dc->npc == JUMP_PC) {
1053 gen_generic_branch(dc);
1054 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1055 dc->pc = DYNAMIC_PC;
1056 } else if (dc->npc == DYNAMIC_PC) {
1057 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1058 dc->pc = DYNAMIC_PC;
1059 } else {
1060 dc->pc = dc->npc;
1061 }
1062 }
1063
1064 static inline void gen_op_next_insn(void)
1065 {
1066 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1067 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1068 }
1069
1070 static void free_compare(DisasCompare *cmp)
1071 {
1072 if (!cmp->g1) {
1073 tcg_temp_free(cmp->c1);
1074 }
1075 if (!cmp->g2) {
1076 tcg_temp_free(cmp->c2);
1077 }
1078 }
1079
1080 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1081 DisasContext *dc)
1082 {
1083 static int subcc_cond[16] = {
1084 TCG_COND_NEVER,
1085 TCG_COND_EQ,
1086 TCG_COND_LE,
1087 TCG_COND_LT,
1088 TCG_COND_LEU,
1089 TCG_COND_LTU,
1090 -1, /* neg */
1091 -1, /* overflow */
1092 TCG_COND_ALWAYS,
1093 TCG_COND_NE,
1094 TCG_COND_GT,
1095 TCG_COND_GE,
1096 TCG_COND_GTU,
1097 TCG_COND_GEU,
1098 -1, /* pos */
1099 -1, /* no overflow */
1100 };
1101
1102 static int logic_cond[16] = {
1103 TCG_COND_NEVER,
1104 TCG_COND_EQ, /* eq: Z */
1105 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1106 TCG_COND_LT, /* lt: N ^ V -> N */
1107 TCG_COND_EQ, /* leu: C | Z -> Z */
1108 TCG_COND_NEVER, /* ltu: C -> 0 */
1109 TCG_COND_LT, /* neg: N */
1110 TCG_COND_NEVER, /* vs: V -> 0 */
1111 TCG_COND_ALWAYS,
1112 TCG_COND_NE, /* ne: !Z */
1113 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1114 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1115 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1116 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1117 TCG_COND_GE, /* pos: !N */
1118 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1119 };
1120
1121 TCGv_i32 r_src;
1122 TCGv r_dst;
1123
1124 #ifdef TARGET_SPARC64
1125 if (xcc) {
1126 r_src = cpu_xcc;
1127 } else {
1128 r_src = cpu_psr;
1129 }
1130 #else
1131 r_src = cpu_psr;
1132 #endif
1133
1134 switch (dc->cc_op) {
1135 case CC_OP_LOGIC:
1136 cmp->cond = logic_cond[cond];
1137 do_compare_dst_0:
1138 cmp->is_bool = false;
1139 cmp->g2 = false;
1140 cmp->c2 = tcg_const_tl(0);
1141 #ifdef TARGET_SPARC64
1142 if (!xcc) {
1143 cmp->g1 = false;
1144 cmp->c1 = tcg_temp_new();
1145 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1146 break;
1147 }
1148 #endif
1149 cmp->g1 = true;
1150 cmp->c1 = cpu_cc_dst;
1151 break;
1152
1153 case CC_OP_SUB:
1154 switch (cond) {
1155 case 6: /* neg */
1156 case 14: /* pos */
1157 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1158 goto do_compare_dst_0;
1159
1160 case 7: /* overflow */
1161 case 15: /* !overflow */
1162 goto do_dynamic;
1163
1164 default:
1165 cmp->cond = subcc_cond[cond];
1166 cmp->is_bool = false;
1167 #ifdef TARGET_SPARC64
1168 if (!xcc) {
1169 /* Note that sign-extension works for unsigned compares as
1170 long as both operands are sign-extended. */
1171 cmp->g1 = cmp->g2 = false;
1172 cmp->c1 = tcg_temp_new();
1173 cmp->c2 = tcg_temp_new();
1174 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1175 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1176 break;
1177 }
1178 #endif
1179 cmp->g1 = cmp->g2 = true;
1180 cmp->c1 = cpu_cc_src;
1181 cmp->c2 = cpu_cc_src2;
1182 break;
1183 }
1184 break;
1185
1186 default:
1187 do_dynamic:
1188 gen_helper_compute_psr(cpu_env);
1189 dc->cc_op = CC_OP_FLAGS;
1190 /* FALLTHRU */
1191
1192 case CC_OP_FLAGS:
1193 /* We're going to generate a boolean result. */
1194 cmp->cond = TCG_COND_NE;
1195 cmp->is_bool = true;
1196 cmp->g1 = cmp->g2 = false;
1197 cmp->c1 = r_dst = tcg_temp_new();
1198 cmp->c2 = tcg_const_tl(0);
1199
1200 switch (cond) {
1201 case 0x0:
1202 gen_op_eval_bn(r_dst);
1203 break;
1204 case 0x1:
1205 gen_op_eval_be(r_dst, r_src);
1206 break;
1207 case 0x2:
1208 gen_op_eval_ble(r_dst, r_src);
1209 break;
1210 case 0x3:
1211 gen_op_eval_bl(r_dst, r_src);
1212 break;
1213 case 0x4:
1214 gen_op_eval_bleu(r_dst, r_src);
1215 break;
1216 case 0x5:
1217 gen_op_eval_bcs(r_dst, r_src);
1218 break;
1219 case 0x6:
1220 gen_op_eval_bneg(r_dst, r_src);
1221 break;
1222 case 0x7:
1223 gen_op_eval_bvs(r_dst, r_src);
1224 break;
1225 case 0x8:
1226 gen_op_eval_ba(r_dst);
1227 break;
1228 case 0x9:
1229 gen_op_eval_bne(r_dst, r_src);
1230 break;
1231 case 0xa:
1232 gen_op_eval_bg(r_dst, r_src);
1233 break;
1234 case 0xb:
1235 gen_op_eval_bge(r_dst, r_src);
1236 break;
1237 case 0xc:
1238 gen_op_eval_bgu(r_dst, r_src);
1239 break;
1240 case 0xd:
1241 gen_op_eval_bcc(r_dst, r_src);
1242 break;
1243 case 0xe:
1244 gen_op_eval_bpos(r_dst, r_src);
1245 break;
1246 case 0xf:
1247 gen_op_eval_bvc(r_dst, r_src);
1248 break;
1249 }
1250 break;
1251 }
1252 }
1253
1254 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1255 {
1256 unsigned int offset;
1257 TCGv r_dst;
1258
1259 /* For now we still generate a straight boolean result. */
1260 cmp->cond = TCG_COND_NE;
1261 cmp->is_bool = true;
1262 cmp->g1 = cmp->g2 = false;
1263 cmp->c1 = r_dst = tcg_temp_new();
1264 cmp->c2 = tcg_const_tl(0);
1265
1266 switch (cc) {
1267 default:
1268 case 0x0:
1269 offset = 0;
1270 break;
1271 case 0x1:
1272 offset = 32 - 10;
1273 break;
1274 case 0x2:
1275 offset = 34 - 10;
1276 break;
1277 case 0x3:
1278 offset = 36 - 10;
1279 break;
1280 }
1281
1282 switch (cond) {
1283 case 0x0:
1284 gen_op_eval_bn(r_dst);
1285 break;
1286 case 0x1:
1287 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1288 break;
1289 case 0x2:
1290 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1291 break;
1292 case 0x3:
1293 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1294 break;
1295 case 0x4:
1296 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1297 break;
1298 case 0x5:
1299 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1300 break;
1301 case 0x6:
1302 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1303 break;
1304 case 0x7:
1305 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1306 break;
1307 case 0x8:
1308 gen_op_eval_ba(r_dst);
1309 break;
1310 case 0x9:
1311 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1312 break;
1313 case 0xa:
1314 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1315 break;
1316 case 0xb:
1317 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1318 break;
1319 case 0xc:
1320 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1321 break;
1322 case 0xd:
1323 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1324 break;
1325 case 0xe:
1326 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1327 break;
1328 case 0xf:
1329 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1330 break;
1331 }
1332 }
1333
1334 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1335 DisasContext *dc)
1336 {
1337 DisasCompare cmp;
1338 gen_compare(&cmp, cc, cond, dc);
1339
1340 /* The interface is to return a boolean in r_dst. */
1341 if (cmp.is_bool) {
1342 tcg_gen_mov_tl(r_dst, cmp.c1);
1343 } else {
1344 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1345 }
1346
1347 free_compare(&cmp);
1348 }
1349
1350 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1351 {
1352 DisasCompare cmp;
1353 gen_fcompare(&cmp, cc, cond);
1354
1355 /* The interface is to return a boolean in r_dst. */
1356 if (cmp.is_bool) {
1357 tcg_gen_mov_tl(r_dst, cmp.c1);
1358 } else {
1359 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1360 }
1361
1362 free_compare(&cmp);
1363 }
1364
1365 #ifdef TARGET_SPARC64
1366 // Inverted logic
1367 static const int gen_tcg_cond_reg[8] = {
1368 -1,
1369 TCG_COND_NE,
1370 TCG_COND_GT,
1371 TCG_COND_GE,
1372 -1,
1373 TCG_COND_EQ,
1374 TCG_COND_LE,
1375 TCG_COND_LT,
1376 };
1377
1378 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1379 {
1380 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1381 cmp->is_bool = false;
1382 cmp->g1 = true;
1383 cmp->g2 = false;
1384 cmp->c1 = r_src;
1385 cmp->c2 = tcg_const_tl(0);
1386 }
1387
1388 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1389 {
1390 DisasCompare cmp;
1391 gen_compare_reg(&cmp, cond, r_src);
1392
1393 /* The interface is to return a boolean in r_dst. */
1394 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1395
1396 free_compare(&cmp);
1397 }
1398 #endif
1399
1400 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1401 {
1402 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1403 target_ulong target = dc->pc + offset;
1404
1405 #ifdef TARGET_SPARC64
1406 if (unlikely(AM_CHECK(dc))) {
1407 target &= 0xffffffffULL;
1408 }
1409 #endif
1410 if (cond == 0x0) {
1411 /* unconditional not taken */
1412 if (a) {
1413 dc->pc = dc->npc + 4;
1414 dc->npc = dc->pc + 4;
1415 } else {
1416 dc->pc = dc->npc;
1417 dc->npc = dc->pc + 4;
1418 }
1419 } else if (cond == 0x8) {
1420 /* unconditional taken */
1421 if (a) {
1422 dc->pc = target;
1423 dc->npc = dc->pc + 4;
1424 } else {
1425 dc->pc = dc->npc;
1426 dc->npc = target;
1427 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1428 }
1429 } else {
1430 flush_cond(dc);
1431 gen_cond(cpu_cond, cc, cond, dc);
1432 if (a) {
1433 gen_branch_a(dc, target, dc->npc, cpu_cond);
1434 dc->is_br = 1;
1435 } else {
1436 dc->pc = dc->npc;
1437 dc->jump_pc[0] = target;
1438 if (unlikely(dc->npc == DYNAMIC_PC)) {
1439 dc->jump_pc[1] = DYNAMIC_PC;
1440 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1441 } else {
1442 dc->jump_pc[1] = dc->npc + 4;
1443 dc->npc = JUMP_PC;
1444 }
1445 }
1446 }
1447 }
1448
1449 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1450 {
1451 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1452 target_ulong target = dc->pc + offset;
1453
1454 #ifdef TARGET_SPARC64
1455 if (unlikely(AM_CHECK(dc))) {
1456 target &= 0xffffffffULL;
1457 }
1458 #endif
1459 if (cond == 0x0) {
1460 /* unconditional not taken */
1461 if (a) {
1462 dc->pc = dc->npc + 4;
1463 dc->npc = dc->pc + 4;
1464 } else {
1465 dc->pc = dc->npc;
1466 dc->npc = dc->pc + 4;
1467 }
1468 } else if (cond == 0x8) {
1469 /* unconditional taken */
1470 if (a) {
1471 dc->pc = target;
1472 dc->npc = dc->pc + 4;
1473 } else {
1474 dc->pc = dc->npc;
1475 dc->npc = target;
1476 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1477 }
1478 } else {
1479 flush_cond(dc);
1480 gen_fcond(cpu_cond, cc, cond);
1481 if (a) {
1482 gen_branch_a(dc, target, dc->npc, cpu_cond);
1483 dc->is_br = 1;
1484 } else {
1485 dc->pc = dc->npc;
1486 dc->jump_pc[0] = target;
1487 if (unlikely(dc->npc == DYNAMIC_PC)) {
1488 dc->jump_pc[1] = DYNAMIC_PC;
1489 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1490 } else {
1491 dc->jump_pc[1] = dc->npc + 4;
1492 dc->npc = JUMP_PC;
1493 }
1494 }
1495 }
1496 }
1497
1498 #ifdef TARGET_SPARC64
1499 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1500 TCGv r_reg)
1501 {
1502 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1503 target_ulong target = dc->pc + offset;
1504
1505 if (unlikely(AM_CHECK(dc))) {
1506 target &= 0xffffffffULL;
1507 }
1508 flush_cond(dc);
1509 gen_cond_reg(cpu_cond, cond, r_reg);
1510 if (a) {
1511 gen_branch_a(dc, target, dc->npc, cpu_cond);
1512 dc->is_br = 1;
1513 } else {
1514 dc->pc = dc->npc;
1515 dc->jump_pc[0] = target;
1516 if (unlikely(dc->npc == DYNAMIC_PC)) {
1517 dc->jump_pc[1] = DYNAMIC_PC;
1518 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1519 } else {
1520 dc->jump_pc[1] = dc->npc + 4;
1521 dc->npc = JUMP_PC;
1522 }
1523 }
1524 }
1525
1526 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1527 {
1528 switch (fccno) {
1529 case 0:
1530 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1531 break;
1532 case 1:
1533 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1534 break;
1535 case 2:
1536 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1537 break;
1538 case 3:
1539 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1540 break;
1541 }
1542 }
1543
1544 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1545 {
1546 switch (fccno) {
1547 case 0:
1548 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1549 break;
1550 case 1:
1551 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1552 break;
1553 case 2:
1554 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1555 break;
1556 case 3:
1557 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1558 break;
1559 }
1560 }
1561
1562 static inline void gen_op_fcmpq(int fccno)
1563 {
1564 switch (fccno) {
1565 case 0:
1566 gen_helper_fcmpq(cpu_env);
1567 break;
1568 case 1:
1569 gen_helper_fcmpq_fcc1(cpu_env);
1570 break;
1571 case 2:
1572 gen_helper_fcmpq_fcc2(cpu_env);
1573 break;
1574 case 3:
1575 gen_helper_fcmpq_fcc3(cpu_env);
1576 break;
1577 }
1578 }
1579
1580 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1581 {
1582 switch (fccno) {
1583 case 0:
1584 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1585 break;
1586 case 1:
1587 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1588 break;
1589 case 2:
1590 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1591 break;
1592 case 3:
1593 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1594 break;
1595 }
1596 }
1597
1598 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1599 {
1600 switch (fccno) {
1601 case 0:
1602 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1603 break;
1604 case 1:
1605 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1606 break;
1607 case 2:
1608 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1609 break;
1610 case 3:
1611 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1612 break;
1613 }
1614 }
1615
1616 static inline void gen_op_fcmpeq(int fccno)
1617 {
1618 switch (fccno) {
1619 case 0:
1620 gen_helper_fcmpeq(cpu_env);
1621 break;
1622 case 1:
1623 gen_helper_fcmpeq_fcc1(cpu_env);
1624 break;
1625 case 2:
1626 gen_helper_fcmpeq_fcc2(cpu_env);
1627 break;
1628 case 3:
1629 gen_helper_fcmpeq_fcc3(cpu_env);
1630 break;
1631 }
1632 }
1633
1634 #else
1635
1636 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1637 {
1638 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1639 }
1640
1641 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1642 {
1643 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1644 }
1645
1646 static inline void gen_op_fcmpq(int fccno)
1647 {
1648 gen_helper_fcmpq(cpu_env);
1649 }
1650
1651 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1652 {
1653 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1654 }
1655
1656 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1657 {
1658 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1659 }
1660
1661 static inline void gen_op_fcmpeq(int fccno)
1662 {
1663 gen_helper_fcmpeq(cpu_env);
1664 }
1665 #endif
1666
1667 static inline void gen_op_fpexception_im(int fsr_flags)
1668 {
1669 TCGv_i32 r_const;
1670
1671 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1672 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1673 r_const = tcg_const_i32(TT_FP_EXCP);
1674 gen_helper_raise_exception(cpu_env, r_const);
1675 tcg_temp_free_i32(r_const);
1676 }
1677
1678 static int gen_trap_ifnofpu(DisasContext *dc)
1679 {
1680 #if !defined(CONFIG_USER_ONLY)
1681 if (!dc->fpu_enabled) {
1682 TCGv_i32 r_const;
1683
1684 save_state(dc);
1685 r_const = tcg_const_i32(TT_NFPU_INSN);
1686 gen_helper_raise_exception(cpu_env, r_const);
1687 tcg_temp_free_i32(r_const);
1688 dc->is_br = 1;
1689 return 1;
1690 }
1691 #endif
1692 return 0;
1693 }
1694
1695 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1696 {
1697 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1698 }
1699
1700 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1701 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1702 {
1703 TCGv_i32 dst, src;
1704
1705 src = gen_load_fpr_F(dc, rs);
1706 dst = gen_dest_fpr_F(dc);
1707
1708 gen(dst, cpu_env, src);
1709
1710 gen_store_fpr_F(dc, rd, dst);
1711 }
1712
1713 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1714 void (*gen)(TCGv_i32, TCGv_i32))
1715 {
1716 TCGv_i32 dst, src;
1717
1718 src = gen_load_fpr_F(dc, rs);
1719 dst = gen_dest_fpr_F(dc);
1720
1721 gen(dst, src);
1722
1723 gen_store_fpr_F(dc, rd, dst);
1724 }
1725
1726 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1727 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1728 {
1729 TCGv_i32 dst, src1, src2;
1730
1731 src1 = gen_load_fpr_F(dc, rs1);
1732 src2 = gen_load_fpr_F(dc, rs2);
1733 dst = gen_dest_fpr_F(dc);
1734
1735 gen(dst, cpu_env, src1, src2);
1736
1737 gen_store_fpr_F(dc, rd, dst);
1738 }
1739
1740 #ifdef TARGET_SPARC64
1741 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1742 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1743 {
1744 TCGv_i32 dst, src1, src2;
1745
1746 src1 = gen_load_fpr_F(dc, rs1);
1747 src2 = gen_load_fpr_F(dc, rs2);
1748 dst = gen_dest_fpr_F(dc);
1749
1750 gen(dst, src1, src2);
1751
1752 gen_store_fpr_F(dc, rd, dst);
1753 }
1754 #endif
1755
1756 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1757 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1758 {
1759 TCGv_i64 dst, src;
1760
1761 src = gen_load_fpr_D(dc, rs);
1762 dst = gen_dest_fpr_D();
1763
1764 gen(dst, cpu_env, src);
1765
1766 gen_store_fpr_D(dc, rd, dst);
1767 }
1768
1769 #ifdef TARGET_SPARC64
1770 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1771 void (*gen)(TCGv_i64, TCGv_i64))
1772 {
1773 TCGv_i64 dst, src;
1774
1775 src = gen_load_fpr_D(dc, rs);
1776 dst = gen_dest_fpr_D();
1777
1778 gen(dst, src);
1779
1780 gen_store_fpr_D(dc, rd, dst);
1781 }
1782 #endif
1783
1784 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1785 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1786 {
1787 TCGv_i64 dst, src1, src2;
1788
1789 src1 = gen_load_fpr_D(dc, rs1);
1790 src2 = gen_load_fpr_D(dc, rs2);
1791 dst = gen_dest_fpr_D();
1792
1793 gen(dst, cpu_env, src1, src2);
1794
1795 gen_store_fpr_D(dc, rd, dst);
1796 }
1797
1798 #ifdef TARGET_SPARC64
1799 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1800 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1801 {
1802 TCGv_i64 dst, src1, src2;
1803
1804 src1 = gen_load_fpr_D(dc, rs1);
1805 src2 = gen_load_fpr_D(dc, rs2);
1806 dst = gen_dest_fpr_D();
1807
1808 gen(dst, src1, src2);
1809
1810 gen_store_fpr_D(dc, rd, dst);
1811 }
1812
1813 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1814 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1815 {
1816 TCGv_i64 dst, src1, src2;
1817
1818 src1 = gen_load_fpr_D(dc, rs1);
1819 src2 = gen_load_fpr_D(dc, rs2);
1820 dst = gen_dest_fpr_D();
1821
1822 gen(dst, cpu_gsr, src1, src2);
1823
1824 gen_store_fpr_D(dc, rd, dst);
1825 }
1826
1827 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1828 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1829 {
1830 TCGv_i64 dst, src0, src1, src2;
1831
1832 src1 = gen_load_fpr_D(dc, rs1);
1833 src2 = gen_load_fpr_D(dc, rs2);
1834 src0 = gen_load_fpr_D(dc, rd);
1835 dst = gen_dest_fpr_D();
1836
1837 gen(dst, src0, src1, src2);
1838
1839 gen_store_fpr_D(dc, rd, dst);
1840 }
1841 #endif
1842
1843 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1844 void (*gen)(TCGv_ptr))
1845 {
1846 gen_op_load_fpr_QT1(QFPREG(rs));
1847
1848 gen(cpu_env);
1849
1850 gen_op_store_QT0_fpr(QFPREG(rd));
1851 gen_update_fprs_dirty(QFPREG(rd));
1852 }
1853
1854 #ifdef TARGET_SPARC64
1855 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1856 void (*gen)(TCGv_ptr))
1857 {
1858 gen_op_load_fpr_QT1(QFPREG(rs));
1859
1860 gen(cpu_env);
1861
1862 gen_op_store_QT0_fpr(QFPREG(rd));
1863 gen_update_fprs_dirty(QFPREG(rd));
1864 }
1865 #endif
1866
1867 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1868 void (*gen)(TCGv_ptr))
1869 {
1870 gen_op_load_fpr_QT0(QFPREG(rs1));
1871 gen_op_load_fpr_QT1(QFPREG(rs2));
1872
1873 gen(cpu_env);
1874
1875 gen_op_store_QT0_fpr(QFPREG(rd));
1876 gen_update_fprs_dirty(QFPREG(rd));
1877 }
1878
1879 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1880 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1881 {
1882 TCGv_i64 dst;
1883 TCGv_i32 src1, src2;
1884
1885 src1 = gen_load_fpr_F(dc, rs1);
1886 src2 = gen_load_fpr_F(dc, rs2);
1887 dst = gen_dest_fpr_D();
1888
1889 gen(dst, cpu_env, src1, src2);
1890
1891 gen_store_fpr_D(dc, rd, dst);
1892 }
1893
1894 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1895 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1896 {
1897 TCGv_i64 src1, src2;
1898
1899 src1 = gen_load_fpr_D(dc, rs1);
1900 src2 = gen_load_fpr_D(dc, rs2);
1901
1902 gen(cpu_env, src1, src2);
1903
1904 gen_op_store_QT0_fpr(QFPREG(rd));
1905 gen_update_fprs_dirty(QFPREG(rd));
1906 }
1907
1908 #ifdef TARGET_SPARC64
1909 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1910 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1911 {
1912 TCGv_i64 dst;
1913 TCGv_i32 src;
1914
1915 src = gen_load_fpr_F(dc, rs);
1916 dst = gen_dest_fpr_D();
1917
1918 gen(dst, cpu_env, src);
1919
1920 gen_store_fpr_D(dc, rd, dst);
1921 }
1922 #endif
1923
1924 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1925 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1926 {
1927 TCGv_i64 dst;
1928 TCGv_i32 src;
1929
1930 src = gen_load_fpr_F(dc, rs);
1931 dst = gen_dest_fpr_D();
1932
1933 gen(dst, cpu_env, src);
1934
1935 gen_store_fpr_D(dc, rd, dst);
1936 }
1937
1938 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1939 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1940 {
1941 TCGv_i32 dst;
1942 TCGv_i64 src;
1943
1944 src = gen_load_fpr_D(dc, rs);
1945 dst = gen_dest_fpr_F(dc);
1946
1947 gen(dst, cpu_env, src);
1948
1949 gen_store_fpr_F(dc, rd, dst);
1950 }
1951
1952 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1953 void (*gen)(TCGv_i32, TCGv_ptr))
1954 {
1955 TCGv_i32 dst;
1956
1957 gen_op_load_fpr_QT1(QFPREG(rs));
1958 dst = gen_dest_fpr_F(dc);
1959
1960 gen(dst, cpu_env);
1961
1962 gen_store_fpr_F(dc, rd, dst);
1963 }
1964
1965 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1966 void (*gen)(TCGv_i64, TCGv_ptr))
1967 {
1968 TCGv_i64 dst;
1969
1970 gen_op_load_fpr_QT1(QFPREG(rs));
1971 dst = gen_dest_fpr_D();
1972
1973 gen(dst, cpu_env);
1974
1975 gen_store_fpr_D(dc, rd, dst);
1976 }
1977
1978 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1979 void (*gen)(TCGv_ptr, TCGv_i32))
1980 {
1981 TCGv_i32 src;
1982
1983 src = gen_load_fpr_F(dc, rs);
1984
1985 gen(cpu_env, src);
1986
1987 gen_op_store_QT0_fpr(QFPREG(rd));
1988 gen_update_fprs_dirty(QFPREG(rd));
1989 }
1990
1991 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1992 void (*gen)(TCGv_ptr, TCGv_i64))
1993 {
1994 TCGv_i64 src;
1995
1996 src = gen_load_fpr_D(dc, rs);
1997
1998 gen(cpu_env, src);
1999
2000 gen_op_store_QT0_fpr(QFPREG(rd));
2001 gen_update_fprs_dirty(QFPREG(rd));
2002 }
2003
2004 /* asi moves */
2005 #ifdef TARGET_SPARC64
2006 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
2007 {
2008 int asi;
2009 TCGv_i32 r_asi;
2010
2011 if (IS_IMM) {
2012 r_asi = tcg_temp_new_i32();
2013 tcg_gen_mov_i32(r_asi, cpu_asi);
2014 } else {
2015 asi = GET_FIELD(insn, 19, 26);
2016 r_asi = tcg_const_i32(asi);
2017 }
2018 return r_asi;
2019 }
2020
2021 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2022 int sign)
2023 {
2024 TCGv_i32 r_asi, r_size, r_sign;
2025
2026 r_asi = gen_get_asi(insn, addr);
2027 r_size = tcg_const_i32(size);
2028 r_sign = tcg_const_i32(sign);
2029 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
2030 tcg_temp_free_i32(r_sign);
2031 tcg_temp_free_i32(r_size);
2032 tcg_temp_free_i32(r_asi);
2033 }
2034
2035 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2036 {
2037 TCGv_i32 r_asi, r_size;
2038
2039 r_asi = gen_get_asi(insn, addr);
2040 r_size = tcg_const_i32(size);
2041 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2042 tcg_temp_free_i32(r_size);
2043 tcg_temp_free_i32(r_asi);
2044 }
2045
2046 static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
2047 {
2048 TCGv_i32 r_asi, r_size, r_rd;
2049
2050 r_asi = gen_get_asi(insn, addr);
2051 r_size = tcg_const_i32(size);
2052 r_rd = tcg_const_i32(rd);
2053 gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2054 tcg_temp_free_i32(r_rd);
2055 tcg_temp_free_i32(r_size);
2056 tcg_temp_free_i32(r_asi);
2057 }
2058
2059 static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
2060 {
2061 TCGv_i32 r_asi, r_size, r_rd;
2062
2063 r_asi = gen_get_asi(insn, addr);
2064 r_size = tcg_const_i32(size);
2065 r_rd = tcg_const_i32(rd);
2066 gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2067 tcg_temp_free_i32(r_rd);
2068 tcg_temp_free_i32(r_size);
2069 tcg_temp_free_i32(r_asi);
2070 }
2071
2072 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2073 {
2074 TCGv_i32 r_asi, r_size, r_sign;
2075
2076 r_asi = gen_get_asi(insn, addr);
2077 r_size = tcg_const_i32(4);
2078 r_sign = tcg_const_i32(0);
2079 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2080 tcg_temp_free_i32(r_sign);
2081 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2082 tcg_temp_free_i32(r_size);
2083 tcg_temp_free_i32(r_asi);
2084 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2085 }
2086
2087 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2088 int insn, int rd)
2089 {
2090 TCGv_i32 r_asi, r_rd;
2091
2092 r_asi = gen_get_asi(insn, addr);
2093 r_rd = tcg_const_i32(rd);
2094 gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
2095 tcg_temp_free_i32(r_rd);
2096 tcg_temp_free_i32(r_asi);
2097 }
2098
2099 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2100 int insn, int rd)
2101 {
2102 TCGv_i32 r_asi, r_size;
2103 TCGv lo = gen_load_gpr(dc, rd + 1);
2104
2105 tcg_gen_concat_tl_i64(cpu_tmp64, lo, hi);
2106 r_asi = gen_get_asi(insn, addr);
2107 r_size = tcg_const_i32(8);
2108 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2109 tcg_temp_free_i32(r_size);
2110 tcg_temp_free_i32(r_asi);
2111 }
2112
2113 static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
2114 TCGv val2, int insn, int rd)
2115 {
2116 TCGv val1 = gen_load_gpr(dc, rd);
2117 TCGv dst = gen_dest_gpr(dc, rd);
2118 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2119
2120 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2121 tcg_temp_free_i32(r_asi);
2122 gen_store_gpr(dc, rd, dst);
2123 }
2124
2125 static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
2126 TCGv val2, int insn, int rd)
2127 {
2128 TCGv val1 = gen_load_gpr(dc, rd);
2129 TCGv dst = gen_dest_gpr(dc, rd);
2130 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2131
2132 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2133 tcg_temp_free_i32(r_asi);
2134 gen_store_gpr(dc, rd, dst);
2135 }
2136
2137 #elif !defined(CONFIG_USER_ONLY)
2138
2139 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2140 int sign)
2141 {
2142 TCGv_i32 r_asi, r_size, r_sign;
2143
2144 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2145 r_size = tcg_const_i32(size);
2146 r_sign = tcg_const_i32(sign);
2147 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2148 tcg_temp_free(r_sign);
2149 tcg_temp_free(r_size);
2150 tcg_temp_free(r_asi);
2151 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2152 }
2153
2154 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2155 {
2156 TCGv_i32 r_asi, r_size;
2157
2158 tcg_gen_extu_tl_i64(cpu_tmp64, src);
2159 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2160 r_size = tcg_const_i32(size);
2161 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2162 tcg_temp_free(r_size);
2163 tcg_temp_free(r_asi);
2164 }
2165
2166 static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
2167 {
2168 TCGv_i32 r_asi, r_size, r_sign;
2169 TCGv_i64 r_val;
2170
2171 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2172 r_size = tcg_const_i32(4);
2173 r_sign = tcg_const_i32(0);
2174 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2175 tcg_temp_free(r_sign);
2176 r_val = tcg_temp_new_i64();
2177 tcg_gen_extu_tl_i64(r_val, src);
2178 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2179 tcg_temp_free_i64(r_val);
2180 tcg_temp_free(r_size);
2181 tcg_temp_free(r_asi);
2182 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2183 }
2184
2185 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2186 int insn, int rd)
2187 {
2188 TCGv_i32 r_asi, r_size, r_sign;
2189 TCGv t;
2190
2191 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2192 r_size = tcg_const_i32(8);
2193 r_sign = tcg_const_i32(0);
2194 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2195 tcg_temp_free(r_sign);
2196 tcg_temp_free(r_size);
2197 tcg_temp_free(r_asi);
2198
2199 t = gen_dest_gpr(dc, rd + 1);
2200 tcg_gen_trunc_i64_tl(t, cpu_tmp64);
2201 gen_store_gpr(dc, rd + 1, t);
2202
2203 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
2204 tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
2205 gen_store_gpr(dc, rd, hi);
2206 }
2207
2208 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2209 int insn, int rd)
2210 {
2211 TCGv_i32 r_asi, r_size;
2212 TCGv lo = gen_load_gpr(dc, rd + 1);
2213
2214 tcg_gen_concat_tl_i64(cpu_tmp64, lo, hi);
2215 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2216 r_size = tcg_const_i32(8);
2217 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2218 tcg_temp_free(r_size);
2219 tcg_temp_free(r_asi);
2220 }
2221 #endif
2222
2223 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2224 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2225 {
2226 TCGv_i64 r_val;
2227 TCGv_i32 r_asi, r_size;
2228
2229 gen_ld_asi(dst, addr, insn, 1, 0);
2230
2231 r_val = tcg_const_i64(0xffULL);
2232 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2233 r_size = tcg_const_i32(1);
2234 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2235 tcg_temp_free_i32(r_size);
2236 tcg_temp_free_i32(r_asi);
2237 tcg_temp_free_i64(r_val);
2238 }
2239 #endif
2240
2241 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2242 {
2243 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2244 return gen_load_gpr(dc, rs1);
2245 }
2246
2247 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2248 {
2249 if (IS_IMM) { /* immediate */
2250 target_long simm = GET_FIELDs(insn, 19, 31);
2251 TCGv t = get_temp_tl(dc);
2252 tcg_gen_movi_tl(t, simm);
2253 return t;
2254 } else { /* register */
2255 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2256 return gen_load_gpr(dc, rs2);
2257 }
2258 }
2259
2260 #ifdef TARGET_SPARC64
2261 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2262 {
2263 TCGv_i32 c32, zero, dst, s1, s2;
2264
2265 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2266 or fold the comparison down to 32 bits and use movcond_i32. Choose
2267 the later. */
2268 c32 = tcg_temp_new_i32();
2269 if (cmp->is_bool) {
2270 tcg_gen_trunc_i64_i32(c32, cmp->c1);
2271 } else {
2272 TCGv_i64 c64 = tcg_temp_new_i64();
2273 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2274 tcg_gen_trunc_i64_i32(c32, c64);
2275 tcg_temp_free_i64(c64);
2276 }
2277
2278 s1 = gen_load_fpr_F(dc, rs);
2279 s2 = gen_load_fpr_F(dc, rd);
2280 dst = gen_dest_fpr_F(dc);
2281 zero = tcg_const_i32(0);
2282
2283 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2284
2285 tcg_temp_free_i32(c32);
2286 tcg_temp_free_i32(zero);
2287 gen_store_fpr_F(dc, rd, dst);
2288 }
2289
2290 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2291 {
2292 TCGv_i64 dst = gen_dest_fpr_D();
2293 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2294 gen_load_fpr_D(dc, rs),
2295 gen_load_fpr_D(dc, rd));
2296 gen_store_fpr_D(dc, rd, dst);
2297 }
2298
2299 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2300 {
2301 int qd = QFPREG(rd);
2302 int qs = QFPREG(rs);
2303
2304 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2305 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2306 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2307 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2308
2309 gen_update_fprs_dirty(qd);
2310 }
2311
2312 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2313 {
2314 TCGv_i32 r_tl = tcg_temp_new_i32();
2315
2316 /* load env->tl into r_tl */
2317 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2318
2319 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2320 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2321
2322 /* calculate offset to current trap state from env->ts, reuse r_tl */
2323 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2324 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2325
2326 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2327 {
2328 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2329 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2330 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2331 tcg_temp_free_ptr(r_tl_tmp);
2332 }
2333
2334 tcg_temp_free_i32(r_tl);
2335 }
2336
2337 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2338 int width, bool cc, bool left)
2339 {
2340 TCGv lo1, lo2, t1, t2;
2341 uint64_t amask, tabl, tabr;
2342 int shift, imask, omask;
2343
2344 if (cc) {
2345 tcg_gen_mov_tl(cpu_cc_src, s1);
2346 tcg_gen_mov_tl(cpu_cc_src2, s2);
2347 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2348 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2349 dc->cc_op = CC_OP_SUB;
2350 }
2351
2352 /* Theory of operation: there are two tables, left and right (not to
2353 be confused with the left and right versions of the opcode). These
2354 are indexed by the low 3 bits of the inputs. To make things "easy",
2355 these tables are loaded into two constants, TABL and TABR below.
2356 The operation index = (input & imask) << shift calculates the index
2357 into the constant, while val = (table >> index) & omask calculates
2358 the value we're looking for. */
2359 switch (width) {
2360 case 8:
2361 imask = 0x7;
2362 shift = 3;
2363 omask = 0xff;
2364 if (left) {
2365 tabl = 0x80c0e0f0f8fcfeffULL;
2366 tabr = 0xff7f3f1f0f070301ULL;
2367 } else {
2368 tabl = 0x0103070f1f3f7fffULL;
2369 tabr = 0xfffefcf8f0e0c080ULL;
2370 }
2371 break;
2372 case 16:
2373 imask = 0x6;
2374 shift = 1;
2375 omask = 0xf;
2376 if (left) {
2377 tabl = 0x8cef;
2378 tabr = 0xf731;
2379 } else {
2380 tabl = 0x137f;
2381 tabr = 0xfec8;
2382 }
2383 break;
2384 case 32:
2385 imask = 0x4;
2386 shift = 0;
2387 omask = 0x3;
2388 if (left) {
2389 tabl = (2 << 2) | 3;
2390 tabr = (3 << 2) | 1;
2391 } else {
2392 tabl = (1 << 2) | 3;
2393 tabr = (3 << 2) | 2;
2394 }
2395 break;
2396 default:
2397 abort();
2398 }
2399
2400 lo1 = tcg_temp_new();
2401 lo2 = tcg_temp_new();
2402 tcg_gen_andi_tl(lo1, s1, imask);
2403 tcg_gen_andi_tl(lo2, s2, imask);
2404 tcg_gen_shli_tl(lo1, lo1, shift);
2405 tcg_gen_shli_tl(lo2, lo2, shift);
2406
2407 t1 = tcg_const_tl(tabl);
2408 t2 = tcg_const_tl(tabr);
2409 tcg_gen_shr_tl(lo1, t1, lo1);
2410 tcg_gen_shr_tl(lo2, t2, lo2);
2411 tcg_gen_andi_tl(dst, lo1, omask);
2412 tcg_gen_andi_tl(lo2, lo2, omask);
2413
2414 amask = -8;
2415 if (AM_CHECK(dc)) {
2416 amask &= 0xffffffffULL;
2417 }
2418 tcg_gen_andi_tl(s1, s1, amask);
2419 tcg_gen_andi_tl(s2, s2, amask);
2420
2421 /* We want to compute
2422 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2423 We've already done dst = lo1, so this reduces to
2424 dst &= (s1 == s2 ? -1 : lo2)
2425 Which we perform by
2426 lo2 |= -(s1 == s2)
2427 dst &= lo2
2428 */
2429 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2430 tcg_gen_neg_tl(t1, t1);
2431 tcg_gen_or_tl(lo2, lo2, t1);
2432 tcg_gen_and_tl(dst, dst, lo2);
2433
2434 tcg_temp_free(lo1);
2435 tcg_temp_free(lo2);
2436 tcg_temp_free(t1);
2437 tcg_temp_free(t2);
2438 }
2439
2440 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2441 {
2442 TCGv tmp = tcg_temp_new();
2443
2444 tcg_gen_add_tl(tmp, s1, s2);
2445 tcg_gen_andi_tl(dst, tmp, -8);
2446 if (left) {
2447 tcg_gen_neg_tl(tmp, tmp);
2448 }
2449 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2450
2451 tcg_temp_free(tmp);
2452 }
2453
2454 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2455 {
2456 TCGv t1, t2, shift;
2457
2458 t1 = tcg_temp_new();
2459 t2 = tcg_temp_new();
2460 shift = tcg_temp_new();
2461
2462 tcg_gen_andi_tl(shift, gsr, 7);
2463 tcg_gen_shli_tl(shift, shift, 3);
2464 tcg_gen_shl_tl(t1, s1, shift);
2465
2466 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2467 shift of (up to 63) followed by a constant shift of 1. */
2468 tcg_gen_xori_tl(shift, shift, 63);
2469 tcg_gen_shr_tl(t2, s2, shift);
2470 tcg_gen_shri_tl(t2, t2, 1);
2471
2472 tcg_gen_or_tl(dst, t1, t2);
2473
2474 tcg_temp_free(t1);
2475 tcg_temp_free(t2);
2476 tcg_temp_free(shift);
2477 }
2478 #endif
2479
2480 #define CHECK_IU_FEATURE(dc, FEATURE) \
2481 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2482 goto illegal_insn;
2483 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2484 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2485 goto nfpu_insn;
2486
2487 /* before an instruction, dc->pc must be static */
2488 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2489 {
2490 unsigned int opc, rs1, rs2, rd;
2491 TCGv cpu_src1, cpu_src2;
2492 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2493 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2494 target_long simm;
2495
2496 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2497 tcg_gen_debug_insn_start(dc->pc);
2498 }
2499
2500 opc = GET_FIELD(insn, 0, 1);
2501
2502 rd = GET_FIELD(insn, 2, 6);
2503
2504 switch (opc) {
2505 case 0: /* branches/sethi */
2506 {
2507 unsigned int xop = GET_FIELD(insn, 7, 9);
2508 int32_t target;
2509 switch (xop) {
2510 #ifdef TARGET_SPARC64
2511 case 0x1: /* V9 BPcc */
2512 {
2513 int cc;
2514
2515 target = GET_FIELD_SP(insn, 0, 18);
2516 target = sign_extend(target, 19);
2517 target <<= 2;
2518 cc = GET_FIELD_SP(insn, 20, 21);
2519 if (cc == 0)
2520 do_branch(dc, target, insn, 0);
2521 else if (cc == 2)
2522 do_branch(dc, target, insn, 1);
2523 else
2524 goto illegal_insn;
2525 goto jmp_insn;
2526 }
2527 case 0x3: /* V9 BPr */
2528 {
2529 target = GET_FIELD_SP(insn, 0, 13) |
2530 (GET_FIELD_SP(insn, 20, 21) << 14);
2531 target = sign_extend(target, 16);
2532 target <<= 2;
2533 cpu_src1 = get_src1(dc, insn);
2534 do_branch_reg(dc, target, insn, cpu_src1);
2535 goto jmp_insn;
2536 }
2537 case 0x5: /* V9 FBPcc */
2538 {
2539 int cc = GET_FIELD_SP(insn, 20, 21);
2540 if (gen_trap_ifnofpu(dc)) {
2541 goto jmp_insn;
2542 }
2543 target = GET_FIELD_SP(insn, 0, 18);
2544 target = sign_extend(target, 19);
2545 target <<= 2;
2546 do_fbranch(dc, target, insn, cc);
2547 goto jmp_insn;
2548 }
2549 #else
2550 case 0x7: /* CBN+x */
2551 {
2552 goto ncp_insn;
2553 }
2554 #endif
2555 case 0x2: /* BN+x */
2556 {
2557 target = GET_FIELD(insn, 10, 31);
2558 target = sign_extend(target, 22);
2559 target <<= 2;
2560 do_branch(dc, target, insn, 0);
2561 goto jmp_insn;
2562 }
2563 case 0x6: /* FBN+x */
2564 {
2565 if (gen_trap_ifnofpu(dc)) {
2566 goto jmp_insn;
2567 }
2568 target = GET_FIELD(insn, 10, 31);
2569 target = sign_extend(target, 22);
2570 target <<= 2;
2571 do_fbranch(dc, target, insn, 0);
2572 goto jmp_insn;
2573 }
2574 case 0x4: /* SETHI */
2575 /* Special-case %g0 because that's the canonical nop. */
2576 if (rd) {
2577 uint32_t value = GET_FIELD(insn, 10, 31);
2578 TCGv t = gen_dest_gpr(dc, rd);
2579 tcg_gen_movi_tl(t, value << 10);
2580 gen_store_gpr(dc, rd, t);
2581 }
2582 break;
2583 case 0x0: /* UNIMPL */
2584 default:
2585 goto illegal_insn;
2586 }
2587 break;
2588 }
2589 break;
2590 case 1: /*CALL*/
2591 {
2592 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2593 TCGv o7 = gen_dest_gpr(dc, 15);
2594
2595 tcg_gen_movi_tl(o7, dc->pc);
2596 gen_store_gpr(dc, 15, o7);
2597 target += dc->pc;
2598 gen_mov_pc_npc(dc);
2599 #ifdef TARGET_SPARC64
2600 if (unlikely(AM_CHECK(dc))) {
2601 target &= 0xffffffffULL;
2602 }
2603 #endif
2604 dc->npc = target;
2605 }
2606 goto jmp_insn;
2607 case 2: /* FPU & Logical Operations */
2608 {
2609 unsigned int xop = GET_FIELD(insn, 7, 12);
2610 if (xop == 0x3a) { /* generate trap */
2611 int cond = GET_FIELD(insn, 3, 6);
2612 TCGv_i32 trap;
2613 int l1 = -1, mask;
2614
2615 if (cond == 0) {
2616 /* Trap never. */
2617 break;
2618 }
2619
2620 save_state(dc);
2621
2622 if (cond != 8) {
2623 /* Conditional trap. */
2624 DisasCompare cmp;
2625 #ifdef TARGET_SPARC64
2626 /* V9 icc/xcc */
2627 int cc = GET_FIELD_SP(insn, 11, 12);
2628 if (cc == 0) {
2629 gen_compare(&cmp, 0, cond, dc);
2630 } else if (cc == 2) {
2631 gen_compare(&cmp, 1, cond, dc);
2632 } else {
2633 goto illegal_insn;
2634 }
2635 #else
2636 gen_compare(&cmp, 0, cond, dc);
2637 #endif
2638 l1 = gen_new_label();
2639 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2640 cmp.c1, cmp.c2, l1);
2641 free_compare(&cmp);
2642 }
2643
2644 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2645 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2646
2647 /* Don't use the normal temporaries, as they may well have
2648 gone out of scope with the branch above. While we're
2649 doing that we might as well pre-truncate to 32-bit. */
2650 trap = tcg_temp_new_i32();
2651
2652 rs1 = GET_FIELD_SP(insn, 14, 18);
2653 if (IS_IMM) {
2654 rs2 = GET_FIELD_SP(insn, 0, 6);
2655 if (rs1 == 0) {
2656 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2657 /* Signal that the trap value is fully constant. */
2658 mask = 0;
2659 } else {
2660 TCGv t1 = gen_load_gpr(dc, rs1);
2661 tcg_gen_trunc_tl_i32(trap, t1);
2662 tcg_gen_addi_i32(trap, trap, rs2);
2663 }
2664 } else {
2665 TCGv t1, t2;
2666 rs2 = GET_FIELD_SP(insn, 0, 4);
2667 t1 = gen_load_gpr(dc, rs1);
2668 t2 = gen_load_gpr(dc, rs2);
2669 tcg_gen_add_tl(t1, t1, t2);
2670 tcg_gen_trunc_tl_i32(trap, t1);
2671 }
2672 if (mask != 0) {
2673 tcg_gen_andi_i32(trap, trap, mask);
2674 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2675 }
2676
2677 gen_helper_raise_exception(cpu_env, trap);
2678 tcg_temp_free_i32(trap);
2679
2680 if (cond == 8) {
2681 /* An unconditional trap ends the TB. */
2682 dc->is_br = 1;
2683 goto jmp_insn;
2684 } else {
2685 /* A conditional trap falls through to the next insn. */
2686 gen_set_label(l1);
2687 break;
2688 }
2689 } else if (xop == 0x28) {
2690 rs1 = GET_FIELD(insn, 13, 17);
2691 switch(rs1) {
2692 case 0: /* rdy */
2693 #ifndef TARGET_SPARC64
2694 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2695 manual, rdy on the microSPARC
2696 II */
2697 case 0x0f: /* stbar in the SPARCv8 manual,
2698 rdy on the microSPARC II */
2699 case 0x10 ... 0x1f: /* implementation-dependent in the
2700 SPARCv8 manual, rdy on the
2701 microSPARC II */
2702 /* Read Asr17 */
2703 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2704 TCGv t = gen_dest_gpr(dc, rd);
2705 /* Read Asr17 for a Leon3 monoprocessor */
2706 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2707 gen_store_gpr(dc, rd, t);
2708 break;
2709 }
2710 #endif
2711 gen_store_gpr(dc, rd, cpu_y);
2712 break;
2713 #ifdef TARGET_SPARC64
2714 case 0x2: /* V9 rdccr */
2715 update_psr(dc);
2716 gen_helper_rdccr(cpu_dst, cpu_env);
2717 gen_store_gpr(dc, rd, cpu_dst);
2718 break;
2719 case 0x3: /* V9 rdasi */
2720 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2721 gen_store_gpr(dc, rd, cpu_dst);
2722 break;
2723 case 0x4: /* V9 rdtick */
2724 {
2725 TCGv_ptr r_tickptr;
2726
2727 r_tickptr = tcg_temp_new_ptr();
2728 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2729 offsetof(CPUSPARCState, tick));
2730 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2731 tcg_temp_free_ptr(r_tickptr);
2732 gen_store_gpr(dc, rd, cpu_dst);
2733 }
2734 break;
2735 case 0x5: /* V9 rdpc */
2736 {
2737 TCGv t = gen_dest_gpr(dc, rd);
2738 if (unlikely(AM_CHECK(dc))) {
2739 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2740 } else {
2741 tcg_gen_movi_tl(t, dc->pc);
2742 }
2743 gen_store_gpr(dc, rd, t);
2744 }
2745 break;
2746 case 0x6: /* V9 rdfprs */
2747 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2748 gen_store_gpr(dc, rd, cpu_dst);
2749 break;
2750 case 0xf: /* V9 membar */
2751 break; /* no effect */
2752 case 0x13: /* Graphics Status */
2753 if (gen_trap_ifnofpu(dc)) {
2754 goto jmp_insn;
2755 }
2756 gen_store_gpr(dc, rd, cpu_gsr);
2757 break;
2758 case 0x16: /* Softint */
2759 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2760 gen_store_gpr(dc, rd, cpu_dst);
2761 break;
2762 case 0x17: /* Tick compare */
2763 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2764 break;
2765 case 0x18: /* System tick */
2766 {
2767 TCGv_ptr r_tickptr;
2768
2769 r_tickptr = tcg_temp_new_ptr();
2770 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2771 offsetof(CPUSPARCState, stick));
2772 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2773 tcg_temp_free_ptr(r_tickptr);
2774 gen_store_gpr(dc, rd, cpu_dst);
2775 }
2776 break;
2777 case 0x19: /* System tick compare */
2778 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2779 break;
2780 case 0x10: /* Performance Control */
2781 case 0x11: /* Performance Instrumentation Counter */
2782 case 0x12: /* Dispatch Control */
2783 case 0x14: /* Softint set, WO */
2784 case 0x15: /* Softint clear, WO */
2785 #endif
2786 default:
2787 goto illegal_insn;
2788 }
2789 #if !defined(CONFIG_USER_ONLY)
2790 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2791 #ifndef TARGET_SPARC64
2792 if (!supervisor(dc)) {
2793 goto priv_insn;
2794 }
2795 update_psr(dc);
2796 gen_helper_rdpsr(cpu_dst, cpu_env);
2797 #else
2798 CHECK_IU_FEATURE(dc, HYPV);
2799 if (!hypervisor(dc))
2800 goto priv_insn;
2801 rs1 = GET_FIELD(insn, 13, 17);
2802 switch (rs1) {
2803 case 0: // hpstate
2804 // gen_op_rdhpstate();
2805 break;
2806 case 1: // htstate
2807 // gen_op_rdhtstate();
2808 break;
2809 case 3: // hintp
2810 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2811 break;
2812 case 5: // htba
2813 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2814 break;
2815 case 6: // hver
2816 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2817 break;
2818 case 31: // hstick_cmpr
2819 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2820 break;
2821 default:
2822 goto illegal_insn;
2823 }
2824 #endif
2825 gen_store_gpr(dc, rd, cpu_dst);
2826 break;
2827 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2828 if (!supervisor(dc))
2829 goto priv_insn;
2830 #ifdef TARGET_SPARC64
2831 rs1 = GET_FIELD(insn, 13, 17);
2832 switch (rs1) {
2833 case 0: // tpc
2834 {
2835 TCGv_ptr r_tsptr;
2836
2837 r_tsptr = tcg_temp_new_ptr();
2838 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2839 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2840 offsetof(trap_state, tpc));
2841 tcg_temp_free_ptr(r_tsptr);
2842 }
2843 break;
2844 case 1: // tnpc
2845 {
2846 TCGv_ptr r_tsptr;
2847
2848 r_tsptr = tcg_temp_new_ptr();
2849 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2850 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2851 offsetof(trap_state, tnpc));
2852 tcg_temp_free_ptr(r_tsptr);
2853 }
2854 break;
2855 case 2: // tstate
2856 {
2857 TCGv_ptr r_tsptr;
2858
2859 r_tsptr = tcg_temp_new_ptr();
2860 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2861 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2862 offsetof(trap_state, tstate));
2863 tcg_temp_free_ptr(r_tsptr);
2864 }
2865 break;
2866 case 3: // tt
2867 {
2868 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2869
2870 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2871 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2872 offsetof(trap_state, tt));
2873 tcg_temp_free_ptr(r_tsptr);
2874 }
2875 break;
2876 case 4: // tick
2877 {
2878 TCGv_ptr r_tickptr;
2879
2880 r_tickptr = tcg_temp_new_ptr();
2881 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2882 offsetof(CPUSPARCState, tick));
2883 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2884 tcg_temp_free_ptr(r_tickptr);
2885 }
2886 break;
2887 case 5: // tba
2888 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2889 break;
2890 case 6: // pstate
2891 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2892 offsetof(CPUSPARCState, pstate));
2893 break;
2894 case 7: // tl
2895 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2896 offsetof(CPUSPARCState, tl));
2897 break;
2898 case 8: // pil
2899 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2900 offsetof(CPUSPARCState, psrpil));
2901 break;
2902 case 9: // cwp
2903 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2904 break;
2905 case 10: // cansave
2906 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2907 offsetof(CPUSPARCState, cansave));
2908 break;
2909 case 11: // canrestore
2910 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2911 offsetof(CPUSPARCState, canrestore));
2912 break;
2913 case 12: // cleanwin
2914 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2915 offsetof(CPUSPARCState, cleanwin));
2916 break;
2917 case 13: // otherwin
2918 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2919 offsetof(CPUSPARCState, otherwin));
2920 break;
2921 case 14: // wstate
2922 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2923 offsetof(CPUSPARCState, wstate));
2924 break;
2925 case 16: // UA2005 gl
2926 CHECK_IU_FEATURE(dc, GL);
2927 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2928 offsetof(CPUSPARCState, gl));
2929 break;
2930 case 26: // UA2005 strand status
2931 CHECK_IU_FEATURE(dc, HYPV);
2932 if (!hypervisor(dc))
2933 goto priv_insn;
2934 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2935 break;
2936 case 31: // ver
2937 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2938 break;
2939 case 15: // fq
2940 default:
2941 goto illegal_insn;
2942 }
2943 #else
2944 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2945 #endif
2946 gen_store_gpr(dc, rd, cpu_tmp0);
2947 break;
2948 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2949 #ifdef TARGET_SPARC64
2950 save_state(dc);
2951 gen_helper_flushw(cpu_env);
2952 #else
2953 if (!supervisor(dc))
2954 goto priv_insn;
2955 gen_store_gpr(dc, rd, cpu_tbr);
2956 #endif
2957 break;
2958 #endif
2959 } else if (xop == 0x34) { /* FPU Operations */
2960 if (gen_trap_ifnofpu(dc)) {
2961 goto jmp_insn;
2962 }
2963 gen_op_clear_ieee_excp_and_FTT();
2964 rs1 = GET_FIELD(insn, 13, 17);
2965 rs2 = GET_FIELD(insn, 27, 31);
2966 xop = GET_FIELD(insn, 18, 26);
2967 save_state(dc);
2968 switch (xop) {
2969 case 0x1: /* fmovs */
2970 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2971 gen_store_fpr_F(dc, rd, cpu_src1_32);
2972 break;
2973 case 0x5: /* fnegs */
2974 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2975 break;
2976 case 0x9: /* fabss */
2977 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2978 break;
2979 case 0x29: /* fsqrts */
2980 CHECK_FPU_FEATURE(dc, FSQRT);
2981 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2982 break;
2983 case 0x2a: /* fsqrtd */
2984 CHECK_FPU_FEATURE(dc, FSQRT);
2985 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2986 break;
2987 case 0x2b: /* fsqrtq */
2988 CHECK_FPU_FEATURE(dc, FLOAT128);
2989 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2990 break;
2991 case 0x41: /* fadds */
2992 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2993 break;
2994 case 0x42: /* faddd */
2995 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2996 break;
2997 case 0x43: /* faddq */
2998 CHECK_FPU_FEATURE(dc, FLOAT128);
2999 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3000 break;
3001 case 0x45: /* fsubs */
3002 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3003 break;
3004 case 0x46: /* fsubd */
3005 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3006 break;
3007 case 0x47: /* fsubq */
3008 CHECK_FPU_FEATURE(dc, FLOAT128);
3009 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3010 break;
3011 case 0x49: /* fmuls */
3012 CHECK_FPU_FEATURE(dc, FMUL);
3013 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3014 break;
3015 case 0x4a: /* fmuld */
3016 CHECK_FPU_FEATURE(dc, FMUL);
3017 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3018 break;
3019 case 0x4b: /* fmulq */
3020 CHECK_FPU_FEATURE(dc, FLOAT128);
3021 CHECK_FPU_FEATURE(dc, FMUL);
3022 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3023 break;
3024 case 0x4d: /* fdivs */
3025 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3026 break;
3027 case 0x4e: /* fdivd */
3028 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3029 break;
3030 case 0x4f: /* fdivq */
3031 CHECK_FPU_FEATURE(dc, FLOAT128);
3032 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3033 break;
3034 case 0x69: /* fsmuld */
3035 CHECK_FPU_FEATURE(dc, FSMULD);
3036 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3037 break;
3038 case 0x6e: /* fdmulq */
3039 CHECK_FPU_FEATURE(dc, FLOAT128);
3040 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3041 break;
3042 case 0xc4: /* fitos */
3043 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3044 break;
3045 case 0xc6: /* fdtos */
3046 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3047 break;
3048 case 0xc7: /* fqtos */
3049 CHECK_FPU_FEATURE(dc, FLOAT128);
3050 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3051 break;
3052 case 0xc8: /* fitod */
3053 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3054 break;
3055 case 0xc9: /* fstod */
3056 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3057 break;
3058 case 0xcb: /* fqtod */
3059 CHECK_FPU_FEATURE(dc, FLOAT128);
3060 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3061 break;
3062 case 0xcc: /* fitoq */
3063 CHECK_FPU_FEATURE(dc, FLOAT128);
3064 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3065 break;
3066 case 0xcd: /* fstoq */
3067 CHECK_FPU_FEATURE(dc, FLOAT128);
3068 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3069 break;
3070 case 0xce: /* fdtoq */
3071 CHECK_FPU_FEATURE(dc, FLOAT128);
3072 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3073 break;
3074 case 0xd1: /* fstoi */
3075 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3076 break;
3077 case 0xd2: /* fdtoi */
3078 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3079 break;
3080 case 0xd3: /* fqtoi */
3081 CHECK_FPU_FEATURE(dc, FLOAT128);
3082 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3083 break;
3084 #ifdef TARGET_SPARC64
3085 case 0x2: /* V9 fmovd */
3086 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3087 gen_store_fpr_D(dc, rd, cpu_src1_64);
3088 break;
3089 case 0x3: /* V9 fmovq */
3090 CHECK_FPU_FEATURE(dc, FLOAT128);
3091 gen_move_Q(rd, rs2);
3092 break;
3093 case 0x6: /* V9 fnegd */
3094 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3095 break;
3096 case 0x7: /* V9 fnegq */
3097 CHECK_FPU_FEATURE(dc, FLOAT128);
3098 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3099 break;
3100 case 0xa: /* V9 fabsd */
3101 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3102 break;
3103 case 0xb: /* V9 fabsq */
3104 CHECK_FPU_FEATURE(dc, FLOAT128);
3105 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3106 break;
3107 case 0x81: /* V9 fstox */
3108 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3109 break;
3110 case 0x82: /* V9 fdtox */
3111 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3112 break;
3113 case 0x83: /* V9 fqtox */
3114 CHECK_FPU_FEATURE(dc, FLOAT128);
3115 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3116 break;
3117 case 0x84: /* V9 fxtos */
3118 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3119 break;
3120 case 0x88: /* V9 fxtod */
3121 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3122 break;
3123 case 0x8c: /* V9 fxtoq */
3124 CHECK_FPU_FEATURE(dc, FLOAT128);
3125 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3126 break;
3127 #endif
3128 default:
3129 goto illegal_insn;
3130 }
3131 } else if (xop == 0x35) { /* FPU Operations */
3132 #ifdef TARGET_SPARC64
3133 int cond;
3134 #endif
3135 if (gen_trap_ifnofpu(dc)) {
3136 goto jmp_insn;
3137 }
3138 gen_op_clear_ieee_excp_and_FTT();
3139 rs1 = GET_FIELD(insn, 13, 17);
3140 rs2 = GET_FIELD(insn, 27, 31);
3141 xop = GET_FIELD(insn, 18, 26);
3142 save_state(dc);
3143
3144 #ifdef TARGET_SPARC64
3145 #define FMOVR(sz) \
3146 do { \
3147 DisasCompare cmp; \
3148 cond = GET_FIELD_SP(insn, 14, 17); \
3149 cpu_src1 = get_src1(dc, insn); \
3150 gen_compare_reg(&cmp, cond, cpu_src1); \
3151 gen_fmov##sz(dc, &cmp, rd, rs2); \
3152 free_compare(&cmp); \
3153 } while (0)
3154
3155 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3156 FMOVR(s);
3157 break;
3158 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3159 FMOVR(d);
3160 break;
3161 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3162 CHECK_FPU_FEATURE(dc, FLOAT128);
3163 FMOVR(q);
3164 break;
3165 }
3166 #undef FMOVR
3167 #endif
3168 switch (xop) {
3169 #ifdef TARGET_SPARC64
3170 #define FMOVCC(fcc, sz) \
3171 do { \
3172 DisasCompare cmp; \
3173 cond = GET_FIELD_SP(insn, 14, 17); \
3174 gen_fcompare(&cmp, fcc, cond); \
3175 gen_fmov##sz(dc, &cmp, rd, rs2); \
3176 free_compare(&cmp); \
3177 } while (0)
3178
3179 case 0x001: /* V9 fmovscc %fcc0 */
3180 FMOVCC(0, s);
3181 break;
3182 case 0x002: /* V9 fmovdcc %fcc0 */
3183 FMOVCC(0, d);
3184 break;
3185 case 0x003: /* V9 fmovqcc %fcc0 */
3186 CHECK_FPU_FEATURE(dc, FLOAT128);
3187 FMOVCC(0, q);
3188 break;
3189 case 0x041: /* V9 fmovscc %fcc1 */
3190 FMOVCC(1, s);
3191 break;
3192 case 0x042: /* V9 fmovdcc %fcc1 */
3193 FMOVCC(1, d);
3194 break;
3195 case 0x043: /* V9 fmovqcc %fcc1 */
3196 CHECK_FPU_FEATURE(dc, FLOAT128);
3197 FMOVCC(1, q);
3198 break;
3199 case 0x081: /* V9 fmovscc %fcc2 */
3200 FMOVCC(2, s);
3201 break;
3202 case 0x082: /* V9 fmovdcc %fcc2 */
3203 FMOVCC(2, d);
3204 break;
3205 case 0x083: /* V9 fmovqcc %fcc2 */
3206 CHECK_FPU_FEATURE(dc, FLOAT128);
3207 FMOVCC(2, q);
3208 break;
3209 case 0x0c1: /* V9 fmovscc %fcc3 */
3210 FMOVCC(3, s);
3211 break;
3212 case 0x0c2: /* V9 fmovdcc %fcc3 */
3213 FMOVCC(3, d);
3214 break;
3215 case 0x0c3: /* V9 fmovqcc %fcc3 */
3216 CHECK_FPU_FEATURE(dc, FLOAT128);
3217 FMOVCC(3, q);
3218 break;
3219 #undef FMOVCC
3220 #define FMOVCC(xcc, sz) \
3221 do { \
3222 DisasCompare cmp; \
3223 cond = GET_FIELD_SP(insn, 14, 17); \
3224 gen_compare(&cmp, xcc, cond, dc); \
3225 gen_fmov##sz(dc, &cmp, rd, rs2); \
3226 free_compare(&cmp); \
3227 } while (0)
3228
3229 case 0x101: /* V9 fmovscc %icc */
3230 FMOVCC(0, s);
3231 break;
3232 case 0x102: /* V9 fmovdcc %icc */
3233 FMOVCC(0, d);
3234 break;
3235 case 0x103: /* V9 fmovqcc %icc */
3236 CHECK_FPU_FEATURE(dc, FLOAT128);
3237 FMOVCC(0, q);
3238 break;
3239 case 0x181: /* V9 fmovscc %xcc */
3240 FMOVCC(1, s);
3241 break;
3242 case 0x182: /* V9 fmovdcc %xcc */
3243 FMOVCC(1, d);
3244 break;
3245 case 0x183: /* V9 fmovqcc %xcc */
3246 CHECK_FPU_FEATURE(dc, FLOAT128);
3247 FMOVCC(1, q);
3248 break;
3249 #undef FMOVCC
3250 #endif
3251 case 0x51: /* fcmps, V9 %fcc */
3252 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3253 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3254 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3255 break;
3256 case 0x52: /* fcmpd, V9 %fcc */
3257 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3258 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3259 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3260 break;
3261 case 0x53: /* fcmpq, V9 %fcc */
3262 CHECK_FPU_FEATURE(dc, FLOAT128);
3263 gen_op_load_fpr_QT0(QFPREG(rs1));
3264 gen_op_load_fpr_QT1(QFPREG(rs2));
3265 gen_op_fcmpq(rd & 3);
3266 break;
3267 case 0x55: /* fcmpes, V9 %fcc */
3268 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3269 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3270 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3271 break;
3272 case 0x56: /* fcmped, V9 %fcc */
3273 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3274 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3275 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3276 break;
3277 case 0x57: /* fcmpeq, V9 %fcc */
3278 CHECK_FPU_FEATURE(dc, FLOAT128);
3279 gen_op_load_fpr_QT0(QFPREG(rs1));
3280 gen_op_load_fpr_QT1(QFPREG(rs2));
3281 gen_op_fcmpeq(rd & 3);
3282 break;
3283 default:
3284 goto illegal_insn;
3285 }
3286 } else if (xop == 0x2) {
3287 TCGv dst = gen_dest_gpr(dc, rd);
3288 rs1 = GET_FIELD(insn, 13, 17);
3289 if (rs1 == 0) {
3290 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3291 if (IS_IMM) { /* immediate */
3292 simm = GET_FIELDs(insn, 19, 31);
3293 tcg_gen_movi_tl(dst, simm);
3294 gen_store_gpr(dc, rd, dst);
3295 } else { /* register */
3296 rs2 = GET_FIELD(insn, 27, 31);
3297 if (rs2 == 0) {
3298 tcg_gen_movi_tl(dst, 0);
3299 gen_store_gpr(dc, rd, dst);
3300 } else {
3301 cpu_src2 = gen_load_gpr(dc, rs2);
3302 gen_store_gpr(dc, rd, cpu_src2);
3303 }
3304 }
3305 } else {
3306 cpu_src1 = get_src1(dc, insn);
3307 if (IS_IMM) { /* immediate */
3308 simm = GET_FIELDs(insn, 19, 31);
3309 tcg_gen_ori_tl(dst, cpu_src1, simm);
3310 gen_store_gpr(dc, rd, dst);
3311 } else { /* register */
3312 rs2 = GET_FIELD(insn, 27, 31);
3313 if (rs2 == 0) {
3314 /* mov shortcut: or x, %g0, y -> mov x, y */
3315 gen_store_gpr(dc, rd, cpu_src1);
3316 } else {
3317 cpu_src2 = gen_load_gpr(dc, rs2);
3318 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3319 gen_store_gpr(dc, rd, dst);
3320 }
3321 }
3322 }
3323 #ifdef TARGET_SPARC64
3324 } else if (xop == 0x25) { /* sll, V9 sllx */
3325 cpu_src1 = get_src1(dc, insn);
3326 if (IS_IMM) { /* immediate */
3327 simm = GET_FIELDs(insn, 20, 31);
3328 if (insn & (1 << 12)) {
3329 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3330 } else {
3331 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3332 }
3333 } else { /* register */
3334 rs2 = GET_FIELD(insn, 27, 31);
3335 cpu_src2 = gen_load_gpr(dc, rs2);
3336 if (insn & (1 << 12)) {
3337 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3338 } else {
3339 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3340 }
3341 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3342 }
3343 gen_store_gpr(dc, rd, cpu_dst);
3344 } else if (xop == 0x26) { /* srl, V9 srlx */
3345 cpu_src1 = get_src1(dc, insn);
3346 if (IS_IMM) { /* immediate */
3347 simm = GET_FIELDs(insn, 20, 31);
3348 if (insn & (1 << 12)) {
3349 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3350 } else {
3351 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3352 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3353 }
3354 } else { /* register */
3355 rs2 = GET_FIELD(insn, 27, 31);
3356 cpu_src2 = gen_load_gpr(dc, rs2);
3357 if (insn & (1 << 12)) {
3358 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3359 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3360 } else {
3361 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3362 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3363 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3364 }
3365 }
3366 gen_store_gpr(dc, rd, cpu_dst);
3367 } else if (xop == 0x27) { /* sra, V9 srax */
3368 cpu_src1 = get_src1(dc, insn);
3369 if (IS_IMM) { /* immediate */
3370 simm = GET_FIELDs(insn, 20, 31);
3371 if (insn & (1 << 12)) {
3372 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3373 } else {
3374 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3375 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3376 }
3377 } else { /* register */
3378 rs2 = GET_FIELD(insn, 27, 31);
3379 cpu_src2 = gen_load_gpr(dc, rs2);
3380 if (insn & (1 << 12)) {
3381 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3382 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3383 } else {
3384 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3385 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3386 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3387 }
3388 }
3389 gen_store_gpr(dc, rd, cpu_dst);
3390 #endif
3391 } else if (xop < 0x36) {
3392 if (xop < 0x20) {
3393 cpu_src1 = get_src1(dc, insn);
3394 cpu_src2 = get_src2(dc, insn);
3395 switch (xop & ~0x10) {
3396 case 0x0: /* add */
3397 if (xop & 0x10) {
3398 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3399 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3400 dc->cc_op = CC_OP_ADD;
3401 } else {
3402 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3403 }
3404 break;
3405 case 0x1: /* and */
3406 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3407 if (xop & 0x10) {
3408 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3409 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3410 dc->cc_op = CC_OP_LOGIC;
3411 }
3412 break;
3413 case 0x2: /* or */
3414 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3415 if (xop & 0x10) {
3416 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3417 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3418 dc->cc_op = CC_OP_LOGIC;
3419 }
3420 break;
3421 case 0x3: /* xor */
3422 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3423 if (xop & 0x10) {
3424 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3425 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3426 dc->cc_op = CC_OP_LOGIC;
3427 }
3428 break;
3429 case 0x4: /* sub */
3430 if (xop & 0x10) {
3431 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3432 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3433 dc->cc_op = CC_OP_SUB;
3434 } else {
3435 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3436 }
3437 break;
3438 case 0x5: /* andn */
3439 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3440 if (xop & 0x10) {
3441 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3442 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3443 dc->cc_op = CC_OP_LOGIC;
3444 }
3445 break;
3446 case 0x6: /* orn */
3447 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3448 if (xop & 0x10) {
3449 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3450 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3451 dc->cc_op = CC_OP_LOGIC;
3452 }
3453 break;
3454 case 0x7: /* xorn */
3455 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3456 if (xop & 0x10) {
3457 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3458 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3459 dc->cc_op = CC_OP_LOGIC;
3460 }
3461 break;
3462 case 0x8: /* addx, V9 addc */
3463 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3464 (xop & 0x10));
3465 break;
3466 #ifdef TARGET_SPARC64
3467 case 0x9: /* V9 mulx */
3468 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3469 break;
3470 #endif
3471 case 0xa: /* umul */
3472 CHECK_IU_FEATURE(dc, MUL);
3473 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3474 if (xop & 0x10) {
3475 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3476 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3477 dc->cc_op = CC_OP_LOGIC;
3478 }
3479 break;
3480 case 0xb: /* smul */
3481 CHECK_IU_FEATURE(dc, MUL);
3482 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3483 if (xop & 0x10) {
3484 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3485 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3486 dc->cc_op = CC_OP_LOGIC;
3487 }
3488 break;
3489 case 0xc: /* subx, V9 subc */
3490 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3491 (xop & 0x10));
3492 break;
3493 #ifdef TARGET_SPARC64
3494 case 0xd: /* V9 udivx */
3495 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3496 break;
3497 #endif
3498 case 0xe: /* udiv */
3499 CHECK_IU_FEATURE(dc, DIV);
3500 if (xop & 0x10) {
3501 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3502 cpu_src2);
3503 dc->cc_op = CC_OP_DIV;
3504 } else {
3505 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3506 cpu_src2);
3507 }
3508 break;
3509 case 0xf: /* sdiv */
3510 CHECK_IU_FEATURE(dc, DIV);
3511 if (xop & 0x10) {
3512 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3513 cpu_src2);
3514 dc->cc_op = CC_OP_DIV;
3515 } else {
3516 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3517 cpu_src2);
3518 }
3519 break;
3520 default:
3521 goto illegal_insn;
3522 }
3523 gen_store_gpr(dc, rd, cpu_dst);
3524 } else {
3525 cpu_src1 = get_src1(dc, insn);
3526 cpu_src2 = get_src2(dc, insn);
3527 switch (xop) {
3528 case 0x20: /* taddcc */
3529 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3530 gen_store_gpr(dc, rd, cpu_dst);
3531 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3532 dc->cc_op = CC_OP_TADD;
3533 break;
3534 case 0x21: /* tsubcc */
3535 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3536 gen_store_gpr(dc, rd, cpu_dst);
3537 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3538 dc->cc_op = CC_OP_TSUB;
3539 break;
3540 case 0x22: /* taddcctv */
3541 gen_helper_taddcctv(cpu_dst, cpu_env,
3542 cpu_src1, cpu_src2);
3543 gen_store_gpr(dc, rd, cpu_dst);
3544 dc->cc_op = CC_OP_TADDTV;
3545 break;
3546 case 0x23: /* tsubcctv */
3547 gen_helper_tsubcctv(cpu_dst, cpu_env,
3548 cpu_src1, cpu_src2);
3549 gen_store_gpr(dc, rd, cpu_dst);
3550 dc->cc_op = CC_OP_TSUBTV;
3551 break;
3552 case 0x24: /* mulscc */
3553 update_psr(dc);
3554 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3555 gen_store_gpr(dc, rd, cpu_dst);
3556 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3557 dc->cc_op = CC_OP_ADD;
3558 break;
3559 #ifndef TARGET_SPARC64
3560 case 0x25: /* sll */
3561 if (IS_IMM) { /* immediate */
3562 simm = GET_FIELDs(insn, 20, 31);
3563 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3564 } else { /* register */
3565 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3566 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3567 }
3568 gen_store_gpr(dc, rd, cpu_dst);
3569 break;
3570 case 0x26: /* srl */
3571 if (IS_IMM) { /* immediate */
3572 simm = GET_FIELDs(insn, 20, 31);
3573 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3574 } else { /* register */
3575 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3576 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3577 }
3578 gen_store_gpr(dc, rd, cpu_dst);
3579 break;
3580 case 0x27: /* sra */
3581 if (IS_IMM) { /* immediate */
3582 simm = GET_FIELDs(insn, 20, 31);
3583 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3584 } else { /* register */
3585 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3586 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3587 }
3588 gen_store_gpr(dc, rd, cpu_dst);
3589 break;
3590 #endif
3591 case 0x30:
3592 {
3593 switch(rd) {
3594 case 0: /* wry */
3595 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3596 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3597 break;
3598 #ifndef TARGET_SPARC64
3599 case 0x01 ... 0x0f: /* undefined in the
3600 SPARCv8 manual, nop
3601 on the microSPARC
3602 II */
3603 case 0x10 ... 0x1f: /* implementation-dependent
3604 in the SPARCv8
3605 manual, nop on the
3606 microSPARC II */
3607 break;
3608 #else
3609 case 0x2: /* V9 wrccr */
3610 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3611 gen_helper_wrccr(cpu_env, cpu_dst);
3612 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3613 dc->cc_op = CC_OP_FLAGS;
3614 break;
3615 case 0x3: /* V9 wrasi */
3616 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3617 tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
3618 tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
3619 break;
3620 case 0x6: /* V9 wrfprs */
3621 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3622 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
3623 save_state(dc);
3624 gen_op_next_insn();
3625 tcg_gen_exit_tb(0);
3626 dc->is_br = 1;
3627 break;
3628 case 0xf: /* V9 sir, nop if user */
3629 #if !defined(CONFIG_USER_ONLY)
3630 if (supervisor(dc)) {
3631 ; // XXX
3632 }
3633 #endif
3634 break;
3635 case 0x13: /* Graphics Status */
3636 if (gen_trap_ifnofpu(dc)) {
3637 goto jmp_insn;
3638 }
3639 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3640 break;
3641 case 0x14: /* Softint set */
3642 if (!supervisor(dc))
3643 goto illegal_insn;
3644 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3645 gen_helper_set_softint(cpu_env, cpu_tmp64);
3646 break;
3647 case 0x15: /* Softint clear */
3648 if (!supervisor(dc))
3649 goto illegal_insn;
3650 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3651 gen_helper_clear_softint(cpu_env, cpu_tmp64);
3652 break;
3653 case 0x16: /* Softint write */
3654 if (!supervisor(dc))
3655 goto illegal_insn;
3656 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3657 gen_helper_write_softint(cpu_env, cpu_tmp64);
3658 break;
3659 case 0x17: /* Tick compare */
3660 #if !defined(CONFIG_USER_ONLY)
3661 if (!supervisor(dc))
3662 goto illegal_insn;
3663 #endif
3664 {
3665 TCGv_ptr r_tickptr;
3666
3667 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3668 cpu_src2);
3669 r_tickptr = tcg_temp_new_ptr();
3670 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3671 offsetof(CPUSPARCState, tick));
3672 gen_helper_tick_set_limit(r_tickptr,
3673 cpu_tick_cmpr);
3674 tcg_temp_free_ptr(r_tickptr);
3675 }
3676 break;
3677 case 0x18: /* System tick */
3678 #if !defined(CONFIG_USER_ONLY)
3679 if (!supervisor(dc))
3680 goto illegal_insn;
3681 #endif
3682 {
3683 TCGv_ptr r_tickptr;
3684
3685 tcg_gen_xor_tl(cpu_dst, cpu_src1,
3686 cpu_src2);
3687 r_tickptr = tcg_temp_new_ptr();
3688 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3689 offsetof(CPUSPARCState, stick));
3690 gen_helper_tick_set_count(r_tickptr,
3691 cpu_dst);
3692 tcg_temp_free_ptr(r_tickptr);
3693 }
3694 break;
3695 case 0x19: /* System tick compare */
3696 #if !defined(CONFIG_USER_ONLY)
3697 if (!supervisor(dc))
3698 goto illegal_insn;
3699 #endif
3700 {
3701 TCGv_ptr r_tickptr;
3702
3703 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3704 cpu_src2);
3705 r_tickptr = tcg_temp_new_ptr();
3706 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3707 offsetof(CPUSPARCState, stick));
3708 gen_helper_tick_set_limit(r_tickptr,
3709 cpu_stick_cmpr);
3710 tcg_temp_free_ptr(r_tickptr);
3711 }
3712 break;
3713
3714 case 0x10: /* Performance Control */
3715 case 0x11: /* Performance Instrumentation
3716 Counter */
3717 case 0x12: /* Dispatch Control */
3718 #endif
3719 default:
3720 goto illegal_insn;
3721 }
3722 }
3723 break;
3724 #if !defined(CONFIG_USER_ONLY)
3725 case 0x31: /* wrpsr, V9 saved, restored */
3726 {
3727 if (!supervisor(dc))
3728 goto priv_insn;
3729 #ifdef TARGET_SPARC64
3730 switch (rd) {
3731 case 0:
3732 gen_helper_saved(cpu_env);
3733 break;
3734 case 1:
3735 gen_helper_restored(cpu_env);
3736 break;
3737 case 2: /* UA2005 allclean */
3738 case 3: /* UA2005 otherw */
3739 case 4: /* UA2005 normalw */
3740 case 5: /* UA2005 invalw */
3741 // XXX
3742 default:
3743 goto illegal_insn;
3744 }
3745 #else
3746 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3747 gen_helper_wrpsr(cpu_env, cpu_dst);
3748 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3749 dc->cc_op = CC_OP_FLAGS;
3750 save_state(dc);
3751 gen_op_next_insn();
3752 tcg_gen_exit_tb(0);
3753 dc->is_br = 1;
3754 #endif
3755 }
3756 break;
3757 case 0x32: /* wrwim, V9 wrpr */
3758 {
3759 if (!supervisor(dc))
3760 goto priv_insn;
3761 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3762 #ifdef TARGET_SPARC64
3763 switch (rd) {
3764 case 0: // tpc
3765 {
3766 TCGv_ptr r_tsptr;
3767
3768 r_tsptr = tcg_temp_new_ptr();
3769 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3770 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3771 offsetof(trap_state, tpc));
3772 tcg_temp_free_ptr(r_tsptr);
3773 }
3774 break;
3775 case 1: // tnpc
3776 {
3777 TCGv_ptr r_tsptr;
3778
3779 r_tsptr = tcg_temp_new_ptr();
3780 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3781 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3782 offsetof(trap_state, tnpc));
3783 tcg_temp_free_ptr(r_tsptr);
3784 }
3785 break;
3786 case 2: // tstate
3787 {
3788 TCGv_ptr r_tsptr;
3789
3790 r_tsptr = tcg_temp_new_ptr();
3791 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3792 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3793 offsetof(trap_state,
3794 tstate));
3795 tcg_temp_free_ptr(r_tsptr);
3796 }
3797 break;
3798 case 3: // tt
3799 {
3800 TCGv_ptr r_tsptr;
3801
3802 r_tsptr = tcg_temp_new_ptr();
3803 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3804 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3805 tcg_gen_st_i32(cpu_tmp32, r_tsptr,
3806 offsetof(trap_state, tt));
3807 tcg_temp_free_ptr(r_tsptr);
3808 }
3809 break;
3810 case 4: // tick
3811 {
3812 TCGv_ptr r_tickptr;
3813
3814 r_tickptr = tcg_temp_new_ptr();
3815 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3816 offsetof(CPUSPARCState, tick));
3817 gen_helper_tick_set_count(r_tickptr,
3818 cpu_tmp0);
3819 tcg_temp_free_ptr(r_tickptr);
3820 }
3821 break;
3822 case 5: // tba
3823 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3824 break;
3825 case 6: // pstate
3826 save_state(dc);
3827 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3828 dc->npc = DYNAMIC_PC;
3829 break;
3830 case 7: // tl
3831 save_state(dc);
3832 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3833 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3834 offsetof(CPUSPARCState, tl));
3835 dc->npc = DYNAMIC_PC;
3836 break;
3837 case 8: // pil
3838 gen_helper_wrpil(cpu_env, cpu_tmp0);
3839 break;
3840 case 9: // cwp
3841 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3842 break;
3843 case 10: // cansave
3844 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3845 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3846 offsetof(CPUSPARCState,
3847 cansave));
3848 break;
3849 case 11: // canrestore
3850 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3851 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3852 offsetof(CPUSPARCState,
3853 canrestore));
3854 break;
3855 case 12: // cleanwin
3856 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3857 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3858 offsetof(CPUSPARCState,
3859 cleanwin));
3860 break;
3861 case 13: // otherwin
3862 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3863 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3864 offsetof(CPUSPARCState,
3865 otherwin));
3866 break;
3867 case 14: // wstate
3868 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3869 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3870 offsetof(CPUSPARCState,
3871 wstate));
3872 break;
3873 case 16: // UA2005 gl
3874 CHECK_IU_FEATURE(dc, GL);
3875 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3876 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3877 offsetof(CPUSPARCState, gl));
3878 break;
3879 case 26: // UA2005 strand status
3880 CHECK_IU_FEATURE(dc, HYPV);
3881 if (!hypervisor(dc))
3882 goto priv_insn;
3883 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3884 break;
3885 default:
3886 goto illegal_insn;
3887 }
3888 #else
3889 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3890 if (dc->def->nwindows != 32)
3891 tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
3892 (1 << dc->def->nwindows) - 1);
3893 tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
3894 #endif
3895 }
3896 break;
3897 case 0x33: /* wrtbr, UA2005 wrhpr */
3898 {
3899 #ifndef TARGET_SPARC64
3900 if (!supervisor(dc))
3901 goto priv_insn;
3902 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3903 #else
3904 CHECK_IU_FEATURE(dc, HYPV);
3905 if (!hypervisor(dc))
3906 goto priv_insn;
3907 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3908 switch (rd) {
3909 case 0: // hpstate
3910 // XXX gen_op_wrhpstate();
3911 save_state(dc);
3912 gen_op_next_insn();
3913 tcg_gen_exit_tb(0);
3914 dc->is_br = 1;
3915 break;
3916 case 1: // htstate
3917 // XXX gen_op_wrhtstate();
3918 break;
3919 case 3: // hintp
3920 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3921 break;
3922 case 5: // htba
3923 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3924 break;
3925 case 31: // hstick_cmpr
3926 {
3927 TCGv_ptr r_tickptr;
3928
3929 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3930 r_tickptr = tcg_temp_new_ptr();
3931 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3932 offsetof(CPUSPARCState, hstick));
3933 gen_helper_tick_set_limit(r_tickptr,
3934 cpu_hstick_cmpr);
3935 tcg_temp_free_ptr(r_tickptr);
3936 }
3937 break;
3938 case 6: // hver readonly
3939 default:
3940 goto illegal_insn;
3941 }
3942 #endif
3943 }
3944 break;
3945 #endif
3946 #ifdef TARGET_SPARC64
3947 case 0x2c: /* V9 movcc */
3948 {
3949 int cc = GET_FIELD_SP(insn, 11, 12);
3950 int cond = GET_FIELD_SP(insn, 14, 17);
3951 DisasCompare cmp;
3952 TCGv dst;
3953
3954 if (insn & (1 << 18)) {
3955 if (cc == 0) {
3956 gen_compare(&cmp, 0, cond, dc);
3957 } else if (cc == 2) {
3958 gen_compare(&cmp, 1, cond, dc);
3959 } else {
3960 goto illegal_insn;
3961 }
3962 } else {
3963 gen_fcompare(&cmp, cc, cond);
3964 }
3965
3966 /* The get_src2 above loaded the normal 13-bit
3967 immediate field, not the 11-bit field we have
3968 in movcc. But it did handle the reg case. */
3969 if (IS_IMM) {
3970 simm = GET_FIELD_SPs(insn, 0, 10);
3971 tcg_gen_movi_tl(cpu_src2, simm);
3972 }
3973
3974 dst = gen_load_gpr(dc, rd);
3975 tcg_gen_movcond_tl(cmp.cond, dst,
3976 cmp.c1, cmp.c2,
3977 cpu_src2, dst);
3978 free_compare(&cmp);
3979 gen_store_gpr(dc, rd, dst);
3980 break;
3981 }
3982 case 0x2d: /* V9 sdivx */
3983 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3984 gen_store_gpr(dc, rd, cpu_dst);
3985 break;
3986 case 0x2e: /* V9 popc */
3987 gen_helper_popc(cpu_dst, cpu_src2);
3988 gen_store_gpr(dc, rd, cpu_dst);
3989 break;
3990 case 0x2f: /* V9 movr */
3991 {
3992 int cond = GET_FIELD_SP(insn, 10, 12);
3993 DisasCompare cmp;
3994 TCGv dst;
3995
3996 gen_compare_reg(&cmp, cond, cpu_src1);
3997
3998 /* The get_src2 above loaded the normal 13-bit
3999 immediate field, not the 10-bit field we have
4000 in movr. But it did handle the reg case. */
4001 if (IS_IMM) {
4002 simm = GET_FIELD_SPs(insn, 0, 9);
4003 tcg_gen_movi_tl(cpu_src2, simm);
4004 }
4005
4006 dst = gen_load_gpr(dc, rd);
4007 tcg_gen_movcond_tl(cmp.cond, dst,
4008 cmp.c1, cmp.c2,
4009 cpu_src2, dst);
4010 free_compare(&cmp);
4011 gen_store_gpr(dc, rd, dst);
4012 break;
4013 }
4014 #endif
4015 default:
4016 goto illegal_insn;
4017 }
4018 }
4019 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4020 #ifdef TARGET_SPARC64
4021 int opf = GET_FIELD_SP(insn, 5, 13);
4022 rs1 = GET_FIELD(insn, 13, 17);
4023 rs2 = GET_FIELD(insn, 27, 31);
4024 if (gen_trap_ifnofpu(dc)) {
4025 goto jmp_insn;
4026 }
4027
4028 switch (opf) {
4029 case 0x000: /* VIS I edge8cc */
4030 CHECK_FPU_FEATURE(dc, VIS1);
4031 cpu_src1 = gen_load_gpr(dc, rs1);
4032 cpu_src2 = gen_load_gpr(dc, rs2);
4033 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4034 gen_store_gpr(dc, rd, cpu_dst);
4035 break;
4036 case 0x001: /* VIS II edge8n */
4037 CHECK_FPU_FEATURE(dc, VIS2);
4038 cpu_src1 = gen_load_gpr(dc, rs1);
4039 cpu_src2 = gen_load_gpr(dc, rs2);
4040 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4041 gen_store_gpr(dc, rd, cpu_dst);
4042 break;
4043 case 0x002: /* VIS I edge8lcc */
4044 CHECK_FPU_FEATURE(dc, VIS1);
4045 cpu_src1 = gen_load_gpr(dc, rs1);
4046 cpu_src2 = gen_load_gpr(dc, rs2);
4047 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4048 gen_store_gpr(dc, rd, cpu_dst);
4049 break;
4050 case 0x003: /* VIS II edge8ln */
4051 CHECK_FPU_FEATURE(dc, VIS2);
4052 cpu_src1 = gen_load_gpr(dc, rs1);
4053 cpu_src2 = gen_load_gpr(dc, rs2);
4054 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4055 gen_store_gpr(dc, rd, cpu_dst);
4056 break;
4057 case 0x004: /* VIS I edge16cc */
4058 CHECK_FPU_FEATURE(dc, VIS1);
4059 cpu_src1 = gen_load_gpr(dc, rs1);
4060 cpu_src2 = gen_load_gpr(dc, rs2);
4061 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4062 gen_store_gpr(dc, rd, cpu_dst);
4063 break;
4064 case 0x005: /* VIS II edge16n */
4065 CHECK_FPU_FEATURE(dc, VIS2);
4066 cpu_src1 = gen_load_gpr(dc, rs1);
4067 cpu_src2 = gen_load_gpr(dc, rs2);
4068 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4069 gen_store_gpr(dc, rd, cpu_dst);
4070 break;
4071 case 0x006: /* VIS I edge16lcc */
4072 CHECK_FPU_FEATURE(dc, VIS1);
4073 cpu_src1 = gen_load_gpr(dc, rs1);
4074 cpu_src2 = gen_load_gpr(dc, rs2);
4075 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4076 gen_store_gpr(dc, rd, cpu_dst);
4077 break;
4078 case 0x007: /* VIS II edge16ln */
4079 CHECK_FPU_FEATURE(dc, VIS2);
4080 cpu_src1 = gen_load_gpr(dc, rs1);
4081 cpu_src2 = gen_load_gpr(dc, rs2);
4082 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4083 gen_store_gpr(dc, rd, cpu_dst);
4084 break;
4085 case 0x008: /* VIS I edge32cc */
4086 CHECK_FPU_FEATURE(dc, VIS1);
4087 cpu_src1 = gen_load_gpr(dc, rs1);
4088 cpu_src2 = gen_load_gpr(dc, rs2);
4089 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4090 gen_store_gpr(dc, rd, cpu_dst);
4091 break;
4092 case 0x009: /* VIS II edge32n */
4093 CHECK_FPU_FEATURE(dc, VIS2);
4094 cpu_src1 = gen_load_gpr(dc, rs1);
4095 cpu_src2 = gen_load_gpr(dc, rs2);
4096 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4097 gen_store_gpr(dc, rd, cpu_dst);
4098 break;
4099 case 0x00a: /* VIS I edge32lcc */
4100 CHECK_FPU_FEATURE(dc, VIS1);
4101 cpu_src1 = gen_load_gpr(dc, rs1);
4102 cpu_src2 = gen_load_gpr(dc, rs2);
4103 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4104 gen_store_gpr(dc, rd, cpu_dst);
4105 break;
4106 case 0x00b: /* VIS II edge32ln */
4107 CHECK_FPU_FEATURE(dc, VIS2);
4108 cpu_src1 = gen_load_gpr(dc, rs1);
4109 cpu_src2 = gen_load_gpr(dc, rs2);
4110 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4111 gen_store_gpr(dc, rd, cpu_dst);
4112 break;
4113 case 0x010: /* VIS I array8 */
4114 CHECK_FPU_FEATURE(dc, VIS1);
4115 cpu_src1 = gen_load_gpr(dc, rs1);
4116 cpu_src2 = gen_load_gpr(dc, rs2);
4117 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4118 gen_store_gpr(dc, rd, cpu_dst);
4119 break;
4120 case 0x012: /* VIS I array16 */
4121 CHECK_FPU_FEATURE(dc, VIS1);
4122 cpu_src1 = gen_load_gpr(dc, rs1);
4123 cpu_src2 = gen_load_gpr(dc, rs2);
4124 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4125 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4126 gen_store_gpr(dc, rd, cpu_dst);
4127 break;
4128 case 0x014: /* VIS I array32 */
4129 CHECK_FPU_FEATURE(dc, VIS1);
4130 cpu_src1 = gen_load_gpr(dc, rs1);
4131 cpu_src2 = gen_load_gpr(dc, rs2);
4132 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4133 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4134 gen_store_gpr(dc, rd, cpu_dst);
4135 break;
4136 case 0x018: /* VIS I alignaddr */
4137 CHECK_FPU_FEATURE(dc, VIS1);
4138 cpu_src1 = gen_load_gpr(dc, rs1);
4139 cpu_src2 = gen_load_gpr(dc, rs2);
4140 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4141 gen_store_gpr(dc, rd, cpu_dst);
4142 break;
4143 case 0x01a: /* VIS I alignaddrl */
4144 CHECK_FPU_FEATURE(dc, VIS1);
4145 cpu_src1 = gen_load_gpr(dc, rs1);
4146 cpu_src2 = gen_load_gpr(dc, rs2);
4147 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4148 gen_store_gpr(dc, rd, cpu_dst);
4149 break;
4150 case 0x019: /* VIS II bmask */
4151 CHECK_FPU_FEATURE(dc, VIS2);
4152 cpu_src1 = gen_load_gpr(dc, rs1);
4153 cpu_src2 = gen_load_gpr(dc, rs2);
4154 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4155 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4156 gen_store_gpr(dc, rd, cpu_dst);
4157 break;
4158 case 0x020: /* VIS I fcmple16 */
4159 CHECK_FPU_FEATURE(dc, VIS1);
4160 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4161 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4162 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4163 gen_store_gpr(dc, rd, cpu_dst);
4164 break;
4165 case 0x022: /* VIS I fcmpne16 */
4166 CHECK_FPU_FEATURE(dc, VIS1);
4167 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4168 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4169 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4170 gen_store_gpr(dc, rd, cpu_dst);
4171 break;
4172 case 0x024: /* VIS I fcmple32 */
4173 CHECK_FPU_FEATURE(dc, VIS1);
4174 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4175 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4176 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4177 gen_store_gpr(dc, rd, cpu_dst);
4178 break;
4179 case 0x026: /* VIS I fcmpne32 */
4180 CHECK_FPU_FEATURE(dc, VIS1);
4181 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4182 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4183 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4184 gen_store_gpr(dc, rd, cpu_dst);
4185 break;
4186 case 0x028: /* VIS I fcmpgt16 */
4187 CHECK_FPU_FEATURE(dc, VIS1);
4188 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4189 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4190 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4191 gen_store_gpr(dc, rd, cpu_dst);
4192 break;
4193 case 0x02a: /* VIS I fcmpeq16 */
4194 CHECK_FPU_FEATURE(dc, VIS1);
4195 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4196 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4197 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4198 gen_store_gpr(dc, rd, cpu_dst);
4199 break;
4200 case 0x02c: /* VIS I fcmpgt32 */
4201 CHECK_FPU_FEATURE(dc, VIS1);
4202 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4203 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4204 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4205 gen_store_gpr(dc, rd, cpu_dst);
4206 break;
4207 case 0x02e: /* VIS I fcmpeq32 */
4208 CHECK_FPU_FEATURE(dc, VIS1);
4209 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4210 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4211 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4212 gen_store_gpr(dc, rd, cpu_dst);
4213 break;
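/* Partitioned 8x16 multiplies: each 8-bit element of one source is
   multiplied by a 16-bit element of the other, keeping a rounded
   16-bit result.  The au/al forms use only the upper or lower half
   of the 32-bit rs1, and the fmuld forms widen to 32-bit results. */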
4214 case 0x031: /* VIS I fmul8x16 */
4215 CHECK_FPU_FEATURE(dc, VIS1);
4216 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4217 break;
4218 case 0x033: /* VIS I fmul8x16au */
4219 CHECK_FPU_FEATURE(dc, VIS1);
4220 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4221 break;
4222 case 0x035: /* VIS I fmul8x16al */
4223 CHECK_FPU_FEATURE(dc, VIS1);
4224 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4225 break;
4226 case 0x036: /* VIS I fmul8sux16 */
4227 CHECK_FPU_FEATURE(dc, VIS1);
4228 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4229 break;
4230 case 0x037: /* VIS I fmul8ulx16 */
4231 CHECK_FPU_FEATURE(dc, VIS1);
4232 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4233 break;
4234 case 0x038: /* VIS I fmuld8sux16 */
4235 CHECK_FPU_FEATURE(dc, VIS1);
4236 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4237 break;
4238 case 0x039: /* VIS I fmuld8ulx16 */
4239 CHECK_FPU_FEATURE(dc, VIS1);
4240 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4241 break;
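/* The fpack variants clip and truncate fixed-point values to a
   narrower format under control of the GSR.scale field, which is
   why these helpers receive cpu_gsr as an extra operand. */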
4242 case 0x03a: /* VIS I fpack32 */
4243 CHECK_FPU_FEATURE(dc, VIS1);
4244 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4245 break;
4246 case 0x03b: /* VIS I fpack16 */
4247 CHECK_FPU_FEATURE(dc, VIS1);
4248 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4249 cpu_dst_32 = gen_dest_fpr_F(dc);
4250 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4251 gen_store_fpr_F(dc, rd, cpu_dst_32);
4252 break;
4253 case 0x03d: /* VIS I fpackfix */
4254 CHECK_FPU_FEATURE(dc, VIS1);
4255 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4256 cpu_dst_32 = gen_dest_fpr_F(dc);
4257 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4258 gen_store_fpr_F(dc, rd, cpu_dst_32);
4259 break;
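/* pdist accumulates the sum of absolute differences of eight byte
   pairs into the destination; gen_ne_fop_DDDD therefore passes rd
   as both a source and the destination for the running total. */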
4260 case 0x03e: /* VIS I pdist */
4261 CHECK_FPU_FEATURE(dc, VIS1);
4262 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4263 break;
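/* faligndata concatenates the two 8-byte sources and extracts the
   8 bytes starting at the byte offset previously latched in
   GSR.align by alignaddr. */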
4264 case 0x048: /* VIS I faligndata */
4265 CHECK_FPU_FEATURE(dc, VIS1);
4266 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4267 break;
4268 case 0x04b: /* VIS I fpmerge */
4269 CHECK_FPU_FEATURE(dc, VIS1);
4270 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4271 break;
4272 case 0x04c: /* VIS II bshuffle */
4273 CHECK_FPU_FEATURE(dc, VIS2);
4274 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4275 break;
4276 case 0x04d: /* VIS I fexpand */
4277 CHECK_FPU_FEATURE(dc, VIS1);
4278 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4279 break;
4280 case 0x050: /* VIS I fpadd16 */
4281 CHECK_FPU_FEATURE(dc, VIS1);
4282 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4283 break;
4284 case 0x051: /* VIS I fpadd16s */
4285 CHECK_FPU_FEATURE(dc, VIS1);
4286 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4287 break;
4288 case 0x052: /* VIS I fpadd32 */
4289 CHECK_FPU_FEATURE(dc, VIS1);
4290 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4291 break;
4292 case 0x053: /* VIS I fpadd32s */
4293 CHECK_FPU_FEATURE(dc, VIS1);
4294 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4295 break;
4296 case 0x054: /* VIS I fpsub16 */
4297 CHECK_FPU_FEATURE(dc, VIS1);
4298 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4299 break;
4300 case 0x055: /* VIS I fpsub16s */
4301 CHECK_FPU_FEATURE(dc, VIS1);
4302 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4303 break;
4304 case 0x056: /* VIS I fpsub32 */
4305 CHECK_FPU_FEATURE(dc, VIS1);
4306 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4307 break;
4308 case 0x057: /* VIS I fpsub32s */
4309 CHECK_FPU_FEATURE(dc, VIS1);
4310 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4311 break;
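/* Opcodes 0x060-0x07f implement the sixteen boolean functions of
   two variables as VIS logical operations, mapped directly onto TCG
   ops (mov, not, and, or, xor and their complemented forms) with
   fzero/fone as the constant generators.  The odd-numbered "s"
   variants are the 32-bit single-precision versions. */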
4312 case 0x060: /* VIS I fzero */
4313 CHECK_FPU_FEATURE(dc, VIS1);
4314 cpu_dst_64 = gen_dest_fpr_D();
4315 tcg_gen_movi_i64(cpu_dst_64, 0);
4316 gen_store_fpr_D(dc, rd, cpu_dst_64);
4317 break;
4318 case 0x061: /* VIS I fzeros */
4319 CHECK_FPU_FEATURE(dc, VIS1);
4320 cpu_dst_32 = gen_dest_fpr_F(dc);
4321 tcg_gen_movi_i32(cpu_dst_32, 0);
4322 gen_store_fpr_F(dc, rd, cpu_dst_32);
4323 break;
4324 case 0x062: /* VIS I fnor */
4325 CHECK_FPU_FEATURE(dc, VIS1);
4326 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4327 break;
4328 case 0x063: /* VIS I fnors */
4329 CHECK_FPU_FEATURE(dc, VIS1);
4330 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4331 break;
4332 case 0x064: /* VIS I fandnot2 */
4333 CHECK_FPU_FEATURE(dc, VIS1);
4334 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4335 break;
4336 case 0x065: /* VIS I fandnot2s */
4337 CHECK_FPU_FEATURE(dc, VIS1);
4338 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4339 break;
4340 case 0x066: /* VIS I fnot2 */
4341 CHECK_FPU_FEATURE(dc, VIS1);
4342 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4343 break;
4344 case 0x067: /* VIS I fnot2s */
4345 CHECK_FPU_FEATURE(dc, VIS1);
4346 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4347 break;
4348 case 0x068: /* VIS I fandnot1 */
4349 CHECK_FPU_FEATURE(dc, VIS1);
4350 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4351 break;
4352 case 0x069: /* VIS I fandnot1s */
4353 CHECK_FPU_FEATURE(dc, VIS1);
4354 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4355 break;
4356 case 0x06a: /* VIS I fnot1 */
4357 CHECK_FPU_FEATURE(dc, VIS1);
4358 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4359 break;
4360 case 0x06b: /* VIS I fnot1s */
4361 CHECK_FPU_FEATURE(dc, VIS1);
4362 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4363 break;
4364 case 0x06c: /* VIS I fxor */
4365 CHECK_FPU_FEATURE(dc, VIS1);
4366 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4367 break;
4368 case 0x06d: /* VIS I fxors */
4369 CHECK_FPU_FEATURE(dc, VIS1);
4370 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4371 break;
4372 case 0x06e: /* VIS I fnand */
4373 CHECK_FPU_FEATURE(dc, VIS1);
4374 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4375 break;
4376 case 0x06f: /* VIS I fnands */
4377 CHECK_FPU_FEATURE(dc, VIS1);
4378 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4379 break;
4380 case 0x070: /* VIS I fand */
4381 CHECK_FPU_FEATURE(dc, VIS1);
4382 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4383 break;
4384 case 0x071: /* VIS I fands */
4385 CHECK_FPU_FEATURE(dc, VIS1);
4386 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4387 break;
4388 case 0x072: /* VIS I fxnor */
4389 CHECK_FPU_FEATURE(dc, VIS1);
4390 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4391 break;
4392 case 0x073: /* VIS I fxnors */
4393 CHECK_FPU_FEATURE(dc, VIS1);
4394 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4395 break;
4396 case 0x074: /* VIS I fsrc1 */
4397 CHECK_FPU_FEATURE(dc, VIS1);
4398 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4399 gen_store_fpr_D(dc, rd, cpu_src1_64);
4400 break;
4401 case 0x075: /* VIS I fsrc1s */
4402 CHECK_FPU_FEATURE(dc, VIS1);
4403 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4404 gen_store_fpr_F(dc, rd, cpu_src1_32);
4405 break;
4406 case 0x076: /* VIS I fornot2 */
4407 CHECK_FPU_FEATURE(dc, VIS1);
4408 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4409 break;
4410 case 0x077: /* VIS I fornot2s */
4411 CHECK_FPU_FEATURE(dc, VIS1);
4412 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4413 break;
4414 case 0x078: /* VIS I fsrc2 */
4415 CHECK_FPU_FEATURE(dc, VIS1);
4416 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4417 gen_store_fpr_D(dc, rd, cpu_src1_64);
4418 break;
4419 case 0x079: /* VIS I fsrc2s */
4420 CHECK_FPU_FEATURE(dc, VIS1);
4421 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4422 gen_store_fpr_F(dc, rd, cpu_src1_32);
4423 break;
4424 case 0x07a: /* VIS I fornot1 */
4425 CHECK_FPU_FEATURE(dc, VIS1);
4426 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4427 break;
4428 case 0x07b: /* VIS I fornot1s */
4429 CHECK_FPU_FEATURE(dc, VIS1);
4430 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4431 break;
4432 case 0x07c: /* VIS I for */
4433 CHECK_FPU_FEATURE(dc, VIS1);
4434 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4435 break;
4436 case 0x07d: /* VIS I fors */
4437 CHECK_FPU_FEATURE(dc, VIS1);
4438 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4439 break;
4440 case 0x07e: /* VIS I fone */
4441 CHECK_FPU_FEATURE(dc, VIS1);
4442 cpu_dst_64 = gen_dest_fpr_D();
4443 tcg_gen_movi_i64(cpu_dst_64, -1);
4444 gen_store_fpr_D(dc, rd, cpu_dst_64);
4445 break;
4446 case 0x07f: /* VIS I fones */
4447 CHECK_FPU_FEATURE(dc, VIS1);
4448 cpu_dst_32 = gen_dest_fpr_F(dc);
4449 tcg_gen_movi_i32(cpu_dst_32, -1);
4450 gen_store_fpr_F(dc, rd, cpu_dst_32);
4451 break;
4452 case 0x080: /* VIS I shutdown */
4453 case 0x081: /* VIS II siam */
4454 // XXX: not implemented
4455 goto illegal_insn;
4456 default:
4457 goto illegal_insn;
4458 }
4459 #else
4460 goto ncp_insn;
4461 #endif
4462 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4463 #ifdef TARGET_SPARC64
4464 goto illegal_insn;
4465 #else
4466 goto ncp_insn;
4467 #endif
4468 #ifdef TARGET_SPARC64
4469 } else if (xop == 0x39) { /* V9 return */
4470 TCGv_i32 r_const;
4471
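/* V9 return restores the caller's register window and then
   transfers control to rs1 + (rs2 or simm13), like a jmpl that
   writes no link register; the target is checked for 4-byte
   alignment (mask 3) and lands in npc, so the delay slot still
   executes first. */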
4472 save_state(dc);
4473 cpu_src1 = get_src1(dc, insn);
4474 if (IS_IMM) { /* immediate */
4475 simm = GET_FIELDs(insn, 19, 31);
4476 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4477 } else { /* register */
4478 rs2 = GET_FIELD(insn, 27, 31);
4479 if (rs2) {
4480 cpu_src2 = gen_load_gpr(dc, rs2);
4481 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4482 } else {
4483 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4484 }
4485 }
4486 gen_helper_restore(cpu_env);
4487 gen_mov_pc_npc(dc);
4488 r_const = tcg_const_i32(3);
4489 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4490 tcg_temp_free_i32(r_const);
4491 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4492 dc->npc = DYNAMIC_PC;
4493 goto jmp_insn;
4494 #endif
4495 } else {
4496 cpu_src1 = get_src1(dc, insn);
4497 if (IS_IMM) { /* immediate */
4498 simm = GET_FIELDs(insn, 19, 31);
4499 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4500 } else { /* register */
4501 rs2 = GET_FIELD(insn, 27, 31);
4502 if (rs2) {
4503 cpu_src2 = gen_load_gpr(dc, rs2);
4504 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4505 } else {
4506 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4507 }
4508 }
4509 switch (xop) {
4510 case 0x38: /* jmpl */
4511 {
4512 TCGv t;
4513 TCGv_i32 r_const;
4514
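/* jmpl writes the address of the jmpl itself (dc->pc) to rd and
   makes the computed target the new npc, so the transfer takes
   effect after the delay slot.  The synthetic retl, for example,
   is jmpl %o7 + 8, %g0.  The target must be 4-byte aligned. */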
4515 t = gen_dest_gpr(dc, rd);
4516 tcg_gen_movi_tl(t, dc->pc);
4517 gen_store_gpr(dc, rd, t);
4518 gen_mov_pc_npc(dc);
4519 r_const = tcg_const_i32(3);
4520 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4521 tcg_temp_free_i32(r_const);
4522 gen_address_mask(dc, cpu_dst);
4523 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4524 dc->npc = DYNAMIC_PC;
4525 }
4526 goto jmp_insn;
4527 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4528 case 0x39: /* rett, V9 return */
4529 {
4530 TCGv_i32 r_const;
4531
4532 if (!supervisor(dc))
4533 goto priv_insn;
4534 gen_mov_pc_npc(dc);
4535 r_const = tcg_const_i32(3);
4536 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4537 tcg_temp_free_i32(r_const);
4538 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4539 dc->npc = DYNAMIC_PC;
4540 gen_helper_rett(cpu_env);
4541 }
4542 goto jmp_insn;
4543 #endif
4544 case 0x3b: /* flush */
4545 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4546 goto unimp_flush;
4547 /* nop */
4548 break;
4549 case 0x3c: /* save */
4550 save_state(dc);
4551 gen_helper_save(cpu_env);
4552 gen_store_gpr(dc, rd, cpu_dst);
4553 break;
4554 case 0x3d: /* restore */
4555 save_state(dc);
4556 gen_helper_restore(cpu_env);
4557 gen_store_gpr(dc, rd, cpu_dst);
4558 break;
4559 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4560 case 0x3e: /* V9 done/retry */
4561 {
4562 switch (rd) {
4563 case 0:
4564 if (!supervisor(dc))
4565 goto priv_insn;
4566 dc->npc = DYNAMIC_PC;
4567 dc->pc = DYNAMIC_PC;
4568 gen_helper_done(cpu_env);
4569 goto jmp_insn;
4570 case 1:
4571 if (!supervisor(dc))
4572 goto priv_insn;
4573 dc->npc = DYNAMIC_PC;
4574 dc->pc = DYNAMIC_PC;
4575 gen_helper_retry(cpu_env);
4576 goto jmp_insn;
4577 default:
4578 goto illegal_insn;
4579 }
4580 }
4581 break;
4582 #endif
4583 default:
4584 goto illegal_insn;
4585 }
4586 }
4587 break;
4588 }
4589 break;
4590 case 3: /* load/store instructions */
4591 {
4592 unsigned int xop = GET_FIELD(insn, 7, 12);
4593 /* ??? gen_address_mask prevents us from using a source
4594 register directly. Always generate a temporary. */
4595 TCGv cpu_addr = get_temp_tl(dc);
4596
4597 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4598 if (xop == 0x3c || xop == 0x3e) {
4599 /* V9 casa/casxa : no offset */
4600 } else if (IS_IMM) { /* immediate */
4601 simm = GET_FIELDs(insn, 19, 31);
4602 if (simm != 0) {
4603 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4604 }
4605 } else { /* register */
4606 rs2 = GET_FIELD(insn, 27, 31);
4607 if (rs2 != 0) {
4608 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4609 }
4610 }
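/* At this point cpu_addr holds the effective address: rs1 + simm13
   for the immediate form, rs1 + rs2 otherwise (casa/casxa take no
   offset).  For example, "ld [%o0 + 8], %o1" yields %o0 + 8.  The
   xop range tests below dispatch to integer loads, FP loads,
   integer stores and FP stores. */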
4611 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4612 (xop > 0x17 && xop <= 0x1d ) ||
4613 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4614 TCGv cpu_val = gen_dest_gpr(dc, rd);
4615
4616 switch (xop) {
4617 case 0x0: /* ld, V9 lduw, load unsigned word */
4618 gen_address_mask(dc, cpu_addr);
4619 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4620 break;
4621 case 0x1: /* ldub, load unsigned byte */
4622 gen_address_mask(dc, cpu_addr);
4623 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4624 break;
4625 case 0x2: /* lduh, load unsigned halfword */
4626 gen_address_mask(dc, cpu_addr);
4627 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4628 break;
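/* ldd loads one aligned 64-bit value into an even/odd register
   pair: the high word goes to rd and the low word to rd + 1, so
   e.g. "ldd [%o0], %g2" fills %g2 and %g3.  An odd rd is rejected
   as an illegal instruction. */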
4629 case 0x3: /* ldd, load double word */
4630 if (rd & 1)
4631 goto illegal_insn;
4632 else {
4633 TCGv_i32 r_const;
4634
4635 save_state(dc);
4636 r_const = tcg_const_i32(7);
4637 /* XXX remove alignment check */
4638 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4639 tcg_temp_free_i32(r_const);
4640 gen_address_mask(dc, cpu_addr);
4641 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4642 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
4643 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
4644 gen_store_gpr(dc, rd + 1, cpu_tmp0);
4645 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
4646 tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
4647 tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
4648 }
4649 break;
4650 case 0x9: /* ldsb, load signed byte */
4651 gen_address_mask(dc, cpu_addr);
4652 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4653 break;
4654 case 0xa: /* ldsh, load signed halfword */
4655 gen_address_mask(dc, cpu_addr);
4656 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4657 break;
4658 case 0xd: /* ldstub -- XXX: should be atomic */
4659 {
4660 TCGv r_const;
4661
4662 gen_address_mask(dc, cpu_addr);
4663 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4664 r_const = tcg_const_tl(0xff);
4665 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4666 tcg_temp_free(r_const);
4667 }
4668 break;
4669 case 0x0f: /* swap, swap register with memory. Also
4670 atomic */
4671 CHECK_IU_FEATURE(dc, SWAP);
4672 cpu_src1 = gen_load_gpr(dc, rd);
4673 gen_address_mask(dc, cpu_addr);
4674 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4675 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4676 tcg_gen_mov_tl(cpu_val, cpu_tmp0);
4677 break;
4678 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4679 case 0x10: /* lda, V9 lduwa, load word alternate */
4680 #ifndef TARGET_SPARC64
4681 if (IS_IMM)
4682 goto illegal_insn;
4683 if (!supervisor(dc))
4684 goto priv_insn;
4685 #endif
4686 save_state(dc);
4687 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4688 break;
4689 case 0x11: /* lduba, load unsigned byte alternate */
4690 #ifndef TARGET_SPARC64
4691 if (IS_IMM)
4692 goto illegal_insn;
4693 if (!supervisor(dc))
4694 goto priv_insn;
4695 #endif
4696 save_state(dc);
4697 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4698 break;
4699 case 0x12: /* lduha, load unsigned halfword alternate */
4700 #ifndef TARGET_SPARC64
4701 if (IS_IMM)
4702 goto illegal_insn;
4703 if (!supervisor(dc))
4704 goto priv_insn;
4705 #endif
4706 save_state(dc);
4707 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4708 break;
4709 case 0x13: /* ldda, load double word alternate */
4710 #ifndef TARGET_SPARC64
4711 if (IS_IMM)
4712 goto illegal_insn;
4713 if (!supervisor(dc))
4714 goto priv_insn;
4715 #endif
4716 if (rd & 1)
4717 goto illegal_insn;
4718 save_state(dc);
4719 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4720 goto skip_move;
4721 case 0x19: /* ldsba, load signed byte alternate */
4722 #ifndef TARGET_SPARC64
4723 if (IS_IMM)
4724 goto illegal_insn;
4725 if (!supervisor(dc))
4726 goto priv_insn;
4727 #endif
4728 save_state(dc);
4729 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4730 break;
4731 case 0x1a: /* ldsha, load signed halfword alternate */
4732 #ifndef TARGET_SPARC64
4733 if (IS_IMM)
4734 goto illegal_insn;
4735 if (!supervisor(dc))
4736 goto priv_insn;
4737 #endif
4738 save_state(dc);
4739 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4740 break;
4741 case 0x1d: /* ldstuba -- XXX: should be atomic */
4742 #ifndef TARGET_SPARC64
4743 if (IS_IMM)
4744 goto illegal_insn;
4745 if (!supervisor(dc))
4746 goto priv_insn;
4747 #endif
4748 save_state(dc);
4749 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4750 break;
4751 case 0x1f: /* swapa, swap reg with alt. memory. Also
4752 atomic */
4753 CHECK_IU_FEATURE(dc, SWAP);
4754 #ifndef TARGET_SPARC64
4755 if (IS_IMM)
4756 goto illegal_insn;
4757 if (!supervisor(dc))
4758 goto priv_insn;
4759 #endif
4760 save_state(dc);
4761 cpu_src1 = gen_load_gpr(dc, rd);
4762 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4763 break;
4764
4765 #ifndef TARGET_SPARC64
4766 case 0x30: /* ldc */
4767 case 0x31: /* ldcsr */
4768 case 0x33: /* lddc */
4769 goto ncp_insn;
4770 #endif
4771 #endif
4772 #ifdef TARGET_SPARC64
4773 case 0x08: /* V9 ldsw */
4774 gen_address_mask(dc, cpu_addr);
4775 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4776 break;
4777 case 0x0b: /* V9 ldx */
4778 gen_address_mask(dc, cpu_addr);
4779 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4780 break;
4781 case 0x18: /* V9 ldswa */
4782 save_state(dc);
4783 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4784 break;
4785 case 0x1b: /* V9 ldxa */
4786 save_state(dc);
4787 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4788 break;
4789 case 0x2d: /* V9 prefetch, no effect */
4790 goto skip_move;
4791 case 0x30: /* V9 ldfa */
4792 if (gen_trap_ifnofpu(dc)) {
4793 goto jmp_insn;
4794 }
4795 save_state(dc);
4796 gen_ldf_asi(cpu_addr, insn, 4, rd);
4797 gen_update_fprs_dirty(rd);
4798 goto skip_move;
4799 case 0x33: /* V9 lddfa */
4800 if (gen_trap_ifnofpu(dc)) {
4801 goto jmp_insn;
4802 }
4803 save_state(dc);
4804 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4805 gen_update_fprs_dirty(DFPREG(rd));
4806 goto skip_move;
4807 case 0x3d: /* V9 prefetcha, no effect */
4808 goto skip_move;
4809 case 0x32: /* V9 ldqfa */
4810 CHECK_FPU_FEATURE(dc, FLOAT128);
4811 if (gen_trap_ifnofpu(dc)) {
4812 goto jmp_insn;
4813 }
4814 save_state(dc);
4815 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4816 gen_update_fprs_dirty(QFPREG(rd));
4817 goto skip_move;
4818 #endif
4819 default:
4820 goto illegal_insn;
4821 }
4822 gen_store_gpr(dc, rd, cpu_val);
4823 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4824 skip_move: ;
4825 #endif
4826 } else if (xop >= 0x20 && xop < 0x24) {
4827 if (gen_trap_ifnofpu(dc)) {
4828 goto jmp_insn;
4829 }
4830 save_state(dc);
4831 switch (xop) {
4832 case 0x20: /* ldf, load fpreg */
4833 gen_address_mask(dc, cpu_addr);
4834 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4835 cpu_dst_32 = gen_dest_fpr_F(dc);
4836 tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
4837 gen_store_fpr_F(dc, rd, cpu_dst_32);
4838 break;
4839 case 0x21: /* ldfsr, V9 ldxfsr */
4840 #ifdef TARGET_SPARC64
4841 gen_address_mask(dc, cpu_addr);
4842 if (rd == 1) {
4843 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4844 gen_helper_ldxfsr(cpu_env, cpu_tmp64);
4845 } else {
4846 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4847 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
4848 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4849 }
4850 #else
4851 {
4852 tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
4853 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4854 }
4855 #endif
4856 break;
4857 case 0x22: /* ldqf, load quad fpreg */
4858 {
4859 TCGv_i32 r_const;
4860
4861 CHECK_FPU_FEATURE(dc, FLOAT128);
4862 r_const = tcg_const_i32(dc->mem_idx);
4863 gen_address_mask(dc, cpu_addr);
4864 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4865 tcg_temp_free_i32(r_const);
4866 gen_op_store_QT0_fpr(QFPREG(rd));
4867 gen_update_fprs_dirty(QFPREG(rd));
4868 }
4869 break;
4870 case 0x23: /* lddf, load double fpreg */
4871 gen_address_mask(dc, cpu_addr);
4872 cpu_dst_64 = gen_dest_fpr_D();
4873 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4874 gen_store_fpr_D(dc, rd, cpu_dst_64);
4875 break;
4876 default:
4877 goto illegal_insn;
4878 }
4879 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4880 xop == 0xe || xop == 0x1e) {
4881 TCGv cpu_val = gen_load_gpr(dc, rd);
4882
4883 switch (xop) {
4884 case 0x4: /* st, store word */
4885 gen_address_mask(dc, cpu_addr);
4886 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4887 break;
4888 case 0x5: /* stb, store byte */
4889 gen_address_mask(dc, cpu_addr);
4890 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4891 break;
4892 case 0x6: /* sth, store halfword */
4893 gen_address_mask(dc, cpu_addr);
4894 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4895 break;
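/* std mirrors ldd: rd supplies the high word and rd + 1 the low
   word, concatenated into a single 64-bit store; rd must again be
   even. */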
4896 case 0x7: /* std, store double word */
4897 if (rd & 1)
4898 goto illegal_insn;
4899 else {
4900 TCGv_i32 r_const;
4901 TCGv lo;
4902
4903 save_state(dc);
4904 gen_address_mask(dc, cpu_addr);
4905 r_const = tcg_const_i32(7);
4906 /* XXX remove alignment check */
4907 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4908 tcg_temp_free_i32(r_const);
4909 lo = gen_load_gpr(dc, rd + 1);
4910 tcg_gen_concat_tl_i64(cpu_tmp64, lo, cpu_val);
4911 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
4912 }
4913 break;
4914 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4915 case 0x14: /* sta, V9 stwa, store word alternate */
4916 #ifndef TARGET_SPARC64
4917 if (IS_IMM)
4918 goto illegal_insn;
4919 if (!supervisor(dc))
4920 goto priv_insn;
4921 #endif
4922 save_state(dc);
4923 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4924 dc->npc = DYNAMIC_PC;
4925 break;
4926 case 0x15: /* stba, store byte alternate */
4927 #ifndef TARGET_SPARC64
4928 if (IS_IMM)
4929 goto illegal_insn;
4930 if (!supervisor(dc))
4931 goto priv_insn;
4932 #endif
4933 save_state(dc);
4934 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4935 dc->npc = DYNAMIC_PC;
4936 break;
4937 case 0x16: /* stha, store halfword alternate */
4938 #ifndef TARGET_SPARC64
4939 if (IS_IMM)
4940 goto illegal_insn;
4941 if (!supervisor(dc))
4942 goto priv_insn;
4943 #endif
4944 save_state(dc);
4945 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4946 dc->npc = DYNAMIC_PC;
4947 break;
4948 case 0x17: /* stda, store double word alternate */
4949 #ifndef TARGET_SPARC64
4950 if (IS_IMM)
4951 goto illegal_insn;
4952 if (!supervisor(dc))
4953 goto priv_insn;
4954 #endif
4955 if (rd & 1)
4956 goto illegal_insn;
4957 else {
4958 save_state(dc);
4959 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4960 }
4961 break;
4962 #endif
4963 #ifdef TARGET_SPARC64
4964 case 0x0e: /* V9 stx */
4965 gen_address_mask(dc, cpu_addr);
4966 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4967 break;
4968 case 0x1e: /* V9 stxa */
4969 save_state(dc);
4970 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4971 dc->npc = DYNAMIC_PC;
4972 break;
4973 #endif
4974 default:
4975 goto illegal_insn;
4976 }
4977 } else if (xop > 0x23 && xop < 0x28) {
4978 if (gen_trap_ifnofpu(dc)) {
4979 goto jmp_insn;
4980 }
4981 save_state(dc);
4982 switch (xop) {
4983 case 0x24: /* stf, store fpreg */
4984 gen_address_mask(dc, cpu_addr);
4985 cpu_src1_32 = gen_load_fpr_F(dc, rd);
4986 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
4987 tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
4988 break;
4989 case 0x25: /* stfsr, V9 stxfsr */
4990 #ifdef TARGET_SPARC64
4991 gen_address_mask(dc, cpu_addr);
4992 tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
4993 if (rd == 1)
4994 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
4995 else
4996 tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
4997 #else
4998 tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
4999 tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
5000 #endif
5001 break;
5002 case 0x26:
5003 #ifdef TARGET_SPARC64
5004 /* V9 stqf, store quad fpreg */
5005 {
5006 TCGv_i32 r_const;
5007
5008 CHECK_FPU_FEATURE(dc, FLOAT128);
5009 gen_op_load_fpr_QT0(QFPREG(rd));
5010 r_const = tcg_const_i32(dc->mem_idx);
5011 gen_address_mask(dc, cpu_addr);
5012 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5013 tcg_temp_free_i32(r_const);
5014 }
5015 break;
5016 #else /* !TARGET_SPARC64 */
5017 /* stdfq, store floating point queue */
5018 #if defined(CONFIG_USER_ONLY)
5019 goto illegal_insn;
5020 #else
5021 if (!supervisor(dc))
5022 goto priv_insn;
5023 if (gen_trap_ifnofpu(dc)) {
5024 goto jmp_insn;
5025 }
5026 goto nfq_insn;
5027 #endif
5028 #endif
5029 case 0x27: /* stdf, store double fpreg */
5030 gen_address_mask(dc, cpu_addr);
5031 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5032 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5033 break;
5034 default:
5035 goto illegal_insn;
5036 }
5037 } else if (xop > 0x33 && xop < 0x3f) {
5038 save_state(dc);
5039 switch (xop) {
5040 #ifdef TARGET_SPARC64
5041 case 0x34: /* V9 stfa */
5042 if (gen_trap_ifnofpu(dc)) {
5043 goto jmp_insn;
5044 }
5045 gen_stf_asi(cpu_addr, insn, 4, rd);
5046 break;
5047 case 0x36: /* V9 stqfa */
5048 {
5049 TCGv_i32 r_const;
5050
5051 CHECK_FPU_FEATURE(dc, FLOAT128);
5052 if (gen_trap_ifnofpu(dc)) {
5053 goto jmp_insn;
5054 }
5055 r_const = tcg_const_i32(7);
5056 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5057 tcg_temp_free_i32(r_const);
5058 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5059 }
5060 break;
5061 case 0x37: /* V9 stdfa */
5062 if (gen_trap_ifnofpu(dc)) {
5063 goto jmp_insn;
5064 }
5065 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5066 break;
5067 case 0x3c: /* V9 casa */
5068 rs2 = GET_FIELD(insn, 27, 31);
5069 cpu_src2 = gen_load_gpr(dc, rs2);
5070 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5071 break;
5072 case 0x3e: /* V9 casxa */
5073 rs2 = GET_FIELD(insn, 27, 31);
5074 cpu_src2 = gen_load_gpr(dc, rs2);
5075 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5076 break;
5077 #else
5078 case 0x34: /* stc */
5079 case 0x35: /* stcsr */
5080 case 0x36: /* stdcq */
5081 case 0x37: /* stdc */
5082 goto ncp_insn;
5083 #endif
5084 default:
5085 goto illegal_insn;
5086 }
5087 } else {
5088 goto illegal_insn;
5089 }
5090 }
5091 break;
5092 }
5093 /* default case for non-jump instructions */
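/* Normally pc becomes npc and npc advances by 4.  DYNAMIC_PC means
   the next PC must be computed at run time, while JUMP_PC means npc
   is one of two known branch targets that can still be resolved
   here with a static two-way jump. */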
5094 if (dc->npc == DYNAMIC_PC) {
5095 dc->pc = DYNAMIC_PC;
5096 gen_op_next_insn();
5097 } else if (dc->npc == JUMP_PC) {
5098 /* we can do a static jump */
5099 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5100 dc->is_br = 1;
5101 } else {
5102 dc->pc = dc->npc;
5103 dc->npc = dc->npc + 4;
5104 }
5105 jmp_insn:
5106 goto egress;
5107 illegal_insn:
5108 {
5109 TCGv_i32 r_const;
5110
5111 save_state(dc);
5112 r_const = tcg_const_i32(TT_ILL_INSN);
5113 gen_helper_raise_exception(cpu_env, r_const);
5114 tcg_temp_free_i32(r_const);
5115 dc->is_br = 1;
5116 }
5117 goto egress;
5118 unimp_flush:
5119 {
5120 TCGv_i32 r_const;
5121
5122 save_state(dc);
5123 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5124 gen_helper_raise_exception(cpu_env, r_const);
5125 tcg_temp_free_i32(r_const);
5126 dc->is_br = 1;
5127 }
5128 goto egress;
5129 #if !defined(CONFIG_USER_ONLY)
5130 priv_insn:
5131 {
5132 TCGv_i32 r_const;
5133
5134 save_state(dc);
5135 r_const = tcg_const_i32(TT_PRIV_INSN);
5136 gen_helper_raise_exception(cpu_env, r_const);
5137 tcg_temp_free_i32(r_const);
5138 dc->is_br = 1;
5139 }
5140 goto egress;
5141 #endif
5142 nfpu_insn:
5143 save_state(dc);
5144 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5145 dc->is_br = 1;
5146 goto egress;
5147 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5148 nfq_insn:
5149 save_state(dc);
5150 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5151 dc->is_br = 1;
5152 goto egress;
5153 #endif
5154 #ifndef TARGET_SPARC64
5155 ncp_insn:
5156 {
5157 TCGv_i32 r_const;
5158
5159 save_state(dc);
5160 r_const = tcg_const_i32(TT_NCP_INSN);
5161 gen_helper_raise_exception(cpu_env, r_const);
5162 tcg_temp_free_i32(r_const);
5163 dc->is_br = 1;
5164 }
5165 goto egress;
5166 #endif
5167 egress:
5168 if (dc->n_t32 != 0) {
5169 int i;
5170 for (i = dc->n_t32 - 1; i >= 0; --i) {
5171 tcg_temp_free_i32(dc->t32[i]);
5172 }
5173 dc->n_t32 = 0;
5174 }
5175 if (dc->n_ttl != 0) {
5176 int i;
5177 for (i = dc->n_ttl - 1; i >= 0; --i) {
5178 tcg_temp_free(dc->ttl[i]);
5179 }
5180 dc->n_ttl = 0;
5181 }
5182 }
5183
5184 static inline void gen_intermediate_code_internal(TranslationBlock *tb,
5185 int spc, CPUSPARCState *env)
5186 {
5187 target_ulong pc_start, last_pc;
5188 uint16_t *gen_opc_end;
5189 DisasContext dc1, *dc = &dc1;
5190 CPUBreakpoint *bp;
5191 int j, lj = -1;
5192 int num_insns;
5193 int max_insns;
5194 unsigned int insn;
5195
5196 memset(dc, 0, sizeof(DisasContext));
5197 dc->tb = tb;
5198 pc_start = tb->pc;
5199 dc->pc = pc_start;
5200 last_pc = dc->pc;
5201 dc->npc = (target_ulong) tb->cs_base;
5202 dc->cc_op = CC_OP_DYNAMIC;
5203 dc->mem_idx = cpu_mmu_index(env);
5204 dc->def = env->def;
5205 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5206 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5207 dc->singlestep = (env->singlestep_enabled || singlestep);
5208 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
5209
5210 num_insns = 0;
5211 max_insns = tb->cflags & CF_COUNT_MASK;
5212 if (max_insns == 0)
5213 max_insns = CF_COUNT_MASK;
5214 gen_icount_start();
5215 do {
5216 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
5217 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5218 if (bp->pc == dc->pc) {
5219 if (dc->pc != pc_start)
5220 save_state(dc);
5221 gen_helper_debug(cpu_env);
5222 tcg_gen_exit_tb(0);
5223 dc->is_br = 1;
5224 goto exit_gen_loop;
5225 }
5226 }
5227 }
5228 if (spc) {
5229 qemu_log("Search PC...\n");
5230 j = gen_opc_ptr - gen_opc_buf;
5231 if (lj < j) {
5232 lj++;
5233 while (lj < j)
5234 gen_opc_instr_start[lj++] = 0;
5235 gen_opc_pc[lj] = dc->pc;
5236 gen_opc_npc[lj] = dc->npc;
5237 gen_opc_instr_start[lj] = 1;
5238 gen_opc_icount[lj] = num_insns;
5239 }
5240 }
5241 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
5242 gen_io_start();
5243 last_pc = dc->pc;
5244 insn = cpu_ldl_code(env, dc->pc);
5245
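/* The legacy cpu_tmp0/cpu_tmp32/cpu_tmp64/cpu_dst temporaries live
   for exactly one guest instruction; longer-lived temporaries are
   tracked in the t32/ttl arrays and released at the egress label
   of disas_sparc_insn instead. */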
5246 cpu_tmp0 = tcg_temp_new();
5247 cpu_tmp32 = tcg_temp_new_i32();
5248 cpu_tmp64 = tcg_temp_new_i64();
5249 cpu_dst = tcg_temp_new();
5250
5251 disas_sparc_insn(dc, insn);
5252 num_insns++;
5253
5254 tcg_temp_free(cpu_dst);
5255 tcg_temp_free_i64(cpu_tmp64);
5256 tcg_temp_free_i32(cpu_tmp32);
5257 tcg_temp_free(cpu_tmp0);
5258
5259 if (dc->is_br)
5260 break;
5261 /* if the next PC is different, we abort now */
5262 if (dc->pc != (last_pc + 4))
5263 break;
5264 /* if we reach a page boundary, we stop generation so that the
5265 PC of a TT_TFAULT exception is always in the right page */
5266 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5267 break;
5268 /* in single-step mode, we generate only one instruction and
5269 then raise an exception */
5270 if (dc->singlestep) {
5271 break;
5272 }
5273 } while ((gen_opc_ptr < gen_opc_end) &&
5274 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5275 num_insns < max_insns);
5276
5277 exit_gen_loop:
5278 if (tb->cflags & CF_LAST_IO) {
5279 gen_io_end();
5280 }
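/* If the TB ended without an explicit branch, either chain directly
   to the successor TB (possible only when both pc and npc are known
   at translation time) or write the state back and exit to the main
   loop. */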
5281 if (!dc->is_br) {
5282 if (dc->pc != DYNAMIC_PC &&
5283 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5284 /* static PC and NPC: we can use direct chaining */
5285 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5286 } else {
5287 if (dc->pc != DYNAMIC_PC) {
5288 tcg_gen_movi_tl(cpu_pc, dc->pc);
5289 }
5290 save_npc(dc);
5291 tcg_gen_exit_tb(0);
5292 }
5293 }
5294 gen_icount_end(tb, num_insns);
5295 *gen_opc_ptr = INDEX_op_end;
5296 if (spc) {
5297 j = gen_opc_ptr - gen_opc_buf;
5298 lj++;
5299 while (lj <= j)
5300 gen_opc_instr_start[lj++] = 0;
5301 #if 0
5302 log_page_dump();
5303 #endif
5304 gen_opc_jump_pc[0] = dc->jump_pc[0];
5305 gen_opc_jump_pc[1] = dc->jump_pc[1];
5306 } else {
5307 tb->size = last_pc + 4 - pc_start;
5308 tb->icount = num_insns;
5309 }
5310 #ifdef DEBUG_DISAS
5311 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5312 qemu_log("--------------\n");
5313 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5314 log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
5315 qemu_log("\n");
5316 }
5317 #endif
5318 }
5319
5320 void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
5321 {
5322 gen_intermediate_code_internal(tb, 0, env);
5323 }
5324
5325 void gen_intermediate_code_pc(CPUSPARCState *env, TranslationBlock *tb)
5326 {
5327 gen_intermediate_code_internal(tb, 1, env);
5328 }
5329
5330 void gen_intermediate_code_init(CPUSPARCState *env)
5331 {
5332 unsigned int i;
5333 static int inited;
5334 static const char * const gregnames[8] = {
5335 NULL, // g0 not used
5336 "g1",
5337 "g2",
5338 "g3",
5339 "g4",
5340 "g5",
5341 "g6",
5342 "g7",
5343 };
5344 static const char * const fregnames[32] = {
5345 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5346 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5347 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5348 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5349 };
5350
5351 /* init various static tables */
5352 if (!inited) {
5353 inited = 1;
5354
5355 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5356 cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
5357 offsetof(CPUSPARCState, regwptr),
5358 "regwptr");
5359 #ifdef TARGET_SPARC64
5360 cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
5361 "xcc");
5362 cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
5363 "asi");
5364 cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
5365 "fprs");
5366 cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
5367 "gsr");
5368 cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
5369 offsetof(CPUSPARCState, tick_cmpr),
5370 "tick_cmpr");
5371 cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
5372 offsetof(CPUSPARCState, stick_cmpr),
5373 "stick_cmpr");
5374 cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
5375 offsetof(CPUSPARCState, hstick_cmpr),
5376 "hstick_cmpr");
5377 cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
5378 "hintp");
5379 cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
5380 "htba");
5381 cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
5382 "hver");
5383 cpu_ssr = tcg_global_mem_new(TCG_AREG0,
5384 offsetof(CPUSPARCState, ssr), "ssr");
5385 cpu_ver = tcg_global_mem_new(TCG_AREG0,
5386 offsetof(CPUSPARCState, version), "ver");
5387 cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
5388 offsetof(CPUSPARCState, softint),
5389 "softint");
5390 #else
5391 cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
5392 "wim");
5393 #endif
5394 cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
5395 "cond");
5396 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
5397 "cc_src");
5398 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
5399 offsetof(CPUSPARCState, cc_src2),
5400 "cc_src2");
5401 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
5402 "cc_dst");
5403 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
5404 "cc_op");
5405 cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
5406 "psr");
5407 cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
5408 "fsr");
5409 cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
5410 "pc");
5411 cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
5412 "npc");
5413 cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
5414 #ifndef CONFIG_USER_ONLY
5415 cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
5416 "tbr");
5417 #endif
5418 for (i = 1; i < 8; i++) {
5419 cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
5420 offsetof(CPUSPARCState, gregs[i]),
5421 gregnames[i]);
5422 }
5423 for (i = 0; i < TARGET_DPREGS; i++) {
5424 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
5425 offsetof(CPUSPARCState, fpr[i]),
5426 fregnames[i]);
5427 }
5428
5429 /* register helpers */
5430
5431 #define GEN_HELPER 2
5432 #include "helper.h"
5433 }
5434 }
5435
5436 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
5437 {
5438 target_ulong npc;
5439 env->pc = gen_opc_pc[pc_pos];
5440 npc = gen_opc_npc[pc_pos];
5441 if (npc == DYNAMIC_PC) {
5442 /* dynamic NPC: already stored */
5443 } else if (npc == JUMP_PC) {
5444 /* jump PC: use 'cond' and the jump targets of the translation */
5445 if (env->cond) {
5446 env->npc = gen_opc_jump_pc[0];
5447 } else {
5448 env->npc = gen_opc_jump_pc[1];
5449 }
5450 } else {
5451 env->npc = npc;
5452 }
5453 }