git.proxmox.com Git - mirror_qemu.git / blob - target-sparc/translate.c
target-sparc: Use movcond in mulscc
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond, cpu_dst, cpu_addr, cpu_val;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0;
63 static TCGv_i32 cpu_tmp32;
64 static TCGv_i64 cpu_tmp64;
65 /* Floating point registers */
66 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67
68 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
69 static target_ulong gen_opc_jump_pc[2];
70
71 #include "gen-icount.h"
72
73 typedef struct DisasContext {
74 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
75 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
76 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
77 int is_br;
78 int mem_idx;
79 int fpu_enabled;
80 int address_mask_32bit;
81 int singlestep;
82 uint32_t cc_op; /* current CC operation */
83 struct TranslationBlock *tb;
84 sparc_def_t *def;
85 TCGv_i32 t32[3];
86 int n_t32;
87 } DisasContext;
88
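/* Describes a comparison used to generate movcond/brcond: c1 and c2 are
   the operands, g1/g2 flag them as TCG globals that free_compare() must
   not release, and is_bool means c1 already holds a 0/1 result to be
   tested against c2 == 0. */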
89 typedef struct {
90 TCGCond cond;
91 bool is_bool;
92 bool g1, g2;
93 TCGv c1, c2;
94 } DisasCompare;
95
96 // This macro uses non-native bit order (bit 0 is the most significant bit)
97 #define GET_FIELD(X, FROM, TO) \
98 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
99
100 // This macro uses the order in the manuals, i.e. bit 0 is 2^0 (the LSB)
101 #define GET_FIELD_SP(X, FROM, TO) \
102 GET_FIELD(X, 31 - (TO), 31 - (FROM))
103
104 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
105 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
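/* Worked example: GET_FIELD(insn, 3, 6) == (insn >> 25) & 0xf, i.e. insn
   bits 28..25 in native numbering; GET_FIELD_SP(insn, 25, 28) extracts
   the same four bits using the manuals' LSB-is-0 numbering.  The ..s
   variants additionally sign-extend the extracted field. */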
106
107 #ifdef TARGET_SPARC64
108 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
109 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
110 #else
111 #define DFPREG(r) (r & 0x1e)
112 #define QFPREG(r) (r & 0x1c)
113 #endif
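/* On sparc64 the double/quad register number encodes bit 5 in the low
   bit of rd, so e.g. DFPREG(1) == 32 (%f32) while DFPREG(2) == 2 (%f2);
   the low bit(s) of the aligned register pair are masked off in either
   case. */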
114
115 #define UA2005_HTRAP_MASK 0xff
116 #define V8_TRAP_MASK 0x7f
117
118 static int sign_extend(int x, int len)
119 {
120 len = 32 - len;
121 return (x << len) >> len;
122 }
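/* e.g. sign_extend(0x1fff, 13) == -1: the field is shifted up so its
   top bit lands in bit 31, then shifted back arithmetically. */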
123
124 #define IS_IMM (insn & (1<<13))
125
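/* Mark half of the FPU register file dirty in FPRS: bit 0 (dirty lower,
   %f0..%f31) or bit 1 (dirty upper). */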
126 static inline void gen_update_fprs_dirty(int rd)
127 {
128 #if defined(TARGET_SPARC64)
129 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
130 #endif
131 }
132
133 /* floating point registers moves */
134 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
135 {
136 #if TCG_TARGET_REG_BITS == 32
137 if (src & 1) {
138 return TCGV_LOW(cpu_fpr[src / 2]);
139 } else {
140 return TCGV_HIGH(cpu_fpr[src / 2]);
141 }
142 #else
143 if (src & 1) {
144 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
145 } else {
146 TCGv_i32 ret = tcg_temp_local_new_i32();
147 TCGv_i64 t = tcg_temp_new_i64();
148
149 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
150 tcg_gen_trunc_i64_i32(ret, t);
151 tcg_temp_free_i64(t);
152
153 dc->t32[dc->n_t32++] = ret;
154 assert(dc->n_t32 <= ARRAY_SIZE(dc->t32));
155
156 return ret;
157 }
158 #endif
159 }
160
161 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
162 {
163 #if TCG_TARGET_REG_BITS == 32
164 if (dst & 1) {
165 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
166 } else {
167 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
168 }
169 #else
170 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
171 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
172 (dst & 1 ? 0 : 32), 32);
173 #endif
174 gen_update_fprs_dirty(dst);
175 }
176
177 static TCGv_i32 gen_dest_fpr_F(void)
178 {
179 return cpu_tmp32;
180 }
181
182 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
183 {
184 src = DFPREG(src);
185 return cpu_fpr[src / 2];
186 }
187
188 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
189 {
190 dst = DFPREG(dst);
191 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
192 gen_update_fprs_dirty(dst);
193 }
194
195 static TCGv_i64 gen_dest_fpr_D(void)
196 {
197 return cpu_tmp64;
198 }
199
200 static void gen_op_load_fpr_QT0(unsigned int src)
201 {
202 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
203 offsetof(CPU_QuadU, ll.upper));
204 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
205 offsetof(CPU_QuadU, ll.lower));
206 }
207
208 static void gen_op_load_fpr_QT1(unsigned int src)
209 {
210 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
211 offsetof(CPU_QuadU, ll.upper));
212 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
213 offsetof(CPU_QuadU, ll.lower));
214 }
215
216 static void gen_op_store_QT0_fpr(unsigned int dst)
217 {
218 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
219 offsetof(CPU_QuadU, ll.upper));
220 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
221 offsetof(CPU_QuadU, ll.lower));
222 }
223
224 #ifdef TARGET_SPARC64
225 static void gen_move_Q(unsigned int rd, unsigned int rs)
226 {
227 rd = QFPREG(rd);
228 rs = QFPREG(rs);
229
230 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
231 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
232 gen_update_fprs_dirty(rd);
233 }
234 #endif
235
236 /* moves */
237 #ifdef CONFIG_USER_ONLY
238 #define supervisor(dc) 0
239 #ifdef TARGET_SPARC64
240 #define hypervisor(dc) 0
241 #endif
242 #else
243 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
244 #ifdef TARGET_SPARC64
245 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
246 #else
247 #endif
248 #endif
249
250 #ifdef TARGET_SPARC64
251 #ifndef TARGET_ABI32
252 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
253 #else
254 #define AM_CHECK(dc) (1)
255 #endif
256 #endif
257
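/* When the sparc64 address mask (PSTATE.AM) is in effect, or always for
   the 32-bit ABI in user emulation, effective addresses are truncated
   to 32 bits. */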
258 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
259 {
260 #ifdef TARGET_SPARC64
261 if (AM_CHECK(dc))
262 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
263 #endif
264 }
265
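/* %g0 always reads as zero; %g1..%g7 live in TCG globals, while the
   windowed registers (regs 8..31) are accessed through cpu_regwptr,
   which points into the current register window. */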
266 static inline void gen_movl_reg_TN(int reg, TCGv tn)
267 {
268 if (reg == 0)
269 tcg_gen_movi_tl(tn, 0);
270 else if (reg < 8)
271 tcg_gen_mov_tl(tn, cpu_gregs[reg]);
272 else {
273 tcg_gen_ld_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
274 }
275 }
276
277 static inline void gen_movl_TN_reg(int reg, TCGv tn)
278 {
279 if (reg == 0)
280 return;
281 else if (reg < 8)
282 tcg_gen_mov_tl(cpu_gregs[reg], tn);
283 else {
284 tcg_gen_st_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
285 }
286 }
287
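/* Chaining TBs with a direct jump is only safe when both pc and npc
   stay on the same guest page as the current TB (so page invalidation
   can unlink the chain) and we are not single-stepping; otherwise exit
   to the main loop with pc/npc updated. */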
288 static inline void gen_goto_tb(DisasContext *s, int tb_num,
289 target_ulong pc, target_ulong npc)
290 {
291 TranslationBlock *tb;
292
293 tb = s->tb;
294 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
295 (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
296 !s->singlestep) {
297 /* jump to same page: we can use a direct jump */
298 tcg_gen_goto_tb(tb_num);
299 tcg_gen_movi_tl(cpu_pc, pc);
300 tcg_gen_movi_tl(cpu_npc, npc);
301 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
302 } else {
303 /* jump to another page: currently not optimized */
304 tcg_gen_movi_tl(cpu_pc, pc);
305 tcg_gen_movi_tl(cpu_npc, npc);
306 tcg_gen_exit_tb(0);
307 }
308 }
309
310 // XXX suboptimal
311 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
312 {
313 tcg_gen_extu_i32_tl(reg, src);
314 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
315 tcg_gen_andi_tl(reg, reg, 0x1);
316 }
317
318 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
319 {
320 tcg_gen_extu_i32_tl(reg, src);
321 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
322 tcg_gen_andi_tl(reg, reg, 0x1);
323 }
324
325 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
326 {
327 tcg_gen_extu_i32_tl(reg, src);
328 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
329 tcg_gen_andi_tl(reg, reg, 0x1);
330 }
331
332 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
333 {
334 tcg_gen_extu_i32_tl(reg, src);
335 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
336 tcg_gen_andi_tl(reg, reg, 0x1);
337 }
338
339 static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
340 {
341 tcg_gen_mov_tl(cpu_cc_src, src1);
342 tcg_gen_movi_tl(cpu_cc_src2, src2);
343 tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
344 tcg_gen_mov_tl(dst, cpu_cc_dst);
345 }
346
347 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
348 {
349 tcg_gen_mov_tl(cpu_cc_src, src1);
350 tcg_gen_mov_tl(cpu_cc_src2, src2);
351 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
352 tcg_gen_mov_tl(dst, cpu_cc_dst);
353 }
354
355 static TCGv_i32 gen_add32_carry32(void)
356 {
357 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
358
359     /* Carry is computed from a previous add: unsigned overflow occurred iff (dst < src) */
360 #if TARGET_LONG_BITS == 64
361 cc_src1_32 = tcg_temp_new_i32();
362 cc_src2_32 = tcg_temp_new_i32();
363 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
364 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
365 #else
366 cc_src1_32 = cpu_cc_dst;
367 cc_src2_32 = cpu_cc_src;
368 #endif
369
370 carry_32 = tcg_temp_new_i32();
371 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
372
373 #if TARGET_LONG_BITS == 64
374 tcg_temp_free_i32(cc_src1_32);
375 tcg_temp_free_i32(cc_src2_32);
376 #endif
377
378 return carry_32;
379 }
380
381 static TCGv_i32 gen_sub32_carry32(void)
382 {
383 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
384
385 /* Carry is computed from a previous borrow: (src1 < src2) */
386 #if TARGET_LONG_BITS == 64
387 cc_src1_32 = tcg_temp_new_i32();
388 cc_src2_32 = tcg_temp_new_i32();
389 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
390 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
391 #else
392 cc_src1_32 = cpu_cc_src;
393 cc_src2_32 = cpu_cc_src2;
394 #endif
395
396 carry_32 = tcg_temp_new_i32();
397 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
398
399 #if TARGET_LONG_BITS == 64
400 tcg_temp_free_i32(cc_src1_32);
401 tcg_temp_free_i32(cc_src2_32);
402 #endif
403
404 return carry_32;
405 }
406
407 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
408 TCGv src2, int update_cc)
409 {
410 TCGv_i32 carry_32;
411 TCGv carry;
412
413 switch (dc->cc_op) {
414 case CC_OP_DIV:
415 case CC_OP_LOGIC:
416 /* Carry is known to be zero. Fall back to plain ADD. */
417 if (update_cc) {
418 gen_op_add_cc(dst, src1, src2);
419 } else {
420 tcg_gen_add_tl(dst, src1, src2);
421 }
422 return;
423
424 case CC_OP_ADD:
425 case CC_OP_TADD:
426 case CC_OP_TADDTV:
427 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
428 {
429 /* For 32-bit hosts, we can re-use the host's hardware carry
430 generation by using an ADD2 opcode. We discard the low
431 part of the output. Ideally we'd combine this operation
432 with the add that generated the carry in the first place. */
433 TCGv dst_low = tcg_temp_new();
434 tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
435 cpu_cc_src, src1, cpu_cc_src2, src2);
436 tcg_temp_free(dst_low);
437 goto add_done;
438 }
439 #endif
440 carry_32 = gen_add32_carry32();
441 break;
442
443 case CC_OP_SUB:
444 case CC_OP_TSUB:
445 case CC_OP_TSUBTV:
446 carry_32 = gen_sub32_carry32();
447 break;
448
449 default:
450 /* We need external help to produce the carry. */
451 carry_32 = tcg_temp_new_i32();
452 gen_helper_compute_C_icc(carry_32, cpu_env);
453 break;
454 }
455
456 #if TARGET_LONG_BITS == 64
457 carry = tcg_temp_new();
458 tcg_gen_extu_i32_i64(carry, carry_32);
459 #else
460 carry = carry_32;
461 #endif
462
463 tcg_gen_add_tl(dst, src1, src2);
464 tcg_gen_add_tl(dst, dst, carry);
465
466 tcg_temp_free_i32(carry_32);
467 #if TARGET_LONG_BITS == 64
468 tcg_temp_free(carry);
469 #endif
470
471 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
472 add_done:
473 #endif
474 if (update_cc) {
475 tcg_gen_mov_tl(cpu_cc_src, src1);
476 tcg_gen_mov_tl(cpu_cc_src2, src2);
477 tcg_gen_mov_tl(cpu_cc_dst, dst);
478 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
479 dc->cc_op = CC_OP_ADDX;
480 }
481 }
482
483 static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
484 {
485 tcg_gen_mov_tl(cpu_cc_src, src1);
486 tcg_gen_movi_tl(cpu_cc_src2, src2);
487 if (src2 == 0) {
488 tcg_gen_mov_tl(cpu_cc_dst, src1);
489 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
490 dc->cc_op = CC_OP_LOGIC;
491 } else {
492 tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
493 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
494 dc->cc_op = CC_OP_SUB;
495 }
496 tcg_gen_mov_tl(dst, cpu_cc_dst);
497 }
498
499 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
500 {
501 tcg_gen_mov_tl(cpu_cc_src, src1);
502 tcg_gen_mov_tl(cpu_cc_src2, src2);
503 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
504 tcg_gen_mov_tl(dst, cpu_cc_dst);
505 }
506
507 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
508 TCGv src2, int update_cc)
509 {
510 TCGv_i32 carry_32;
511 TCGv carry;
512
513 switch (dc->cc_op) {
514 case CC_OP_DIV:
515 case CC_OP_LOGIC:
516 /* Carry is known to be zero. Fall back to plain SUB. */
517 if (update_cc) {
518 gen_op_sub_cc(dst, src1, src2);
519 } else {
520 tcg_gen_sub_tl(dst, src1, src2);
521 }
522 return;
523
524 case CC_OP_ADD:
525 case CC_OP_TADD:
526 case CC_OP_TADDTV:
527 carry_32 = gen_add32_carry32();
528 break;
529
530 case CC_OP_SUB:
531 case CC_OP_TSUB:
532 case CC_OP_TSUBTV:
533 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
534 {
535 /* For 32-bit hosts, we can re-use the host's hardware carry
536 generation by using a SUB2 opcode. We discard the low
537 part of the output. Ideally we'd combine this operation
538            with the sub that generated the borrow in the first place. */
539 TCGv dst_low = tcg_temp_new();
540 tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
541 cpu_cc_src, src1, cpu_cc_src2, src2);
542 tcg_temp_free(dst_low);
543 goto sub_done;
544 }
545 #endif
546 carry_32 = gen_sub32_carry32();
547 break;
548
549 default:
550 /* We need external help to produce the carry. */
551 carry_32 = tcg_temp_new_i32();
552 gen_helper_compute_C_icc(carry_32, cpu_env);
553 break;
554 }
555
556 #if TARGET_LONG_BITS == 64
557 carry = tcg_temp_new();
558 tcg_gen_extu_i32_i64(carry, carry_32);
559 #else
560 carry = carry_32;
561 #endif
562
563 tcg_gen_sub_tl(dst, src1, src2);
564 tcg_gen_sub_tl(dst, dst, carry);
565
566 tcg_temp_free_i32(carry_32);
567 #if TARGET_LONG_BITS == 64
568 tcg_temp_free(carry);
569 #endif
570
571 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
572 sub_done:
573 #endif
574 if (update_cc) {
575 tcg_gen_mov_tl(cpu_cc_src, src1);
576 tcg_gen_mov_tl(cpu_cc_src2, src2);
577 tcg_gen_mov_tl(cpu_cc_dst, dst);
578 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
579 dc->cc_op = CC_OP_SUBX;
580 }
581 }
582
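/* MULScc: one step of the V8 multiply-step algorithm.  If the low bit
   of Y is clear, the addend (src2) is replaced by zero via movcond;
   the low bit of src1 is shifted into the top of Y, and (N xor V) is
   shifted into the top of src1 before the add that sets the condition
   codes. */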
583 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
584 {
585 TCGv r_temp, zero;
586
587 r_temp = tcg_temp_new();
588
589 /* old op:
590 if (!(env->y & 1))
591 T1 = 0;
592 */
593 zero = tcg_const_tl(0);
594 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
595 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
596 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
597 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
598 zero, cpu_cc_src2);
599 tcg_temp_free(zero);
600
601 // b2 = T0 & 1;
602 // env->y = (b2 << 31) | (env->y >> 1);
603 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
604 tcg_gen_shli_tl(r_temp, r_temp, 31);
605 tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
606 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
607 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
608 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
609
610 // b1 = N ^ V;
611 gen_mov_reg_N(cpu_tmp0, cpu_psr);
612 gen_mov_reg_V(r_temp, cpu_psr);
613 tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
614 tcg_temp_free(r_temp);
615
616 // T0 = (b1 << 31) | (T0 >> 1);
617 // src1 = T0;
618 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
619 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
620 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
621
622 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
623
624 tcg_gen_mov_tl(dst, cpu_cc_dst);
625 }
626
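/* UMUL/SMUL: 32x32 -> 64-bit multiply.  The low 32 bits of the product
   go to dst and the high 32 bits to the Y register, zero- or
   sign-extending the truncated operands as requested. */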
627 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
628 {
629 TCGv_i32 r_src1, r_src2;
630 TCGv_i64 r_temp, r_temp2;
631
632 r_src1 = tcg_temp_new_i32();
633 r_src2 = tcg_temp_new_i32();
634
635 tcg_gen_trunc_tl_i32(r_src1, src1);
636 tcg_gen_trunc_tl_i32(r_src2, src2);
637
638 r_temp = tcg_temp_new_i64();
639 r_temp2 = tcg_temp_new_i64();
640
641 if (sign_ext) {
642 tcg_gen_ext_i32_i64(r_temp, r_src2);
643 tcg_gen_ext_i32_i64(r_temp2, r_src1);
644 } else {
645 tcg_gen_extu_i32_i64(r_temp, r_src2);
646 tcg_gen_extu_i32_i64(r_temp2, r_src1);
647 }
648
649 tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
650
651 tcg_gen_shri_i64(r_temp, r_temp2, 32);
652 tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
653 tcg_temp_free_i64(r_temp);
654 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
655
656 tcg_gen_trunc_i64_tl(dst, r_temp2);
657
658 tcg_temp_free_i64(r_temp2);
659
660 tcg_temp_free_i32(r_src1);
661 tcg_temp_free_i32(r_src2);
662 }
663
664 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
665 {
666 /* zero-extend truncated operands before multiplication */
667 gen_op_multiply(dst, src1, src2, 0);
668 }
669
670 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
671 {
672 /* sign-extend truncated operands before multiplication */
673 gen_op_multiply(dst, src1, src2, 1);
674 }
675
676 // 1
677 static inline void gen_op_eval_ba(TCGv dst)
678 {
679 tcg_gen_movi_tl(dst, 1);
680 }
681
682 // Z
683 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
684 {
685 gen_mov_reg_Z(dst, src);
686 }
687
688 // Z | (N ^ V)
689 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
690 {
691 gen_mov_reg_N(cpu_tmp0, src);
692 gen_mov_reg_V(dst, src);
693 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
694 gen_mov_reg_Z(cpu_tmp0, src);
695 tcg_gen_or_tl(dst, dst, cpu_tmp0);
696 }
697
698 // N ^ V
699 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
700 {
701 gen_mov_reg_V(cpu_tmp0, src);
702 gen_mov_reg_N(dst, src);
703 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
704 }
705
706 // C | Z
707 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
708 {
709 gen_mov_reg_Z(cpu_tmp0, src);
710 gen_mov_reg_C(dst, src);
711 tcg_gen_or_tl(dst, dst, cpu_tmp0);
712 }
713
714 // C
715 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
716 {
717 gen_mov_reg_C(dst, src);
718 }
719
720 // V
721 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
722 {
723 gen_mov_reg_V(dst, src);
724 }
725
726 // 0
727 static inline void gen_op_eval_bn(TCGv dst)
728 {
729 tcg_gen_movi_tl(dst, 0);
730 }
731
732 // N
733 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
734 {
735 gen_mov_reg_N(dst, src);
736 }
737
738 // !Z
739 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
740 {
741 gen_mov_reg_Z(dst, src);
742 tcg_gen_xori_tl(dst, dst, 0x1);
743 }
744
745 // !(Z | (N ^ V))
746 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
747 {
748 gen_mov_reg_N(cpu_tmp0, src);
749 gen_mov_reg_V(dst, src);
750 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
751 gen_mov_reg_Z(cpu_tmp0, src);
752 tcg_gen_or_tl(dst, dst, cpu_tmp0);
753 tcg_gen_xori_tl(dst, dst, 0x1);
754 }
755
756 // !(N ^ V)
757 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
758 {
759 gen_mov_reg_V(cpu_tmp0, src);
760 gen_mov_reg_N(dst, src);
761 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
762 tcg_gen_xori_tl(dst, dst, 0x1);
763 }
764
765 // !(C | Z)
766 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
767 {
768 gen_mov_reg_Z(cpu_tmp0, src);
769 gen_mov_reg_C(dst, src);
770 tcg_gen_or_tl(dst, dst, cpu_tmp0);
771 tcg_gen_xori_tl(dst, dst, 0x1);
772 }
773
774 // !C
775 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
776 {
777 gen_mov_reg_C(dst, src);
778 tcg_gen_xori_tl(dst, dst, 0x1);
779 }
780
781 // !N
782 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
783 {
784 gen_mov_reg_N(dst, src);
785 tcg_gen_xori_tl(dst, dst, 0x1);
786 }
787
788 // !V
789 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
790 {
791 gen_mov_reg_V(dst, src);
792 tcg_gen_xori_tl(dst, dst, 0x1);
793 }
794
795 /*
796    FPSR bit field FCC1:FCC0 (floating-point condition code):
797      0  equal
798      1  less
799      2  greater
800      3  unordered
801 */
802 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
803 unsigned int fcc_offset)
804 {
805 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
806 tcg_gen_andi_tl(reg, reg, 0x1);
807 }
808
809 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
810 unsigned int fcc_offset)
811 {
812 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
813 tcg_gen_andi_tl(reg, reg, 0x1);
814 }
815
816 // !0: FCC0 | FCC1
817 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
818 unsigned int fcc_offset)
819 {
820 gen_mov_reg_FCC0(dst, src, fcc_offset);
821 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
822 tcg_gen_or_tl(dst, dst, cpu_tmp0);
823 }
824
825 // 1 or 2: FCC0 ^ FCC1
826 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
827 unsigned int fcc_offset)
828 {
829 gen_mov_reg_FCC0(dst, src, fcc_offset);
830 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
831 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
832 }
833
834 // 1 or 3: FCC0
835 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
836 unsigned int fcc_offset)
837 {
838 gen_mov_reg_FCC0(dst, src, fcc_offset);
839 }
840
841 // 1: FCC0 & !FCC1
842 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
843 unsigned int fcc_offset)
844 {
845 gen_mov_reg_FCC0(dst, src, fcc_offset);
846 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
847 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
848 tcg_gen_and_tl(dst, dst, cpu_tmp0);
849 }
850
851 // 2 or 3: FCC1
852 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
853 unsigned int fcc_offset)
854 {
855 gen_mov_reg_FCC1(dst, src, fcc_offset);
856 }
857
858 // 2: !FCC0 & FCC1
859 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
860 unsigned int fcc_offset)
861 {
862 gen_mov_reg_FCC0(dst, src, fcc_offset);
863 tcg_gen_xori_tl(dst, dst, 0x1);
864 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
865 tcg_gen_and_tl(dst, dst, cpu_tmp0);
866 }
867
868 // 3: FCC0 & FCC1
869 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
870 unsigned int fcc_offset)
871 {
872 gen_mov_reg_FCC0(dst, src, fcc_offset);
873 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
874 tcg_gen_and_tl(dst, dst, cpu_tmp0);
875 }
876
877 // 0: !(FCC0 | FCC1)
878 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
879 unsigned int fcc_offset)
880 {
881 gen_mov_reg_FCC0(dst, src, fcc_offset);
882 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
883 tcg_gen_or_tl(dst, dst, cpu_tmp0);
884 tcg_gen_xori_tl(dst, dst, 0x1);
885 }
886
887 // 0 or 3: !(FCC0 ^ FCC1)
888 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
889 unsigned int fcc_offset)
890 {
891 gen_mov_reg_FCC0(dst, src, fcc_offset);
892 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
893 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
894 tcg_gen_xori_tl(dst, dst, 0x1);
895 }
896
897 // 0 or 2: !FCC0
898 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
899 unsigned int fcc_offset)
900 {
901 gen_mov_reg_FCC0(dst, src, fcc_offset);
902 tcg_gen_xori_tl(dst, dst, 0x1);
903 }
904
905 // !1: !(FCC0 & !FCC1)
906 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
907 unsigned int fcc_offset)
908 {
909 gen_mov_reg_FCC0(dst, src, fcc_offset);
910 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
911 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
912 tcg_gen_and_tl(dst, dst, cpu_tmp0);
913 tcg_gen_xori_tl(dst, dst, 0x1);
914 }
915
916 // 0 or 1: !FCC1
917 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
918 unsigned int fcc_offset)
919 {
920 gen_mov_reg_FCC1(dst, src, fcc_offset);
921 tcg_gen_xori_tl(dst, dst, 0x1);
922 }
923
924 // !2: !(!FCC0 & FCC1)
925 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
926 unsigned int fcc_offset)
927 {
928 gen_mov_reg_FCC0(dst, src, fcc_offset);
929 tcg_gen_xori_tl(dst, dst, 0x1);
930 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
931 tcg_gen_and_tl(dst, dst, cpu_tmp0);
932 tcg_gen_xori_tl(dst, dst, 0x1);
933 }
934
935 // !3: !(FCC0 & FCC1)
936 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
937 unsigned int fcc_offset)
938 {
939 gen_mov_reg_FCC0(dst, src, fcc_offset);
940 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
941 tcg_gen_and_tl(dst, dst, cpu_tmp0);
942 tcg_gen_xori_tl(dst, dst, 0x1);
943 }
944
945 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
946 target_ulong pc2, TCGv r_cond)
947 {
948 int l1;
949
950 l1 = gen_new_label();
951
952 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
953
954 gen_goto_tb(dc, 0, pc1, pc1 + 4);
955
956 gen_set_label(l1);
957 gen_goto_tb(dc, 1, pc2, pc2 + 4);
958 }
959
960 static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
961 target_ulong pc2, TCGv r_cond)
962 {
963 int l1;
964
965 l1 = gen_new_label();
966
967 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
968
969 gen_goto_tb(dc, 0, pc2, pc1);
970
971 gen_set_label(l1);
972 gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
973 }
974
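/* npc is JUMP_PC: select between the two recorded jump targets with a
   movcond on the branch condition, avoiding a branch in the generated
   code. */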
975 static inline void gen_generic_branch(DisasContext *dc)
976 {
977 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
978 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
979 TCGv zero = tcg_const_tl(0);
980
981 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
982
983 tcg_temp_free(npc0);
984 tcg_temp_free(npc1);
985 tcg_temp_free(zero);
986 }
987
988 /* call this function before using the condition register as it may
989 have been set for a jump */
990 static inline void flush_cond(DisasContext *dc)
991 {
992 if (dc->npc == JUMP_PC) {
993 gen_generic_branch(dc);
994 dc->npc = DYNAMIC_PC;
995 }
996 }
997
998 static inline void save_npc(DisasContext *dc)
999 {
1000 if (dc->npc == JUMP_PC) {
1001 gen_generic_branch(dc);
1002 dc->npc = DYNAMIC_PC;
1003 } else if (dc->npc != DYNAMIC_PC) {
1004 tcg_gen_movi_tl(cpu_npc, dc->npc);
1005 }
1006 }
1007
1008 static inline void save_state(DisasContext *dc)
1009 {
1010 tcg_gen_movi_tl(cpu_pc, dc->pc);
1011 /* flush pending conditional evaluations before exposing cpu state */
1012 if (dc->cc_op != CC_OP_FLAGS) {
1013 dc->cc_op = CC_OP_FLAGS;
1014 gen_helper_compute_psr(cpu_env);
1015 }
1016 save_npc(dc);
1017 }
1018
1019 static inline void gen_mov_pc_npc(DisasContext *dc)
1020 {
1021 if (dc->npc == JUMP_PC) {
1022 gen_generic_branch(dc);
1023 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1024 dc->pc = DYNAMIC_PC;
1025 } else if (dc->npc == DYNAMIC_PC) {
1026 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1027 dc->pc = DYNAMIC_PC;
1028 } else {
1029 dc->pc = dc->npc;
1030 }
1031 }
1032
1033 static inline void gen_op_next_insn(void)
1034 {
1035 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1036 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1037 }
1038
1039 static void free_compare(DisasCompare *cmp)
1040 {
1041 if (!cmp->g1) {
1042 tcg_temp_free(cmp->c1);
1043 }
1044 if (!cmp->g2) {
1045 tcg_temp_free(cmp->c2);
1046 }
1047 }
1048
1049 static void gen_compare(DisasCompare *cmp, unsigned int cc, unsigned int cond,
1050 DisasContext *dc)
1051 {
1052 TCGv_i32 r_src;
1053 TCGv r_dst;
1054
1055 /* For now we still generate a straight boolean result. */
1056 cmp->cond = TCG_COND_NE;
1057 cmp->is_bool = true;
1058 cmp->g1 = cmp->g2 = false;
1059 cmp->c1 = r_dst = tcg_temp_new();
1060 cmp->c2 = tcg_const_tl(0);
1061
1062 #ifdef TARGET_SPARC64
1063 if (cc)
1064 r_src = cpu_xcc;
1065 else
1066 r_src = cpu_psr;
1067 #else
1068 r_src = cpu_psr;
1069 #endif
1070 switch (dc->cc_op) {
1071 case CC_OP_FLAGS:
1072 break;
1073 default:
1074 gen_helper_compute_psr(cpu_env);
1075 dc->cc_op = CC_OP_FLAGS;
1076 break;
1077 }
1078 switch (cond) {
1079 case 0x0:
1080 gen_op_eval_bn(r_dst);
1081 break;
1082 case 0x1:
1083 gen_op_eval_be(r_dst, r_src);
1084 break;
1085 case 0x2:
1086 gen_op_eval_ble(r_dst, r_src);
1087 break;
1088 case 0x3:
1089 gen_op_eval_bl(r_dst, r_src);
1090 break;
1091 case 0x4:
1092 gen_op_eval_bleu(r_dst, r_src);
1093 break;
1094 case 0x5:
1095 gen_op_eval_bcs(r_dst, r_src);
1096 break;
1097 case 0x6:
1098 gen_op_eval_bneg(r_dst, r_src);
1099 break;
1100 case 0x7:
1101 gen_op_eval_bvs(r_dst, r_src);
1102 break;
1103 case 0x8:
1104 gen_op_eval_ba(r_dst);
1105 break;
1106 case 0x9:
1107 gen_op_eval_bne(r_dst, r_src);
1108 break;
1109 case 0xa:
1110 gen_op_eval_bg(r_dst, r_src);
1111 break;
1112 case 0xb:
1113 gen_op_eval_bge(r_dst, r_src);
1114 break;
1115 case 0xc:
1116 gen_op_eval_bgu(r_dst, r_src);
1117 break;
1118 case 0xd:
1119 gen_op_eval_bcc(r_dst, r_src);
1120 break;
1121 case 0xe:
1122 gen_op_eval_bpos(r_dst, r_src);
1123 break;
1124 case 0xf:
1125 gen_op_eval_bvc(r_dst, r_src);
1126 break;
1127 }
1128 }
1129
1130 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1131 {
1132 unsigned int offset;
1133 TCGv r_dst;
1134
1135 /* For now we still generate a straight boolean result. */
1136 cmp->cond = TCG_COND_NE;
1137 cmp->is_bool = true;
1138 cmp->g1 = cmp->g2 = false;
1139 cmp->c1 = r_dst = tcg_temp_new();
1140 cmp->c2 = tcg_const_tl(0);
1141
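/* Select which fcc field of the FSR to test: fcc0 sits at FSR bits
   11:10, while fcc1..fcc3 (sparc64) sit at bits 33:32, 35:34 and 37:36;
   the gen_op_eval_fb* helpers shift by FSR_FCC0_SHIFT (10) plus this
   offset. */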
1142 switch (cc) {
1143 default:
1144 case 0x0:
1145 offset = 0;
1146 break;
1147 case 0x1:
1148 offset = 32 - 10;
1149 break;
1150 case 0x2:
1151 offset = 34 - 10;
1152 break;
1153 case 0x3:
1154 offset = 36 - 10;
1155 break;
1156 }
1157
1158 switch (cond) {
1159 case 0x0:
1160 gen_op_eval_bn(r_dst);
1161 break;
1162 case 0x1:
1163 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1164 break;
1165 case 0x2:
1166 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1167 break;
1168 case 0x3:
1169 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1170 break;
1171 case 0x4:
1172 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1173 break;
1174 case 0x5:
1175 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1176 break;
1177 case 0x6:
1178 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1179 break;
1180 case 0x7:
1181 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1182 break;
1183 case 0x8:
1184 gen_op_eval_ba(r_dst);
1185 break;
1186 case 0x9:
1187 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1188 break;
1189 case 0xa:
1190 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1191 break;
1192 case 0xb:
1193 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1194 break;
1195 case 0xc:
1196 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1197 break;
1198 case 0xd:
1199 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1200 break;
1201 case 0xe:
1202 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1203 break;
1204 case 0xf:
1205 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1206 break;
1207 }
1208 }
1209
1210 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1211 DisasContext *dc)
1212 {
1213 DisasCompare cmp;
1214 gen_compare(&cmp, cc, cond, dc);
1215
1216 /* The interface is to return a boolean in r_dst. */
1217 if (cmp.is_bool) {
1218 tcg_gen_mov_tl(r_dst, cmp.c1);
1219 } else {
1220 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1221 }
1222
1223 free_compare(&cmp);
1224 }
1225
1226 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1227 {
1228 DisasCompare cmp;
1229 gen_fcompare(&cmp, cc, cond);
1230
1231 /* The interface is to return a boolean in r_dst. */
1232 if (cmp.is_bool) {
1233 tcg_gen_mov_tl(r_dst, cmp.c1);
1234 } else {
1235 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1236 }
1237
1238 free_compare(&cmp);
1239 }
1240
1241 #ifdef TARGET_SPARC64
1242 // Inverted logic
1243 static const int gen_tcg_cond_reg[8] = {
1244 -1,
1245 TCG_COND_NE,
1246 TCG_COND_GT,
1247 TCG_COND_GE,
1248 -1,
1249 TCG_COND_EQ,
1250 TCG_COND_LE,
1251 TCG_COND_LT,
1252 };
1253
1254 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1255 {
1256 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1257 cmp->is_bool = false;
1258 cmp->g1 = true;
1259 cmp->g2 = false;
1260 cmp->c1 = r_src;
1261 cmp->c2 = tcg_const_tl(0);
1262 }
1263
1264 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1265 {
1266 DisasCompare cmp;
1267 gen_compare_reg(&cmp, cond, r_src);
1268
1269 /* The interface is to return a boolean in r_dst. */
1270 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1271
1272 free_compare(&cmp);
1273 }
1274 #endif
1275
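/* Branches and the annul bit: with a=1 the delay slot executes only
   when the branch is taken, so "bn,a" always skips it and "ba,a" jumps
   straight to the target; with a=0 the delay slot always executes, and
   a conditional branch leaves npc as JUMP_PC until the condition is
   resolved. */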
1276 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1277 {
1278 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1279 target_ulong target = dc->pc + offset;
1280
1281 #ifdef TARGET_SPARC64
1282 if (unlikely(AM_CHECK(dc))) {
1283 target &= 0xffffffffULL;
1284 }
1285 #endif
1286 if (cond == 0x0) {
1287 /* unconditional not taken */
1288 if (a) {
1289 dc->pc = dc->npc + 4;
1290 dc->npc = dc->pc + 4;
1291 } else {
1292 dc->pc = dc->npc;
1293 dc->npc = dc->pc + 4;
1294 }
1295 } else if (cond == 0x8) {
1296 /* unconditional taken */
1297 if (a) {
1298 dc->pc = target;
1299 dc->npc = dc->pc + 4;
1300 } else {
1301 dc->pc = dc->npc;
1302 dc->npc = target;
1303 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1304 }
1305 } else {
1306 flush_cond(dc);
1307 gen_cond(cpu_cond, cc, cond, dc);
1308 if (a) {
1309 gen_branch_a(dc, target, dc->npc, cpu_cond);
1310 dc->is_br = 1;
1311 } else {
1312 dc->pc = dc->npc;
1313 dc->jump_pc[0] = target;
1314 if (unlikely(dc->npc == DYNAMIC_PC)) {
1315 dc->jump_pc[1] = DYNAMIC_PC;
1316 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1317 } else {
1318 dc->jump_pc[1] = dc->npc + 4;
1319 dc->npc = JUMP_PC;
1320 }
1321 }
1322 }
1323 }
1324
1325 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1326 {
1327 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1328 target_ulong target = dc->pc + offset;
1329
1330 #ifdef TARGET_SPARC64
1331 if (unlikely(AM_CHECK(dc))) {
1332 target &= 0xffffffffULL;
1333 }
1334 #endif
1335 if (cond == 0x0) {
1336 /* unconditional not taken */
1337 if (a) {
1338 dc->pc = dc->npc + 4;
1339 dc->npc = dc->pc + 4;
1340 } else {
1341 dc->pc = dc->npc;
1342 dc->npc = dc->pc + 4;
1343 }
1344 } else if (cond == 0x8) {
1345 /* unconditional taken */
1346 if (a) {
1347 dc->pc = target;
1348 dc->npc = dc->pc + 4;
1349 } else {
1350 dc->pc = dc->npc;
1351 dc->npc = target;
1352 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1353 }
1354 } else {
1355 flush_cond(dc);
1356 gen_fcond(cpu_cond, cc, cond);
1357 if (a) {
1358 gen_branch_a(dc, target, dc->npc, cpu_cond);
1359 dc->is_br = 1;
1360 } else {
1361 dc->pc = dc->npc;
1362 dc->jump_pc[0] = target;
1363 if (unlikely(dc->npc == DYNAMIC_PC)) {
1364 dc->jump_pc[1] = DYNAMIC_PC;
1365 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1366 } else {
1367 dc->jump_pc[1] = dc->npc + 4;
1368 dc->npc = JUMP_PC;
1369 }
1370 }
1371 }
1372 }
1373
1374 #ifdef TARGET_SPARC64
1375 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1376 TCGv r_reg)
1377 {
1378 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1379 target_ulong target = dc->pc + offset;
1380
1381 if (unlikely(AM_CHECK(dc))) {
1382 target &= 0xffffffffULL;
1383 }
1384 flush_cond(dc);
1385 gen_cond_reg(cpu_cond, cond, r_reg);
1386 if (a) {
1387 gen_branch_a(dc, target, dc->npc, cpu_cond);
1388 dc->is_br = 1;
1389 } else {
1390 dc->pc = dc->npc;
1391 dc->jump_pc[0] = target;
1392 if (unlikely(dc->npc == DYNAMIC_PC)) {
1393 dc->jump_pc[1] = DYNAMIC_PC;
1394 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1395 } else {
1396 dc->jump_pc[1] = dc->npc + 4;
1397 dc->npc = JUMP_PC;
1398 }
1399 }
1400 }
1401
1402 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1403 {
1404 switch (fccno) {
1405 case 0:
1406 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1407 break;
1408 case 1:
1409 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1410 break;
1411 case 2:
1412 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1413 break;
1414 case 3:
1415 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1416 break;
1417 }
1418 }
1419
1420 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1421 {
1422 switch (fccno) {
1423 case 0:
1424 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1425 break;
1426 case 1:
1427 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1428 break;
1429 case 2:
1430 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1431 break;
1432 case 3:
1433 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1434 break;
1435 }
1436 }
1437
1438 static inline void gen_op_fcmpq(int fccno)
1439 {
1440 switch (fccno) {
1441 case 0:
1442 gen_helper_fcmpq(cpu_env);
1443 break;
1444 case 1:
1445 gen_helper_fcmpq_fcc1(cpu_env);
1446 break;
1447 case 2:
1448 gen_helper_fcmpq_fcc2(cpu_env);
1449 break;
1450 case 3:
1451 gen_helper_fcmpq_fcc3(cpu_env);
1452 break;
1453 }
1454 }
1455
1456 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1457 {
1458 switch (fccno) {
1459 case 0:
1460 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1461 break;
1462 case 1:
1463 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1464 break;
1465 case 2:
1466 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1467 break;
1468 case 3:
1469 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1470 break;
1471 }
1472 }
1473
1474 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1475 {
1476 switch (fccno) {
1477 case 0:
1478 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1479 break;
1480 case 1:
1481 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1482 break;
1483 case 2:
1484 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1485 break;
1486 case 3:
1487 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1488 break;
1489 }
1490 }
1491
1492 static inline void gen_op_fcmpeq(int fccno)
1493 {
1494 switch (fccno) {
1495 case 0:
1496 gen_helper_fcmpeq(cpu_env);
1497 break;
1498 case 1:
1499 gen_helper_fcmpeq_fcc1(cpu_env);
1500 break;
1501 case 2:
1502 gen_helper_fcmpeq_fcc2(cpu_env);
1503 break;
1504 case 3:
1505 gen_helper_fcmpeq_fcc3(cpu_env);
1506 break;
1507 }
1508 }
1509
1510 #else
1511
1512 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1513 {
1514 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1515 }
1516
1517 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1518 {
1519 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1520 }
1521
1522 static inline void gen_op_fcmpq(int fccno)
1523 {
1524 gen_helper_fcmpq(cpu_env);
1525 }
1526
1527 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1528 {
1529 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1530 }
1531
1532 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1533 {
1534 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1535 }
1536
1537 static inline void gen_op_fcmpeq(int fccno)
1538 {
1539 gen_helper_fcmpeq(cpu_env);
1540 }
1541 #endif
1542
1543 static inline void gen_op_fpexception_im(int fsr_flags)
1544 {
1545 TCGv_i32 r_const;
1546
1547 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1548 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1549 r_const = tcg_const_i32(TT_FP_EXCP);
1550 gen_helper_raise_exception(cpu_env, r_const);
1551 tcg_temp_free_i32(r_const);
1552 }
1553
1554 static int gen_trap_ifnofpu(DisasContext *dc)
1555 {
1556 #if !defined(CONFIG_USER_ONLY)
1557 if (!dc->fpu_enabled) {
1558 TCGv_i32 r_const;
1559
1560 save_state(dc);
1561 r_const = tcg_const_i32(TT_NFPU_INSN);
1562 gen_helper_raise_exception(cpu_env, r_const);
1563 tcg_temp_free_i32(r_const);
1564 dc->is_br = 1;
1565 return 1;
1566 }
1567 #endif
1568 return 0;
1569 }
1570
1571 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1572 {
1573 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1574 }
1575
1576 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1577 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1578 {
1579 TCGv_i32 dst, src;
1580
1581 src = gen_load_fpr_F(dc, rs);
1582 dst = gen_dest_fpr_F();
1583
1584 gen(dst, cpu_env, src);
1585
1586 gen_store_fpr_F(dc, rd, dst);
1587 }
1588
1589 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1590 void (*gen)(TCGv_i32, TCGv_i32))
1591 {
1592 TCGv_i32 dst, src;
1593
1594 src = gen_load_fpr_F(dc, rs);
1595 dst = gen_dest_fpr_F();
1596
1597 gen(dst, src);
1598
1599 gen_store_fpr_F(dc, rd, dst);
1600 }
1601
1602 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1603 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1604 {
1605 TCGv_i32 dst, src1, src2;
1606
1607 src1 = gen_load_fpr_F(dc, rs1);
1608 src2 = gen_load_fpr_F(dc, rs2);
1609 dst = gen_dest_fpr_F();
1610
1611 gen(dst, cpu_env, src1, src2);
1612
1613 gen_store_fpr_F(dc, rd, dst);
1614 }
1615
1616 #ifdef TARGET_SPARC64
1617 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1618 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1619 {
1620 TCGv_i32 dst, src1, src2;
1621
1622 src1 = gen_load_fpr_F(dc, rs1);
1623 src2 = gen_load_fpr_F(dc, rs2);
1624 dst = gen_dest_fpr_F();
1625
1626 gen(dst, src1, src2);
1627
1628 gen_store_fpr_F(dc, rd, dst);
1629 }
1630 #endif
1631
1632 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1633 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1634 {
1635 TCGv_i64 dst, src;
1636
1637 src = gen_load_fpr_D(dc, rs);
1638 dst = gen_dest_fpr_D();
1639
1640 gen(dst, cpu_env, src);
1641
1642 gen_store_fpr_D(dc, rd, dst);
1643 }
1644
1645 #ifdef TARGET_SPARC64
1646 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1647 void (*gen)(TCGv_i64, TCGv_i64))
1648 {
1649 TCGv_i64 dst, src;
1650
1651 src = gen_load_fpr_D(dc, rs);
1652 dst = gen_dest_fpr_D();
1653
1654 gen(dst, src);
1655
1656 gen_store_fpr_D(dc, rd, dst);
1657 }
1658 #endif
1659
1660 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1661 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1662 {
1663 TCGv_i64 dst, src1, src2;
1664
1665 src1 = gen_load_fpr_D(dc, rs1);
1666 src2 = gen_load_fpr_D(dc, rs2);
1667 dst = gen_dest_fpr_D();
1668
1669 gen(dst, cpu_env, src1, src2);
1670
1671 gen_store_fpr_D(dc, rd, dst);
1672 }
1673
1674 #ifdef TARGET_SPARC64
1675 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1676 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1677 {
1678 TCGv_i64 dst, src1, src2;
1679
1680 src1 = gen_load_fpr_D(dc, rs1);
1681 src2 = gen_load_fpr_D(dc, rs2);
1682 dst = gen_dest_fpr_D();
1683
1684 gen(dst, src1, src2);
1685
1686 gen_store_fpr_D(dc, rd, dst);
1687 }
1688
1689 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1690 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1691 {
1692 TCGv_i64 dst, src1, src2;
1693
1694 src1 = gen_load_fpr_D(dc, rs1);
1695 src2 = gen_load_fpr_D(dc, rs2);
1696 dst = gen_dest_fpr_D();
1697
1698 gen(dst, cpu_gsr, src1, src2);
1699
1700 gen_store_fpr_D(dc, rd, dst);
1701 }
1702
1703 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1704 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1705 {
1706 TCGv_i64 dst, src0, src1, src2;
1707
1708 src1 = gen_load_fpr_D(dc, rs1);
1709 src2 = gen_load_fpr_D(dc, rs2);
1710 src0 = gen_load_fpr_D(dc, rd);
1711 dst = gen_dest_fpr_D();
1712
1713 gen(dst, src0, src1, src2);
1714
1715 gen_store_fpr_D(dc, rd, dst);
1716 }
1717 #endif
1718
1719 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1720 void (*gen)(TCGv_ptr))
1721 {
1722 gen_op_load_fpr_QT1(QFPREG(rs));
1723
1724 gen(cpu_env);
1725
1726 gen_op_store_QT0_fpr(QFPREG(rd));
1727 gen_update_fprs_dirty(QFPREG(rd));
1728 }
1729
1730 #ifdef TARGET_SPARC64
1731 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1732 void (*gen)(TCGv_ptr))
1733 {
1734 gen_op_load_fpr_QT1(QFPREG(rs));
1735
1736 gen(cpu_env);
1737
1738 gen_op_store_QT0_fpr(QFPREG(rd));
1739 gen_update_fprs_dirty(QFPREG(rd));
1740 }
1741 #endif
1742
1743 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1744 void (*gen)(TCGv_ptr))
1745 {
1746 gen_op_load_fpr_QT0(QFPREG(rs1));
1747 gen_op_load_fpr_QT1(QFPREG(rs2));
1748
1749 gen(cpu_env);
1750
1751 gen_op_store_QT0_fpr(QFPREG(rd));
1752 gen_update_fprs_dirty(QFPREG(rd));
1753 }
1754
1755 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1756 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1757 {
1758 TCGv_i64 dst;
1759 TCGv_i32 src1, src2;
1760
1761 src1 = gen_load_fpr_F(dc, rs1);
1762 src2 = gen_load_fpr_F(dc, rs2);
1763 dst = gen_dest_fpr_D();
1764
1765 gen(dst, cpu_env, src1, src2);
1766
1767 gen_store_fpr_D(dc, rd, dst);
1768 }
1769
1770 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1771 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1772 {
1773 TCGv_i64 src1, src2;
1774
1775 src1 = gen_load_fpr_D(dc, rs1);
1776 src2 = gen_load_fpr_D(dc, rs2);
1777
1778 gen(cpu_env, src1, src2);
1779
1780 gen_op_store_QT0_fpr(QFPREG(rd));
1781 gen_update_fprs_dirty(QFPREG(rd));
1782 }
1783
1784 #ifdef TARGET_SPARC64
1785 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1786 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1787 {
1788 TCGv_i64 dst;
1789 TCGv_i32 src;
1790
1791 src = gen_load_fpr_F(dc, rs);
1792 dst = gen_dest_fpr_D();
1793
1794 gen(dst, cpu_env, src);
1795
1796 gen_store_fpr_D(dc, rd, dst);
1797 }
1798 #endif
1799
1800 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1801 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1802 {
1803 TCGv_i64 dst;
1804 TCGv_i32 src;
1805
1806 src = gen_load_fpr_F(dc, rs);
1807 dst = gen_dest_fpr_D();
1808
1809 gen(dst, cpu_env, src);
1810
1811 gen_store_fpr_D(dc, rd, dst);
1812 }
1813
1814 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1815 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1816 {
1817 TCGv_i32 dst;
1818 TCGv_i64 src;
1819
1820 src = gen_load_fpr_D(dc, rs);
1821 dst = gen_dest_fpr_F();
1822
1823 gen(dst, cpu_env, src);
1824
1825 gen_store_fpr_F(dc, rd, dst);
1826 }
1827
1828 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1829 void (*gen)(TCGv_i32, TCGv_ptr))
1830 {
1831 TCGv_i32 dst;
1832
1833 gen_op_load_fpr_QT1(QFPREG(rs));
1834 dst = gen_dest_fpr_F();
1835
1836 gen(dst, cpu_env);
1837
1838 gen_store_fpr_F(dc, rd, dst);
1839 }
1840
1841 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1842 void (*gen)(TCGv_i64, TCGv_ptr))
1843 {
1844 TCGv_i64 dst;
1845
1846 gen_op_load_fpr_QT1(QFPREG(rs));
1847 dst = gen_dest_fpr_D();
1848
1849 gen(dst, cpu_env);
1850
1851 gen_store_fpr_D(dc, rd, dst);
1852 }
1853
1854 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1855 void (*gen)(TCGv_ptr, TCGv_i32))
1856 {
1857 TCGv_i32 src;
1858
1859 src = gen_load_fpr_F(dc, rs);
1860
1861 gen(cpu_env, src);
1862
1863 gen_op_store_QT0_fpr(QFPREG(rd));
1864 gen_update_fprs_dirty(QFPREG(rd));
1865 }
1866
1867 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1868 void (*gen)(TCGv_ptr, TCGv_i64))
1869 {
1870 TCGv_i64 src;
1871
1872 src = gen_load_fpr_D(dc, rs);
1873
1874 gen(cpu_env, src);
1875
1876 gen_op_store_QT0_fpr(QFPREG(rd));
1877 gen_update_fprs_dirty(QFPREG(rd));
1878 }
1879
1880 /* asi moves */
1881 #ifdef TARGET_SPARC64
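/* With the i bit set the ASI comes from the %asi register (the
   "[addr] %asi" instruction form); otherwise it is the immediate asi
   field of the instruction. */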
1882 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
1883 {
1884 int asi;
1885 TCGv_i32 r_asi;
1886
1887 if (IS_IMM) {
1888 r_asi = tcg_temp_new_i32();
1889 tcg_gen_mov_i32(r_asi, cpu_asi);
1890 } else {
1891 asi = GET_FIELD(insn, 19, 26);
1892 r_asi = tcg_const_i32(asi);
1893 }
1894 return r_asi;
1895 }
1896
1897 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
1898 int sign)
1899 {
1900 TCGv_i32 r_asi, r_size, r_sign;
1901
1902 r_asi = gen_get_asi(insn, addr);
1903 r_size = tcg_const_i32(size);
1904 r_sign = tcg_const_i32(sign);
1905 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
1906 tcg_temp_free_i32(r_sign);
1907 tcg_temp_free_i32(r_size);
1908 tcg_temp_free_i32(r_asi);
1909 }
1910
1911 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
1912 {
1913 TCGv_i32 r_asi, r_size;
1914
1915 r_asi = gen_get_asi(insn, addr);
1916 r_size = tcg_const_i32(size);
1917 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
1918 tcg_temp_free_i32(r_size);
1919 tcg_temp_free_i32(r_asi);
1920 }
1921
1922 static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
1923 {
1924 TCGv_i32 r_asi, r_size, r_rd;
1925
1926 r_asi = gen_get_asi(insn, addr);
1927 r_size = tcg_const_i32(size);
1928 r_rd = tcg_const_i32(rd);
1929 gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
1930 tcg_temp_free_i32(r_rd);
1931 tcg_temp_free_i32(r_size);
1932 tcg_temp_free_i32(r_asi);
1933 }
1934
1935 static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
1936 {
1937 TCGv_i32 r_asi, r_size, r_rd;
1938
1939 r_asi = gen_get_asi(insn, addr);
1940 r_size = tcg_const_i32(size);
1941 r_rd = tcg_const_i32(rd);
1942 gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
1943 tcg_temp_free_i32(r_rd);
1944 tcg_temp_free_i32(r_size);
1945 tcg_temp_free_i32(r_asi);
1946 }
1947
1948 static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
1949 {
1950 TCGv_i32 r_asi, r_size, r_sign;
1951
1952 r_asi = gen_get_asi(insn, addr);
1953 r_size = tcg_const_i32(4);
1954 r_sign = tcg_const_i32(0);
1955 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
1956 tcg_temp_free_i32(r_sign);
1957 gen_helper_st_asi(cpu_env, addr, dst, r_asi, r_size);
1958 tcg_temp_free_i32(r_size);
1959 tcg_temp_free_i32(r_asi);
1960 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
1961 }
1962
1963 static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
1964 {
1965 TCGv_i32 r_asi, r_rd;
1966
1967 r_asi = gen_get_asi(insn, addr);
1968 r_rd = tcg_const_i32(rd);
1969 gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
1970 tcg_temp_free_i32(r_rd);
1971 tcg_temp_free_i32(r_asi);
1972 }
1973
1974 static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
1975 {
1976 TCGv_i32 r_asi, r_size;
1977
1978 gen_movl_reg_TN(rd + 1, cpu_tmp0);
1979 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
1980 r_asi = gen_get_asi(insn, addr);
1981 r_size = tcg_const_i32(8);
1982 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
1983 tcg_temp_free_i32(r_size);
1984 tcg_temp_free_i32(r_asi);
1985 }
1986
1987 static inline void gen_cas_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
1988 int rd)
1989 {
1990 TCGv r_val1;
1991 TCGv_i32 r_asi;
1992
1993 r_val1 = tcg_temp_new();
1994 gen_movl_reg_TN(rd, r_val1);
1995 r_asi = gen_get_asi(insn, addr);
1996 gen_helper_cas_asi(dst, cpu_env, addr, r_val1, val2, r_asi);
1997 tcg_temp_free_i32(r_asi);
1998 tcg_temp_free(r_val1);
1999 }
2000
2001 static inline void gen_casx_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
2002 int rd)
2003 {
2004 TCGv_i32 r_asi;
2005
2006 gen_movl_reg_TN(rd, cpu_tmp64);
2007 r_asi = gen_get_asi(insn, addr);
2008 gen_helper_casx_asi(dst, cpu_env, addr, cpu_tmp64, val2, r_asi);
2009 tcg_temp_free_i32(r_asi);
2010 }
2011
2012 #elif !defined(CONFIG_USER_ONLY)
2013
2014 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2015 int sign)
2016 {
2017 TCGv_i32 r_asi, r_size, r_sign;
2018
2019 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2020 r_size = tcg_const_i32(size);
2021 r_sign = tcg_const_i32(sign);
2022 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2023 tcg_temp_free(r_sign);
2024 tcg_temp_free(r_size);
2025 tcg_temp_free(r_asi);
2026 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2027 }
2028
2029 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2030 {
2031 TCGv_i32 r_asi, r_size;
2032
2033 tcg_gen_extu_tl_i64(cpu_tmp64, src);
2034 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2035 r_size = tcg_const_i32(size);
2036 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2037 tcg_temp_free(r_size);
2038 tcg_temp_free(r_asi);
2039 }
2040
2041 static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
2042 {
2043 TCGv_i32 r_asi, r_size, r_sign;
2044 TCGv_i64 r_val;
2045
2046 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2047 r_size = tcg_const_i32(4);
2048 r_sign = tcg_const_i32(0);
2049 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2050 tcg_temp_free(r_sign);
2051 r_val = tcg_temp_new_i64();
2052 tcg_gen_extu_tl_i64(r_val, dst);
2053 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2054 tcg_temp_free_i64(r_val);
2055 tcg_temp_free(r_size);
2056 tcg_temp_free(r_asi);
2057 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2058 }
2059
2060 static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
2061 {
2062 TCGv_i32 r_asi, r_size, r_sign;
2063
2064 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2065 r_size = tcg_const_i32(8);
2066 r_sign = tcg_const_i32(0);
2067 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2068 tcg_temp_free(r_sign);
2069 tcg_temp_free(r_size);
2070 tcg_temp_free(r_asi);
2071 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
2072 gen_movl_TN_reg(rd + 1, cpu_tmp0);
2073 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
2074 tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
2075 gen_movl_TN_reg(rd, hi);
2076 }
2077
2078 static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
2079 {
2080 TCGv_i32 r_asi, r_size;
2081
2082 gen_movl_reg_TN(rd + 1, cpu_tmp0);
2083 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
2084 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2085 r_size = tcg_const_i32(8);
2086 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2087 tcg_temp_free(r_size);
2088 tcg_temp_free(r_asi);
2089 }
2090 #endif
2091
2092 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2093 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2094 {
2095 TCGv_i64 r_val;
2096 TCGv_i32 r_asi, r_size;
2097
2098 gen_ld_asi(dst, addr, insn, 1, 0);
2099
2100 r_val = tcg_const_i64(0xffULL);
2101 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2102 r_size = tcg_const_i32(1);
2103 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2104 tcg_temp_free_i32(r_size);
2105 tcg_temp_free_i32(r_asi);
2106 tcg_temp_free_i64(r_val);
2107 }
2108 #endif
2109
2110 static inline TCGv get_src1(unsigned int insn, TCGv def)
2111 {
2112 TCGv r_rs1 = def;
2113 unsigned int rs1;
2114
2115 rs1 = GET_FIELD(insn, 13, 17);
2116 if (rs1 == 0) {
2117 tcg_gen_movi_tl(def, 0);
2118 } else if (rs1 < 8) {
2119 r_rs1 = cpu_gregs[rs1];
2120 } else {
2121 tcg_gen_ld_tl(def, cpu_regwptr, (rs1 - 8) * sizeof(target_ulong));
2122 }
2123 return r_rs1;
2124 }
2125
2126 static inline TCGv get_src2(unsigned int insn, TCGv def)
2127 {
2128 TCGv r_rs2 = def;
2129
2130 if (IS_IMM) { /* immediate */
2131 target_long simm = GET_FIELDs(insn, 19, 31);
2132 tcg_gen_movi_tl(def, simm);
2133 } else { /* register */
2134 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2135 if (rs2 == 0) {
2136 tcg_gen_movi_tl(def, 0);
2137 } else if (rs2 < 8) {
2138 r_rs2 = cpu_gregs[rs2];
2139 } else {
2140 tcg_gen_ld_tl(def, cpu_regwptr, (rs2 - 8) * sizeof(target_ulong));
2141 }
2142 }
2143 return r_rs2;
2144 }
2145
2146 #ifdef TARGET_SPARC64
2147 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2148 {
2149 TCGv_i32 c32, zero, dst, s1, s2;
2150
2151 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2152 or fold the comparison down to 32 bits and use movcond_i32. Choose
2153        the latter. */
2154 c32 = tcg_temp_new_i32();
2155 if (cmp->is_bool) {
2156 tcg_gen_trunc_i64_i32(c32, cmp->c1);
2157 } else {
2158 TCGv_i64 c64 = tcg_temp_new_i64();
2159 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2160 tcg_gen_trunc_i64_i32(c32, c64);
2161 tcg_temp_free_i64(c64);
2162 }
2163
2164 s1 = gen_load_fpr_F(dc, rs);
2165 s2 = gen_load_fpr_F(dc, rd);
2166 dst = gen_dest_fpr_F();
2167 zero = tcg_const_i32(0);
2168
2169 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2170
2171 tcg_temp_free_i32(c32);
2172 tcg_temp_free_i32(zero);
2173 gen_store_fpr_F(dc, rd, dst);
2174 }
2175
2176 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2177 {
2178 TCGv_i64 dst = gen_dest_fpr_D();
2179 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2180 gen_load_fpr_D(dc, rs),
2181 gen_load_fpr_D(dc, rd));
2182 gen_store_fpr_D(dc, rd, dst);
2183 }
2184
2185 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2186 {
2187 int qd = QFPREG(rd);
2188 int qs = QFPREG(rs);
2189
2190 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2191 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2192 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2193 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2194
2195 gen_update_fprs_dirty(qd);
2196 }
2197
2198 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2199 {
2200 TCGv_i32 r_tl = tcg_temp_new_i32();
2201
2202 /* load env->tl into r_tl */
2203 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2204
2205 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2206 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2207
2208 /* calculate offset to current trap state from env->ts, reuse r_tl */
2209 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2210 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2211
2212 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2213 {
2214 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2215 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2216 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2217 tcg_temp_free_ptr(r_tl_tmp);
2218 }
2219
2220 tcg_temp_free_i32(r_tl);
2221 }
2222
2223 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2224 int width, bool cc, bool left)
2225 {
2226 TCGv lo1, lo2, t1, t2;
2227 uint64_t amask, tabl, tabr;
2228 int shift, imask, omask;
2229
2230 if (cc) {
2231 tcg_gen_mov_tl(cpu_cc_src, s1);
2232 tcg_gen_mov_tl(cpu_cc_src2, s2);
2233 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2234 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2235 dc->cc_op = CC_OP_SUB;
2236 }
2237
2238 /* Theory of operation: there are two tables, left and right (not to
2239 be confused with the left and right versions of the opcode). These
2240 are indexed by the low 3 bits of the inputs. To make things "easy",
2241 these tables are loaded into two constants, TABL and TABR below.
2242 The operation index = (input & imask) << shift calculates the index
2243 into the constant, while val = (table >> index) & omask calculates
2244 the value we're looking for. */
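    /* Example: edge8 (left) with (s1 & 7) == 2 gives index = 2 << 3 = 16,
       and (0x80c0e0f0f8fcfeff >> 16) & 0xff = 0xfc, i.e. a mask covering
       bytes 2..7 of the 8-byte block, with byte k in mask bit k. */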
2245 switch (width) {
2246 case 8:
2247 imask = 0x7;
2248 shift = 3;
2249 omask = 0xff;
2250 if (left) {
2251 tabl = 0x80c0e0f0f8fcfeffULL;
2252 tabr = 0xff7f3f1f0f070301ULL;
2253 } else {
2254 tabl = 0x0103070f1f3f7fffULL;
2255 tabr = 0xfffefcf8f0e0c080ULL;
2256 }
2257 break;
2258 case 16:
2259 imask = 0x6;
2260 shift = 1;
2261 omask = 0xf;
2262 if (left) {
2263 tabl = 0x8cef;
2264 tabr = 0xf731;
2265 } else {
2266 tabl = 0x137f;
2267 tabr = 0xfec8;
2268 }
2269 break;
2270 case 32:
2271 imask = 0x4;
2272 shift = 0;
2273 omask = 0x3;
2274 if (left) {
2275 tabl = (2 << 2) | 3;
2276 tabr = (3 << 2) | 1;
2277 } else {
2278 tabl = (1 << 2) | 3;
2279 tabr = (3 << 2) | 2;
2280 }
2281 break;
2282 default:
2283 abort();
2284 }
2285
2286 lo1 = tcg_temp_new();
2287 lo2 = tcg_temp_new();
2288 tcg_gen_andi_tl(lo1, s1, imask);
2289 tcg_gen_andi_tl(lo2, s2, imask);
2290 tcg_gen_shli_tl(lo1, lo1, shift);
2291 tcg_gen_shli_tl(lo2, lo2, shift);
2292
2293 t1 = tcg_const_tl(tabl);
2294 t2 = tcg_const_tl(tabr);
2295 tcg_gen_shr_tl(lo1, t1, lo1);
2296 tcg_gen_shr_tl(lo2, t2, lo2);
2297 tcg_gen_andi_tl(dst, lo1, omask);
2298 tcg_gen_andi_tl(lo2, lo2, omask);
2299
2300 amask = -8;
2301 if (AM_CHECK(dc)) {
2302 amask &= 0xffffffffULL;
2303 }
2304 tcg_gen_andi_tl(s1, s1, amask);
2305 tcg_gen_andi_tl(s2, s2, amask);
2306
2307 /* We want to compute
2308 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2309 We've already done dst = lo1, so this reduces to
2310 dst &= (s1 == s2 ? -1 : lo2)
2311 Which we perform by
2312 lo2 |= -(s1 == s2)
2313 dst &= lo2
2314 */
2315 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2316 tcg_gen_neg_tl(t1, t1);
2317 tcg_gen_or_tl(lo2, lo2, t1);
2318 tcg_gen_and_tl(dst, dst, lo2);
2319
2320 tcg_temp_free(lo1);
2321 tcg_temp_free(lo2);
2322 tcg_temp_free(t1);
2323 tcg_temp_free(t2);
2324 }
2325
2326 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2327 {
2328 TCGv tmp = tcg_temp_new();
2329
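    /* alignaddr: dst = (s1 + s2) & ~7, with the discarded low three bits
       (negated for the alignaddrl variant) deposited into GSR.align for
       use by faligndata. */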
2330 tcg_gen_add_tl(tmp, s1, s2);
2331 tcg_gen_andi_tl(dst, tmp, -8);
2332 if (left) {
2333 tcg_gen_neg_tl(tmp, tmp);
2334 }
2335 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2336
2337 tcg_temp_free(tmp);
2338 }
2339
2340 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2341 {
2342 TCGv t1, t2, shift;
2343
2344 t1 = tcg_temp_new();
2345 t2 = tcg_temp_new();
2346 shift = tcg_temp_new();
2347
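    /* Compute dst = (s1 << (8 * GSR.align)) | (s2 >> (64 - 8 * GSR.align)),
       i.e. the 8-byte window at byte offset GSR.align into s1:s2. */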
2348 tcg_gen_andi_tl(shift, gsr, 7);
2349 tcg_gen_shli_tl(shift, shift, 3);
2350 tcg_gen_shl_tl(t1, s1, shift);
2351
2352 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2353 shift of (up to 63) followed by a constant shift of 1. */
2354 tcg_gen_xori_tl(shift, shift, 63);
2355 tcg_gen_shr_tl(t2, s2, shift);
2356 tcg_gen_shri_tl(t2, t2, 1);
2357
2358 tcg_gen_or_tl(dst, t1, t2);
2359
2360 tcg_temp_free(t1);
2361 tcg_temp_free(t2);
2362 tcg_temp_free(shift);
2363 }
2364 #endif
2365
2366 #define CHECK_IU_FEATURE(dc, FEATURE) \
2367 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2368 goto illegal_insn;
2369 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2370 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2371 goto nfpu_insn;
2372
2373 /* before an instruction, dc->pc must be static */
2374 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2375 {
2376 unsigned int opc, rs1, rs2, rd;
2377 TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
2378 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2379 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2380 target_long simm;
2381
2382 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2383 tcg_gen_debug_insn_start(dc->pc);
2384 }
2385
2386 opc = GET_FIELD(insn, 0, 1);
2387
2388 rd = GET_FIELD(insn, 2, 6);
2389
2390 cpu_tmp1 = cpu_src1 = tcg_temp_new();
2391 cpu_tmp2 = cpu_src2 = tcg_temp_new();
2392
2393 switch (opc) {
2394 case 0: /* branches/sethi */
2395 {
2396 unsigned int xop = GET_FIELD(insn, 7, 9);
2397 int32_t target;
2398 switch (xop) {
2399 #ifdef TARGET_SPARC64
2400 case 0x1: /* V9 BPcc */
2401 {
2402 int cc;
2403
2404 target = GET_FIELD_SP(insn, 0, 18);
2405 target = sign_extend(target, 19);
2406 target <<= 2;
2407 cc = GET_FIELD_SP(insn, 20, 21);
2408 if (cc == 0)
2409 do_branch(dc, target, insn, 0);
2410 else if (cc == 2)
2411 do_branch(dc, target, insn, 1);
2412 else
2413 goto illegal_insn;
2414 goto jmp_insn;
2415 }
2416 case 0x3: /* V9 BPr */
2417 {
2418 target = GET_FIELD_SP(insn, 0, 13) |
2419 (GET_FIELD_SP(insn, 20, 21) << 14);
2420 target = sign_extend(target, 16);
2421 target <<= 2;
2422 cpu_src1 = get_src1(insn, cpu_src1);
2423 do_branch_reg(dc, target, insn, cpu_src1);
2424 goto jmp_insn;
2425 }
2426 case 0x5: /* V9 FBPcc */
2427 {
2428 int cc = GET_FIELD_SP(insn, 20, 21);
2429 if (gen_trap_ifnofpu(dc)) {
2430 goto jmp_insn;
2431 }
2432 target = GET_FIELD_SP(insn, 0, 18);
2433 target = sign_extend(target, 19);
2434 target <<= 2;
2435 do_fbranch(dc, target, insn, cc);
2436 goto jmp_insn;
2437 }
2438 #else
2439 case 0x7: /* CBN+x */
2440 {
2441 goto ncp_insn;
2442 }
2443 #endif
2444 case 0x2: /* BN+x */
2445 {
2446 target = GET_FIELD(insn, 10, 31);
2447 target = sign_extend(target, 22);
2448 target <<= 2;
2449 do_branch(dc, target, insn, 0);
2450 goto jmp_insn;
2451 }
2452 case 0x6: /* FBN+x */
2453 {
2454 if (gen_trap_ifnofpu(dc)) {
2455 goto jmp_insn;
2456 }
2457 target = GET_FIELD(insn, 10, 31);
2458 target = sign_extend(target, 22);
2459 target <<= 2;
2460 do_fbranch(dc, target, insn, 0);
2461 goto jmp_insn;
2462 }
2463 case 0x4: /* SETHI */
2464 if (rd) { /* SETHI with rd == %g0 is a nop */
2465 uint32_t value = GET_FIELD(insn, 10, 31);
2466 TCGv r_const;
2467
2468 r_const = tcg_const_tl(value << 10);
2469 gen_movl_TN_reg(rd, r_const);
2470 tcg_temp_free(r_const);
2471 }
2472 break;
2473 case 0x0: /* UNIMPL */
2474 default:
2475 goto illegal_insn;
2476 }
2477 break;
2478 }
2479 break;
2480 case 1: /*CALL*/
2481 {
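            /* CALL: the 30-bit word displacement is PC-relative, and the
               address of the CALL itself is written to %o7 (r15). */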
2482 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2483 TCGv r_const;
2484
2485 r_const = tcg_const_tl(dc->pc);
2486 gen_movl_TN_reg(15, r_const);
2487 tcg_temp_free(r_const);
2488 target += dc->pc;
2489 gen_mov_pc_npc(dc);
2490 #ifdef TARGET_SPARC64
2491 if (unlikely(AM_CHECK(dc))) {
2492 target &= 0xffffffffULL;
2493 }
2494 #endif
2495 dc->npc = target;
2496 }
2497 goto jmp_insn;
2498 case 2: /* FPU & Logical Operations */
2499 {
2500 unsigned int xop = GET_FIELD(insn, 7, 12);
2501 if (xop == 0x3a) { /* generate trap */
2502 int cond = GET_FIELD(insn, 3, 6);
2503 TCGv_i32 trap;
2504 int l1 = -1, mask;
2505
2506 if (cond == 0) {
2507 /* Trap never. */
2508 break;
2509 }
2510
2511 save_state(dc);
2512
2513 if (cond != 8) {
2514 /* Conditional trap. */
2515 DisasCompare cmp;
2516 #ifdef TARGET_SPARC64
2517 /* V9 icc/xcc */
2518 int cc = GET_FIELD_SP(insn, 11, 12);
2519 if (cc == 0) {
2520 gen_compare(&cmp, 0, cond, dc);
2521 } else if (cc == 2) {
2522 gen_compare(&cmp, 1, cond, dc);
2523 } else {
2524 goto illegal_insn;
2525 }
2526 #else
2527 gen_compare(&cmp, 0, cond, dc);
2528 #endif
2529 l1 = gen_new_label();
2530 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2531 cmp.c1, cmp.c2, l1);
2532 free_compare(&cmp);
2533 }
2534
2535 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2536 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
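                /* The software trap number is (rs1 + rs2_or_imm) & mask,
                   offset by TT_TRAP into the trap vector. */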
2537
2538 /* Don't use the normal temporaries, as they may well have
2539 gone out of scope with the branch above. While we're
2540 doing that we might as well pre-truncate to 32-bit. */
2541 trap = tcg_temp_new_i32();
2542
2543 rs1 = GET_FIELD_SP(insn, 14, 18);
2544 if (IS_IMM) {
2545 rs2 = GET_FIELD_SP(insn, 0, 6);
2546 if (rs1 == 0) {
2547 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2548 /* Signal that the trap value is fully constant. */
2549 mask = 0;
2550 } else {
2551 TCGv t1 = tcg_temp_new();
2552 gen_movl_reg_TN(rs1, t1);
2553 tcg_gen_trunc_tl_i32(trap, t1);
2554 tcg_temp_free(t1);
2555 tcg_gen_addi_i32(trap, trap, rs2);
2556 }
2557 } else {
2558 TCGv t1 = tcg_temp_new();
2559 TCGv t2 = tcg_temp_new();
2560 rs2 = GET_FIELD_SP(insn, 0, 4);
2561 gen_movl_reg_TN(rs1, t1);
2562 gen_movl_reg_TN(rs2, t2);
2563 tcg_gen_add_tl(t1, t1, t2);
2564 tcg_gen_trunc_tl_i32(trap, t1);
2565 tcg_temp_free(t1);
2566 tcg_temp_free(t2);
2567 }
2568 if (mask != 0) {
2569 tcg_gen_andi_i32(trap, trap, mask);
2570 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2571 }
2572
2573 gen_helper_raise_exception(cpu_env, trap);
2574 tcg_temp_free_i32(trap);
2575
2576 if (cond != 8) {
2577 gen_set_label(l1);
2578 gen_op_next_insn();
2579 tcg_gen_exit_tb(0);
2580 }
2581 dc->is_br = 1;
2582 goto jmp_insn;
2583 } else if (xop == 0x28) {
2584 rs1 = GET_FIELD(insn, 13, 17);
2585 switch(rs1) {
2586 case 0: /* rdy */
2587 #ifndef TARGET_SPARC64
2588 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2589 manual, rdy on the microSPARC
2590 II */
2591 case 0x0f: /* stbar in the SPARCv8 manual,
2592 rdy on the microSPARC II */
2593 case 0x10 ... 0x1f: /* implementation-dependent in the
2594 SPARCv8 manual, rdy on the
2595 microSPARC II */
2596 /* Read Asr17 */
2597 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2598 TCGv r_const;
2599
2600 /* Read ASR17 for a single-processor Leon3 */
2601 r_const = tcg_const_tl((1 << 8)
2602 | (dc->def->nwindows - 1));
2603 gen_movl_TN_reg(rd, r_const);
2604 tcg_temp_free(r_const);
2605 break;
2606 }
2607 #endif
2608 gen_movl_TN_reg(rd, cpu_y);
2609 break;
2610 #ifdef TARGET_SPARC64
2611 case 0x2: /* V9 rdccr */
2612 gen_helper_compute_psr(cpu_env);
2613 gen_helper_rdccr(cpu_dst, cpu_env);
2614 gen_movl_TN_reg(rd, cpu_dst);
2615 break;
2616 case 0x3: /* V9 rdasi */
2617 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2618 gen_movl_TN_reg(rd, cpu_dst);
2619 break;
2620 case 0x4: /* V9 rdtick */
2621 {
2622 TCGv_ptr r_tickptr;
2623
2624 r_tickptr = tcg_temp_new_ptr();
2625 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2626 offsetof(CPUSPARCState, tick));
2627 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2628 tcg_temp_free_ptr(r_tickptr);
2629 gen_movl_TN_reg(rd, cpu_dst);
2630 }
2631 break;
2632 case 0x5: /* V9 rdpc */
2633 {
2634 TCGv r_const;
2635
2636 if (unlikely(AM_CHECK(dc))) {
2637 r_const = tcg_const_tl(dc->pc & 0xffffffffULL);
2638 } else {
2639 r_const = tcg_const_tl(dc->pc);
2640 }
2641 gen_movl_TN_reg(rd, r_const);
2642 tcg_temp_free(r_const);
2643 }
2644 break;
2645 case 0x6: /* V9 rdfprs */
2646 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2647 gen_movl_TN_reg(rd, cpu_dst);
2648 break;
2649 case 0xf: /* V9 membar */
2650 break; /* no effect */
2651 case 0x13: /* Graphics Status */
2652 if (gen_trap_ifnofpu(dc)) {
2653 goto jmp_insn;
2654 }
2655 gen_movl_TN_reg(rd, cpu_gsr);
2656 break;
2657 case 0x16: /* Softint */
2658 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2659 gen_movl_TN_reg(rd, cpu_dst);
2660 break;
2661 case 0x17: /* Tick compare */
2662 gen_movl_TN_reg(rd, cpu_tick_cmpr);
2663 break;
2664 case 0x18: /* System tick */
2665 {
2666 TCGv_ptr r_tickptr;
2667
2668 r_tickptr = tcg_temp_new_ptr();
2669 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2670 offsetof(CPUSPARCState, stick));
2671 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2672 tcg_temp_free_ptr(r_tickptr);
2673 gen_movl_TN_reg(rd, cpu_dst);
2674 }
2675 break;
2676 case 0x19: /* System tick compare */
2677 gen_movl_TN_reg(rd, cpu_stick_cmpr);
2678 break;
2679 case 0x10: /* Performance Control */
2680 case 0x11: /* Performance Instrumentation Counter */
2681 case 0x12: /* Dispatch Control */
2682 case 0x14: /* Softint set, WO */
2683 case 0x15: /* Softint clear, WO */
2684 #endif
2685 default:
2686 goto illegal_insn;
2687 }
2688 #if !defined(CONFIG_USER_ONLY)
2689 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2690 #ifndef TARGET_SPARC64
2691 if (!supervisor(dc))
2692 goto priv_insn;
2693 gen_helper_compute_psr(cpu_env);
2694 dc->cc_op = CC_OP_FLAGS;
2695 gen_helper_rdpsr(cpu_dst, cpu_env);
2696 #else
2697 CHECK_IU_FEATURE(dc, HYPV);
2698 if (!hypervisor(dc))
2699 goto priv_insn;
2700 rs1 = GET_FIELD(insn, 13, 17);
2701 switch (rs1) {
2702 case 0: // hpstate
2703 // gen_op_rdhpstate();
2704 break;
2705 case 1: // htstate
2706 // gen_op_rdhtstate();
2707 break;
2708 case 3: // hintp
2709 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2710 break;
2711 case 5: // htba
2712 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2713 break;
2714 case 6: // hver
2715 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2716 break;
2717 case 31: // hstick_cmpr
2718 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2719 break;
2720 default:
2721 goto illegal_insn;
2722 }
2723 #endif
2724 gen_movl_TN_reg(rd, cpu_dst);
2725 break;
2726 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2727 if (!supervisor(dc))
2728 goto priv_insn;
2729 #ifdef TARGET_SPARC64
2730 rs1 = GET_FIELD(insn, 13, 17);
2731 switch (rs1) {
2732 case 0: // tpc
2733 {
2734 TCGv_ptr r_tsptr;
2735
2736 r_tsptr = tcg_temp_new_ptr();
2737 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2738 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2739 offsetof(trap_state, tpc));
2740 tcg_temp_free_ptr(r_tsptr);
2741 }
2742 break;
2743 case 1: // tnpc
2744 {
2745 TCGv_ptr r_tsptr;
2746
2747 r_tsptr = tcg_temp_new_ptr();
2748 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2749 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2750 offsetof(trap_state, tnpc));
2751 tcg_temp_free_ptr(r_tsptr);
2752 }
2753 break;
2754 case 2: // tstate
2755 {
2756 TCGv_ptr r_tsptr;
2757
2758 r_tsptr = tcg_temp_new_ptr();
2759 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2760 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2761 offsetof(trap_state, tstate));
2762 tcg_temp_free_ptr(r_tsptr);
2763 }
2764 break;
2765 case 3: // tt
2766 {
2767 TCGv_ptr r_tsptr;
2768
2769 r_tsptr = tcg_temp_new_ptr();
2770 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2771 tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
2772 offsetof(trap_state, tt));
2773 tcg_temp_free_ptr(r_tsptr);
2774 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2775 }
2776 break;
2777 case 4: // tick
2778 {
2779 TCGv_ptr r_tickptr;
2780
2781 r_tickptr = tcg_temp_new_ptr();
2782 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2783 offsetof(CPUSPARCState, tick));
2784 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2785 gen_movl_TN_reg(rd, cpu_tmp0);
2786 tcg_temp_free_ptr(r_tickptr);
2787 }
2788 break;
2789 case 5: // tba
2790 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2791 break;
2792 case 6: // pstate
2793 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2794 offsetof(CPUSPARCState, pstate));
2795 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2796 break;
2797 case 7: // tl
2798 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2799 offsetof(CPUSPARCState, tl));
2800 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2801 break;
2802 case 8: // pil
2803 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2804 offsetof(CPUSPARCState, psrpil));
2805 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2806 break;
2807 case 9: // cwp
2808 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2809 break;
2810 case 10: // cansave
2811 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2812 offsetof(CPUSPARCState, cansave));
2813 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2814 break;
2815 case 11: // canrestore
2816 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2817 offsetof(CPUSPARCState, canrestore));
2818 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2819 break;
2820 case 12: // cleanwin
2821 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2822 offsetof(CPUSPARCState, cleanwin));
2823 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2824 break;
2825 case 13: // otherwin
2826 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2827 offsetof(CPUSPARCState, otherwin));
2828 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2829 break;
2830 case 14: // wstate
2831 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2832 offsetof(CPUSPARCState, wstate));
2833 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2834 break;
2835 case 16: // UA2005 gl
2836 CHECK_IU_FEATURE(dc, GL);
2837 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2838 offsetof(CPUSPARCState, gl));
2839 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2840 break;
2841 case 26: // UA2005 strand status
2842 CHECK_IU_FEATURE(dc, HYPV);
2843 if (!hypervisor(dc))
2844 goto priv_insn;
2845 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2846 break;
2847 case 31: // ver
2848 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2849 break;
2850 case 15: // fq
2851 default:
2852 goto illegal_insn;
2853 }
2854 #else
2855 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2856 #endif
2857 gen_movl_TN_reg(rd, cpu_tmp0);
2858 break;
2859 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2860 #ifdef TARGET_SPARC64
2861 save_state(dc);
2862 gen_helper_flushw(cpu_env);
2863 #else
2864 if (!supervisor(dc))
2865 goto priv_insn;
2866 gen_movl_TN_reg(rd, cpu_tbr);
2867 #endif
2868 break;
2869 #endif
2870 } else if (xop == 0x34) { /* FPU Operations */
2871 if (gen_trap_ifnofpu(dc)) {
2872 goto jmp_insn;
2873 }
2874 gen_op_clear_ieee_excp_and_FTT();
2875 rs1 = GET_FIELD(insn, 13, 17);
2876 rs2 = GET_FIELD(insn, 27, 31);
2877 xop = GET_FIELD(insn, 18, 26);
2878 save_state(dc);
2879 switch (xop) {
2880 case 0x1: /* fmovs */
2881 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2882 gen_store_fpr_F(dc, rd, cpu_src1_32);
2883 break;
2884 case 0x5: /* fnegs */
2885 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2886 break;
2887 case 0x9: /* fabss */
2888 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2889 break;
2890 case 0x29: /* fsqrts */
2891 CHECK_FPU_FEATURE(dc, FSQRT);
2892 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2893 break;
2894 case 0x2a: /* fsqrtd */
2895 CHECK_FPU_FEATURE(dc, FSQRT);
2896 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2897 break;
2898 case 0x2b: /* fsqrtq */
2899 CHECK_FPU_FEATURE(dc, FLOAT128);
2900 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2901 break;
2902 case 0x41: /* fadds */
2903 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2904 break;
2905 case 0x42: /* faddd */
2906 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2907 break;
2908 case 0x43: /* faddq */
2909 CHECK_FPU_FEATURE(dc, FLOAT128);
2910 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
2911 break;
2912 case 0x45: /* fsubs */
2913 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
2914 break;
2915 case 0x46: /* fsubd */
2916 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
2917 break;
2918 case 0x47: /* fsubq */
2919 CHECK_FPU_FEATURE(dc, FLOAT128);
2920 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
2921 break;
2922 case 0x49: /* fmuls */
2923 CHECK_FPU_FEATURE(dc, FMUL);
2924 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
2925 break;
2926 case 0x4a: /* fmuld */
2927 CHECK_FPU_FEATURE(dc, FMUL);
2928 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
2929 break;
2930 case 0x4b: /* fmulq */
2931 CHECK_FPU_FEATURE(dc, FLOAT128);
2932 CHECK_FPU_FEATURE(dc, FMUL);
2933 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
2934 break;
2935 case 0x4d: /* fdivs */
2936 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
2937 break;
2938 case 0x4e: /* fdivd */
2939 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
2940 break;
2941 case 0x4f: /* fdivq */
2942 CHECK_FPU_FEATURE(dc, FLOAT128);
2943 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
2944 break;
2945 case 0x69: /* fsmuld */
2946 CHECK_FPU_FEATURE(dc, FSMULD);
2947 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
2948 break;
2949 case 0x6e: /* fdmulq */
2950 CHECK_FPU_FEATURE(dc, FLOAT128);
2951 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
2952 break;
2953 case 0xc4: /* fitos */
2954 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
2955 break;
2956 case 0xc6: /* fdtos */
2957 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
2958 break;
2959 case 0xc7: /* fqtos */
2960 CHECK_FPU_FEATURE(dc, FLOAT128);
2961 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
2962 break;
2963 case 0xc8: /* fitod */
2964 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
2965 break;
2966 case 0xc9: /* fstod */
2967 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
2968 break;
2969 case 0xcb: /* fqtod */
2970 CHECK_FPU_FEATURE(dc, FLOAT128);
2971 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
2972 break;
2973 case 0xcc: /* fitoq */
2974 CHECK_FPU_FEATURE(dc, FLOAT128);
2975 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
2976 break;
2977 case 0xcd: /* fstoq */
2978 CHECK_FPU_FEATURE(dc, FLOAT128);
2979 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
2980 break;
2981 case 0xce: /* fdtoq */
2982 CHECK_FPU_FEATURE(dc, FLOAT128);
2983 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
2984 break;
2985 case 0xd1: /* fstoi */
2986 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
2987 break;
2988 case 0xd2: /* fdtoi */
2989 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
2990 break;
2991 case 0xd3: /* fqtoi */
2992 CHECK_FPU_FEATURE(dc, FLOAT128);
2993 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
2994 break;
2995 #ifdef TARGET_SPARC64
2996 case 0x2: /* V9 fmovd */
2997 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
2998 gen_store_fpr_D(dc, rd, cpu_src1_64);
2999 break;
3000 case 0x3: /* V9 fmovq */
3001 CHECK_FPU_FEATURE(dc, FLOAT128);
3002 gen_move_Q(rd, rs2);
3003 break;
3004 case 0x6: /* V9 fnegd */
3005 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3006 break;
3007 case 0x7: /* V9 fnegq */
3008 CHECK_FPU_FEATURE(dc, FLOAT128);
3009 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3010 break;
3011 case 0xa: /* V9 fabsd */
3012 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3013 break;
3014 case 0xb: /* V9 fabsq */
3015 CHECK_FPU_FEATURE(dc, FLOAT128);
3016 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3017 break;
3018 case 0x81: /* V9 fstox */
3019 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3020 break;
3021 case 0x82: /* V9 fdtox */
3022 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3023 break;
3024 case 0x83: /* V9 fqtox */
3025 CHECK_FPU_FEATURE(dc, FLOAT128);
3026 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3027 break;
3028 case 0x84: /* V9 fxtos */
3029 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3030 break;
3031 case 0x88: /* V9 fxtod */
3032 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3033 break;
3034 case 0x8c: /* V9 fxtoq */
3035 CHECK_FPU_FEATURE(dc, FLOAT128);
3036 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3037 break;
3038 #endif
3039 default:
3040 goto illegal_insn;
3041 }
3042 } else if (xop == 0x35) { /* FPU Operations */
3043 #ifdef TARGET_SPARC64
3044 int cond;
3045 #endif
3046 if (gen_trap_ifnofpu(dc)) {
3047 goto jmp_insn;
3048 }
3049 gen_op_clear_ieee_excp_and_FTT();
3050 rs1 = GET_FIELD(insn, 13, 17);
3051 rs2 = GET_FIELD(insn, 27, 31);
3052 xop = GET_FIELD(insn, 18, 26);
3053 save_state(dc);
3054 #ifdef TARGET_SPARC64
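            /* fmov[sdq]r: conditional FP move on an integer register value,
               implemented here with a branch around the copy rather than
               a movcond. */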
3055 if ((xop & 0x11f) == 0x005) { // V9 fmovsr
3056 int l1;
3057
3058 l1 = gen_new_label();
3059 cond = GET_FIELD_SP(insn, 14, 17);
3060 cpu_src1 = get_src1(insn, cpu_src1);
3061 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3062 0, l1);
3063 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3064 gen_store_fpr_F(dc, rd, cpu_src1_32);
3065 gen_set_label(l1);
3066 break;
3067 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3068 int l1;
3069
3070 l1 = gen_new_label();
3071 cond = GET_FIELD_SP(insn, 14, 17);
3072 cpu_src1 = get_src1(insn, cpu_src1);
3073 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3074 0, l1);
3075 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3076 gen_store_fpr_D(dc, rd, cpu_src1_64);
3077 gen_set_label(l1);
3078 break;
3079 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3080 int l1;
3081
3082 CHECK_FPU_FEATURE(dc, FLOAT128);
3083 l1 = gen_new_label();
3084 cond = GET_FIELD_SP(insn, 14, 17);
3085 cpu_src1 = get_src1(insn, cpu_src1);
3086 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3087 0, l1);
3088 gen_move_Q(rd, rs2);
3089 gen_set_label(l1);
3090 break;
3091 }
3092 #endif
3093 switch (xop) {
3094 #ifdef TARGET_SPARC64
3095 #define FMOVCC(fcc, sz) \
3096 do { \
3097 DisasCompare cmp; \
3098 cond = GET_FIELD_SP(insn, 14, 17); \
3099 gen_fcompare(&cmp, fcc, cond); \
3100 gen_fmov##sz(dc, &cmp, rd, rs2); \
3101 free_compare(&cmp); \
3102 } while (0)
3103
3104 case 0x001: /* V9 fmovscc %fcc0 */
3105 FMOVCC(0, s);
3106 break;
3107 case 0x002: /* V9 fmovdcc %fcc0 */
3108 FMOVCC(0, d);
3109 break;
3110 case 0x003: /* V9 fmovqcc %fcc0 */
3111 CHECK_FPU_FEATURE(dc, FLOAT128);
3112 FMOVCC(0, q);
3113 break;
3114 case 0x041: /* V9 fmovscc %fcc1 */
3115 FMOVCC(1, s);
3116 break;
3117 case 0x042: /* V9 fmovdcc %fcc1 */
3118 FMOVCC(1, d);
3119 break;
3120 case 0x043: /* V9 fmovqcc %fcc1 */
3121 CHECK_FPU_FEATURE(dc, FLOAT128);
3122 FMOVCC(1, q);
3123 break;
3124 case 0x081: /* V9 fmovscc %fcc2 */
3125 FMOVCC(2, s);
3126 break;
3127 case 0x082: /* V9 fmovdcc %fcc2 */
3128 FMOVCC(2, d);
3129 break;
3130 case 0x083: /* V9 fmovqcc %fcc2 */
3131 CHECK_FPU_FEATURE(dc, FLOAT128);
3132 FMOVCC(2, q);
3133 break;
3134 case 0x0c1: /* V9 fmovscc %fcc3 */
3135 FMOVCC(3, s);
3136 break;
3137 case 0x0c2: /* V9 fmovdcc %fcc3 */
3138 FMOVCC(3, d);
3139 break;
3140 case 0x0c3: /* V9 fmovqcc %fcc3 */
3141 CHECK_FPU_FEATURE(dc, FLOAT128);
3142 FMOVCC(3, q);
3143 break;
3144 #undef FMOVCC
3145 #define FMOVCC(xcc, sz) \
3146 do { \
3147 DisasCompare cmp; \
3148 cond = GET_FIELD_SP(insn, 14, 17); \
3149 gen_compare(&cmp, xcc, cond, dc); \
3150 gen_fmov##sz(dc, &cmp, rd, rs2); \
3151 free_compare(&cmp); \
3152 } while (0)
3153
3154 case 0x101: /* V9 fmovscc %icc */
3155 FMOVCC(0, s);
3156 break;
3157 case 0x102: /* V9 fmovdcc %icc */
3158 FMOVCC(0, d);
3159 break;
3160 case 0x103: /* V9 fmovqcc %icc */
3161 CHECK_FPU_FEATURE(dc, FLOAT128);
3162 FMOVCC(0, q);
3163 break;
3164 case 0x181: /* V9 fmovscc %xcc */
3165 FMOVCC(1, s);
3166 break;
3167 case 0x182: /* V9 fmovdcc %xcc */
3168 FMOVCC(1, d);
3169 break;
3170 case 0x183: /* V9 fmovqcc %xcc */
3171 CHECK_FPU_FEATURE(dc, FLOAT128);
3172 FMOVCC(1, q);
3173 break;
3174 #undef FMOVCC
3175 #endif
3176 case 0x51: /* fcmps, V9 %fcc */
3177 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3178 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3179 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3180 break;
3181 case 0x52: /* fcmpd, V9 %fcc */
3182 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3183 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3184 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3185 break;
3186 case 0x53: /* fcmpq, V9 %fcc */
3187 CHECK_FPU_FEATURE(dc, FLOAT128);
3188 gen_op_load_fpr_QT0(QFPREG(rs1));
3189 gen_op_load_fpr_QT1(QFPREG(rs2));
3190 gen_op_fcmpq(rd & 3);
3191 break;
3192 case 0x55: /* fcmpes, V9 %fcc */
3193 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3194 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3195 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3196 break;
3197 case 0x56: /* fcmped, V9 %fcc */
3198 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3199 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3200 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3201 break;
3202 case 0x57: /* fcmpeq, V9 %fcc */
3203 CHECK_FPU_FEATURE(dc, FLOAT128);
3204 gen_op_load_fpr_QT0(QFPREG(rs1));
3205 gen_op_load_fpr_QT1(QFPREG(rs2));
3206 gen_op_fcmpeq(rd & 3);
3207 break;
3208 default:
3209 goto illegal_insn;
3210 }
3211 } else if (xop == 0x2) {
3212 // clr/mov shortcut
3213
3214 rs1 = GET_FIELD(insn, 13, 17);
3215 if (rs1 == 0) {
3216 // or %g0, x, y is a plain move: y = x
3217 if (IS_IMM) { /* immediate */
3218 TCGv r_const;
3219
3220 simm = GET_FIELDs(insn, 19, 31);
3221 r_const = tcg_const_tl(simm);
3222 gen_movl_TN_reg(rd, r_const);
3223 tcg_temp_free(r_const);
3224 } else { /* register */
3225 rs2 = GET_FIELD(insn, 27, 31);
3226 gen_movl_reg_TN(rs2, cpu_dst);
3227 gen_movl_TN_reg(rd, cpu_dst);
3228 }
3229 } else {
3230 cpu_src1 = get_src1(insn, cpu_src1);
3231 if (IS_IMM) { /* immediate */
3232 simm = GET_FIELDs(insn, 19, 31);
3233 tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
3234 gen_movl_TN_reg(rd, cpu_dst);
3235 } else { /* register */
3236 // or x, %g0, y is likewise a plain move: y = x
3237 rs2 = GET_FIELD(insn, 27, 31);
3238 if (rs2 != 0) {
3239 gen_movl_reg_TN(rs2, cpu_src2);
3240 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3241 gen_movl_TN_reg(rd, cpu_dst);
3242 } else
3243 gen_movl_TN_reg(rd, cpu_src1);
3244 }
3245 }
3246 #ifdef TARGET_SPARC64
3247 } else if (xop == 0x25) { /* sll, V9 sllx */
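                /* Bit 12 of the instruction selects the 64-bit form
                   (sllx/srlx/srax): a 6-bit shift count instead of 5. */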
3248 cpu_src1 = get_src1(insn, cpu_src1);
3249 if (IS_IMM) { /* immediate */
3250 simm = GET_FIELDs(insn, 20, 31);
3251 if (insn & (1 << 12)) {
3252 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3253 } else {
3254 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3255 }
3256 } else { /* register */
3257 rs2 = GET_FIELD(insn, 27, 31);
3258 gen_movl_reg_TN(rs2, cpu_src2);
3259 if (insn & (1 << 12)) {
3260 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3261 } else {
3262 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3263 }
3264 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3265 }
3266 gen_movl_TN_reg(rd, cpu_dst);
3267 } else if (xop == 0x26) { /* srl, V9 srlx */
3268 cpu_src1 = get_src1(insn, cpu_src1);
3269 if (IS_IMM) { /* immediate */
3270 simm = GET_FIELDs(insn, 20, 31);
3271 if (insn & (1 << 12)) {
3272 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3273 } else {
3274 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3275 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3276 }
3277 } else { /* register */
3278 rs2 = GET_FIELD(insn, 27, 31);
3279 gen_movl_reg_TN(rs2, cpu_src2);
3280 if (insn & (1 << 12)) {
3281 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3282 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3283 } else {
3284 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3285 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3286 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3287 }
3288 }
3289 gen_movl_TN_reg(rd, cpu_dst);
3290 } else if (xop == 0x27) { /* sra, V9 srax */
3291 cpu_src1 = get_src1(insn, cpu_src1);
3292 if (IS_IMM) { /* immediate */
3293 simm = GET_FIELDs(insn, 20, 31);
3294 if (insn & (1 << 12)) {
3295 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3296 } else {
3297 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3298 tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
3299 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3300 }
3301 } else { /* register */
3302 rs2 = GET_FIELD(insn, 27, 31);
3303 gen_movl_reg_TN(rs2, cpu_src2);
3304 if (insn & (1 << 12)) {
3305 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3306 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3307 } else {
3308 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3309 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3310 tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
3311 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3312 }
3313 }
3314 gen_movl_TN_reg(rd, cpu_dst);
3315 #endif
3316 } else if (xop < 0x36) {
3317 if (xop < 0x20) {
3318 cpu_src1 = get_src1(insn, cpu_src1);
3319 cpu_src2 = get_src2(insn, cpu_src2);
3320 switch (xop & ~0x10) {
3321 case 0x0: /* add */
3322 if (IS_IMM) {
3323 simm = GET_FIELDs(insn, 19, 31);
3324 if (xop & 0x10) {
3325 gen_op_addi_cc(cpu_dst, cpu_src1, simm);
3326 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3327 dc->cc_op = CC_OP_ADD;
3328 } else {
3329 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
3330 }
3331 } else {
3332 if (xop & 0x10) {
3333 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3334 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3335 dc->cc_op = CC_OP_ADD;
3336 } else {
3337 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3338 }
3339 }
3340 break;
3341 case 0x1: /* and */
3342 if (IS_IMM) {
3343 simm = GET_FIELDs(insn, 19, 31);
3344 tcg_gen_andi_tl(cpu_dst, cpu_src1, simm);
3345 } else {
3346 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3347 }
3348 if (xop & 0x10) {
3349 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3350 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3351 dc->cc_op = CC_OP_LOGIC;
3352 }
3353 break;
3354 case 0x2: /* or */
3355 if (IS_IMM) {
3356 simm = GET_FIELDs(insn, 19, 31);
3357 tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
3358 } else {
3359 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3360 }
3361 if (xop & 0x10) {
3362 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3363 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3364 dc->cc_op = CC_OP_LOGIC;
3365 }
3366 break;
3367 case 0x3: /* xor */
3368 if (IS_IMM) {
3369 simm = GET_FIELDs(insn, 19, 31);
3370 tcg_gen_xori_tl(cpu_dst, cpu_src1, simm);
3371 } else {
3372 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3373 }
3374 if (xop & 0x10) {
3375 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3376 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3377 dc->cc_op = CC_OP_LOGIC;
3378 }
3379 break;
3380 case 0x4: /* sub */
3381 if (IS_IMM) {
3382 simm = GET_FIELDs(insn, 19, 31);
3383 if (xop & 0x10) {
3384 gen_op_subi_cc(cpu_dst, cpu_src1, simm, dc);
3385 } else {
3386 tcg_gen_subi_tl(cpu_dst, cpu_src1, simm);
3387 }
3388 } else {
3389 if (xop & 0x10) {
3390 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3391 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3392 dc->cc_op = CC_OP_SUB;
3393 } else {
3394 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3395 }
3396 }
3397 break;
3398 case 0x5: /* andn */
3399 if (IS_IMM) {
3400 simm = GET_FIELDs(insn, 19, 31);
3401 tcg_gen_andi_tl(cpu_dst, cpu_src1, ~simm);
3402 } else {
3403 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3404 }
3405 if (xop & 0x10) {
3406 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3407 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3408 dc->cc_op = CC_OP_LOGIC;
3409 }
3410 break;
3411 case 0x6: /* orn */
3412 if (IS_IMM) {
3413 simm = GET_FIELDs(insn, 19, 31);
3414 tcg_gen_ori_tl(cpu_dst, cpu_src1, ~simm);
3415 } else {
3416 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3417 }
3418 if (xop & 0x10) {
3419 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3420 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3421 dc->cc_op = CC_OP_LOGIC;
3422 }
3423 break;
3424 case 0x7: /* xorn */
3425 if (IS_IMM) {
3426 simm = GET_FIELDs(insn, 19, 31);
3427 tcg_gen_xori_tl(cpu_dst, cpu_src1, ~simm);
3428 } else {
3429 tcg_gen_not_tl(cpu_tmp0, cpu_src2);
3430 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_tmp0);
3431 }
3432 if (xop & 0x10) {
3433 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3434 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3435 dc->cc_op = CC_OP_LOGIC;
3436 }
3437 break;
3438 case 0x8: /* addx, V9 addc */
3439 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3440 (xop & 0x10));
3441 break;
3442 #ifdef TARGET_SPARC64
3443 case 0x9: /* V9 mulx */
3444 if (IS_IMM) {
3445 simm = GET_FIELDs(insn, 19, 31);
3446 tcg_gen_muli_i64(cpu_dst, cpu_src1, simm);
3447 } else {
3448 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3449 }
3450 break;
3451 #endif
3452 case 0xa: /* umul */
3453 CHECK_IU_FEATURE(dc, MUL);
3454 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3455 if (xop & 0x10) {
3456 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3457 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3458 dc->cc_op = CC_OP_LOGIC;
3459 }
3460 break;
3461 case 0xb: /* smul */
3462 CHECK_IU_FEATURE(dc, MUL);
3463 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3464 if (xop & 0x10) {
3465 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3466 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3467 dc->cc_op = CC_OP_LOGIC;
3468 }
3469 break;
3470 case 0xc: /* subx, V9 subc */
3471 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3472 (xop & 0x10));
3473 break;
3474 #ifdef TARGET_SPARC64
3475 case 0xd: /* V9 udivx */
3476 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3477 break;
3478 #endif
3479 case 0xe: /* udiv */
3480 CHECK_IU_FEATURE(dc, DIV);
3481 if (xop & 0x10) {
3482 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3483 cpu_src2);
3484 dc->cc_op = CC_OP_DIV;
3485 } else {
3486 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3487 cpu_src2);
3488 }
3489 break;
3490 case 0xf: /* sdiv */
3491 CHECK_IU_FEATURE(dc, DIV);
3492 if (xop & 0x10) {
3493 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3494 cpu_src2);
3495 dc->cc_op = CC_OP_DIV;
3496 } else {
3497 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3498 cpu_src2);
3499 }
3500 break;
3501 default:
3502 goto illegal_insn;
3503 }
3504 gen_movl_TN_reg(rd, cpu_dst);
3505 } else {
3506 cpu_src1 = get_src1(insn, cpu_src1);
3507 cpu_src2 = get_src2(insn, cpu_src2);
3508 switch (xop) {
3509 case 0x20: /* taddcc */
3510 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3511 gen_movl_TN_reg(rd, cpu_dst);
3512 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3513 dc->cc_op = CC_OP_TADD;
3514 break;
3515 case 0x21: /* tsubcc */
3516 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3517 gen_movl_TN_reg(rd, cpu_dst);
3518 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3519 dc->cc_op = CC_OP_TSUB;
3520 break;
3521 case 0x22: /* taddcctv */
3522 gen_helper_taddcctv(cpu_dst, cpu_env,
3523 cpu_src1, cpu_src2);
3524 gen_movl_TN_reg(rd, cpu_dst);
3525 dc->cc_op = CC_OP_TADDTV;
3526 break;
3527 case 0x23: /* tsubcctv */
3528 gen_helper_tsubcctv(cpu_dst, cpu_env,
3529 cpu_src1, cpu_src2);
3530 gen_movl_TN_reg(rd, cpu_dst);
3531 dc->cc_op = CC_OP_TSUBTV;
3532 break;
3533 case 0x24: /* mulscc */
3534 gen_helper_compute_psr(cpu_env);
3535 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3536 gen_movl_TN_reg(rd, cpu_dst);
3537 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3538 dc->cc_op = CC_OP_ADD;
3539 break;
3540 #ifndef TARGET_SPARC64
3541 case 0x25: /* sll */
3542 if (IS_IMM) { /* immediate */
3543 simm = GET_FIELDs(insn, 20, 31);
3544 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3545 } else { /* register */
3546 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3547 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3548 }
3549 gen_movl_TN_reg(rd, cpu_dst);
3550 break;
3551 case 0x26: /* srl */
3552 if (IS_IMM) { /* immediate */
3553 simm = GET_FIELDs(insn, 20, 31);
3554 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3555 } else { /* register */
3556 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3557 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3558 }
3559 gen_movl_TN_reg(rd, cpu_dst);
3560 break;
3561 case 0x27: /* sra */
3562 if (IS_IMM) { /* immediate */
3563 simm = GET_FIELDs(insn, 20, 31);
3564 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3565 } else { /* register */
3566 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3567 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3568 }
3569 gen_movl_TN_reg(rd, cpu_dst);
3570 break;
3571 #endif
3572 case 0x30:
3573 {
3574 switch(rd) {
3575 case 0: /* wry */
3576 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3577 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3578 break;
3579 #ifndef TARGET_SPARC64
3580 case 0x01 ... 0x0f: /* undefined in the
3581 SPARCv8 manual, nop
3582 on the microSPARC
3583 II */
3584 case 0x10 ... 0x1f: /* implementation-dependent
3585 in the SPARCv8
3586 manual, nop on the
3587 microSPARC II */
3588 break;
3589 #else
3590 case 0x2: /* V9 wrccr */
3591 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3592 gen_helper_wrccr(cpu_env, cpu_dst);
3593 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3594 dc->cc_op = CC_OP_FLAGS;
3595 break;
3596 case 0x3: /* V9 wrasi */
3597 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3598 tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
3599 tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
3600 break;
3601 case 0x6: /* V9 wrfprs */
3602 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3603 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
3604 save_state(dc);
3605 gen_op_next_insn();
3606 tcg_gen_exit_tb(0);
3607 dc->is_br = 1;
3608 break;
3609 case 0xf: /* V9 sir, nop if user */
3610 #if !defined(CONFIG_USER_ONLY)
3611 if (supervisor(dc)) {
3612 ; // XXX
3613 }
3614 #endif
3615 break;
3616 case 0x13: /* Graphics Status */
3617 if (gen_trap_ifnofpu(dc)) {
3618 goto jmp_insn;
3619 }
3620 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3621 break;
3622 case 0x14: /* Softint set */
3623 if (!supervisor(dc))
3624 goto illegal_insn;
3625 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3626 gen_helper_set_softint(cpu_env, cpu_tmp64);
3627 break;
3628 case 0x15: /* Softint clear */
3629 if (!supervisor(dc))
3630 goto illegal_insn;
3631 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3632 gen_helper_clear_softint(cpu_env, cpu_tmp64);
3633 break;
3634 case 0x16: /* Softint write */
3635 if (!supervisor(dc))
3636 goto illegal_insn;
3637 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3638 gen_helper_write_softint(cpu_env, cpu_tmp64);
3639 break;
3640 case 0x17: /* Tick compare */
3641 #if !defined(CONFIG_USER_ONLY)
3642 if (!supervisor(dc))
3643 goto illegal_insn;
3644 #endif
3645 {
3646 TCGv_ptr r_tickptr;
3647
3648 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3649 cpu_src2);
3650 r_tickptr = tcg_temp_new_ptr();
3651 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3652 offsetof(CPUSPARCState, tick));
3653 gen_helper_tick_set_limit(r_tickptr,
3654 cpu_tick_cmpr);
3655 tcg_temp_free_ptr(r_tickptr);
3656 }
3657 break;
3658 case 0x18: /* System tick */
3659 #if !defined(CONFIG_USER_ONLY)
3660 if (!supervisor(dc))
3661 goto illegal_insn;
3662 #endif
3663 {
3664 TCGv_ptr r_tickptr;
3665
3666 tcg_gen_xor_tl(cpu_dst, cpu_src1,
3667 cpu_src2);
3668 r_tickptr = tcg_temp_new_ptr();
3669 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3670 offsetof(CPUSPARCState, stick));
3671 gen_helper_tick_set_count(r_tickptr,
3672 cpu_dst);
3673 tcg_temp_free_ptr(r_tickptr);
3674 }
3675 break;
3676 case 0x19: /* System tick compare */
3677 #if !defined(CONFIG_USER_ONLY)
3678 if (!supervisor(dc))
3679 goto illegal_insn;
3680 #endif
3681 {
3682 TCGv_ptr r_tickptr;
3683
3684 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3685 cpu_src2);
3686 r_tickptr = tcg_temp_new_ptr();
3687 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3688 offsetof(CPUSPARCState, stick));
3689 gen_helper_tick_set_limit(r_tickptr,
3690 cpu_stick_cmpr);
3691 tcg_temp_free_ptr(r_tickptr);
3692 }
3693 break;
3694
3695 case 0x10: /* Performance Control */
3696 case 0x11: /* Performance Instrumentation
3697 Counter */
3698 case 0x12: /* Dispatch Control */
3699 #endif
3700 default:
3701 goto illegal_insn;
3702 }
3703 }
3704 break;
3705 #if !defined(CONFIG_USER_ONLY)
3706 case 0x31: /* wrpsr, V9 saved, restored */
3707 {
3708 if (!supervisor(dc))
3709 goto priv_insn;
3710 #ifdef TARGET_SPARC64
3711 switch (rd) {
3712 case 0:
3713 gen_helper_saved(cpu_env);
3714 break;
3715 case 1:
3716 gen_helper_restored(cpu_env);
3717 break;
3718 case 2: /* UA2005 allclean */
3719 case 3: /* UA2005 otherw */
3720 case 4: /* UA2005 normalw */
3721 case 5: /* UA2005 invalw */
3722 // XXX
3723 default:
3724 goto illegal_insn;
3725 }
3726 #else
3727 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3728 gen_helper_wrpsr(cpu_env, cpu_dst);
3729 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3730 dc->cc_op = CC_OP_FLAGS;
3731 save_state(dc);
3732 gen_op_next_insn();
3733 tcg_gen_exit_tb(0);
3734 dc->is_br = 1;
3735 #endif
3736 }
3737 break;
3738 case 0x32: /* wrwim, V9 wrpr */
3739 {
3740 if (!supervisor(dc))
3741 goto priv_insn;
3742 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3743 #ifdef TARGET_SPARC64
3744 switch (rd) {
3745 case 0: // tpc
3746 {
3747 TCGv_ptr r_tsptr;
3748
3749 r_tsptr = tcg_temp_new_ptr();
3750 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3751 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3752 offsetof(trap_state, tpc));
3753 tcg_temp_free_ptr(r_tsptr);
3754 }
3755 break;
3756 case 1: // tnpc
3757 {
3758 TCGv_ptr r_tsptr;
3759
3760 r_tsptr = tcg_temp_new_ptr();
3761 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3762 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3763 offsetof(trap_state, tnpc));
3764 tcg_temp_free_ptr(r_tsptr);
3765 }
3766 break;
3767 case 2: // tstate
3768 {
3769 TCGv_ptr r_tsptr;
3770
3771 r_tsptr = tcg_temp_new_ptr();
3772 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3773 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3774 offsetof(trap_state,
3775 tstate));
3776 tcg_temp_free_ptr(r_tsptr);
3777 }
3778 break;
3779 case 3: // tt
3780 {
3781 TCGv_ptr r_tsptr;
3782
3783 r_tsptr = tcg_temp_new_ptr();
3784 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3785 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3786 tcg_gen_st_i32(cpu_tmp32, r_tsptr,
3787 offsetof(trap_state, tt));
3788 tcg_temp_free_ptr(r_tsptr);
3789 }
3790 break;
3791 case 4: // tick
3792 {
3793 TCGv_ptr r_tickptr;
3794
3795 r_tickptr = tcg_temp_new_ptr();
3796 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3797 offsetof(CPUSPARCState, tick));
3798 gen_helper_tick_set_count(r_tickptr,
3799 cpu_tmp0);
3800 tcg_temp_free_ptr(r_tickptr);
3801 }
3802 break;
3803 case 5: // tba
3804 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3805 break;
3806 case 6: // pstate
3807 {
3808 TCGv r_tmp = tcg_temp_local_new();
3809
3810 tcg_gen_mov_tl(r_tmp, cpu_tmp0);
3811 save_state(dc);
3812 gen_helper_wrpstate(cpu_env, r_tmp);
3813 tcg_temp_free(r_tmp);
3814 dc->npc = DYNAMIC_PC;
3815 }
3816 break;
3817 case 7: // tl
3818 {
3819 TCGv r_tmp = tcg_temp_local_new();
3820
3821 tcg_gen_mov_tl(r_tmp, cpu_tmp0);
3822 save_state(dc);
3823 tcg_gen_trunc_tl_i32(cpu_tmp32, r_tmp);
3824 tcg_temp_free(r_tmp);
3825 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3826 offsetof(CPUSPARCState, tl));
3827 dc->npc = DYNAMIC_PC;
3828 }
3829 break;
3830 case 8: // pil
3831 gen_helper_wrpil(cpu_env, cpu_tmp0);
3832 break;
3833 case 9: // cwp
3834 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3835 break;
3836 case 10: // cansave
3837 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3838 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3839 offsetof(CPUSPARCState,
3840 cansave));
3841 break;
3842 case 11: // canrestore
3843 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3844 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3845 offsetof(CPUSPARCState,
3846 canrestore));
3847 break;
3848 case 12: // cleanwin
3849 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3850 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3851 offsetof(CPUSPARCState,
3852 cleanwin));
3853 break;
3854 case 13: // otherwin
3855 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3856 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3857 offsetof(CPUSPARCState,
3858 otherwin));
3859 break;
3860 case 14: // wstate
3861 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3862 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3863 offsetof(CPUSPARCState,
3864 wstate));
3865 break;
3866 case 16: // UA2005 gl
3867 CHECK_IU_FEATURE(dc, GL);
3868 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3869 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3870 offsetof(CPUSPARCState, gl));
3871 break;
3872 case 26: // UA2005 strand status
3873 CHECK_IU_FEATURE(dc, HYPV);
3874 if (!hypervisor(dc))
3875 goto priv_insn;
3876 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3877 break;
3878 default:
3879 goto illegal_insn;
3880 }
3881 #else
3882 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3883 if (dc->def->nwindows != 32)
3884 tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
3885 (1 << dc->def->nwindows) - 1);
3886 tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
3887 #endif
3888 }
3889 break;
3890 case 0x33: /* wrtbr, UA2005 wrhpr */
3891 {
3892 #ifndef TARGET_SPARC64
3893 if (!supervisor(dc))
3894 goto priv_insn;
3895 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3896 #else
3897 CHECK_IU_FEATURE(dc, HYPV);
3898 if (!hypervisor(dc))
3899 goto priv_insn;
3900 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3901 switch (rd) {
3902 case 0: // hpstate
3903 // XXX gen_op_wrhpstate();
3904 save_state(dc);
3905 gen_op_next_insn();
3906 tcg_gen_exit_tb(0);
3907 dc->is_br = 1;
3908 break;
3909 case 1: // htstate
3910 // XXX gen_op_wrhtstate();
3911 break;
3912 case 3: // hintp
3913 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3914 break;
3915 case 5: // htba
3916 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3917 break;
3918 case 31: // hstick_cmpr
3919 {
3920 TCGv_ptr r_tickptr;
3921
3922 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3923 r_tickptr = tcg_temp_new_ptr();
3924 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3925 offsetof(CPUSPARCState, hstick));
3926 gen_helper_tick_set_limit(r_tickptr,
3927 cpu_hstick_cmpr);
3928 tcg_temp_free_ptr(r_tickptr);
3929 }
3930 break;
3931 case 6: // hver is read-only
3932 default:
3933 goto illegal_insn;
3934 }
3935 #endif
3936 }
3937 break;
3938 #endif
3939 #ifdef TARGET_SPARC64
3940 case 0x2c: /* V9 movcc */
3941 {
3942 int cc = GET_FIELD_SP(insn, 11, 12);
3943 int cond = GET_FIELD_SP(insn, 14, 17);
3944 DisasCompare cmp;
3945
3946 if (insn & (1 << 18)) {
3947 if (cc == 0) {
3948 gen_compare(&cmp, 0, cond, dc);
3949 } else if (cc == 2) {
3950 gen_compare(&cmp, 1, cond, dc);
3951 } else {
3952 goto illegal_insn;
3953 }
3954 } else {
3955 gen_fcompare(&cmp, cc, cond);
3956 }
3957
3958 /* The get_src2 above loaded the normal 13-bit
3959 immediate field, not the 11-bit field we have
3960 in movcc. But it did handle the reg case. */
3961 if (IS_IMM) {
3962 simm = GET_FIELD_SPs(insn, 0, 10);
3963 tcg_gen_movi_tl(cpu_src2, simm);
3964 }
3965
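                /* The movcond writes rs2/imm when the condition holds and
                   otherwise keeps the old value of rd. */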
3966 gen_movl_reg_TN(rd, cpu_dst);
3967 tcg_gen_movcond_tl(cmp.cond, cpu_dst,
3968 cmp.c1, cmp.c2,
3969 cpu_src2, cpu_dst);
3970 free_compare(&cmp);
3971 gen_movl_TN_reg(rd, cpu_dst);
3972 break;
3973 }
3974 case 0x2d: /* V9 sdivx */
3975 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3976 gen_movl_TN_reg(rd, cpu_dst);
3977 break;
3978 case 0x2e: /* V9 popc */
3979 {
3980 cpu_src2 = get_src2(insn, cpu_src2);
3981 gen_helper_popc(cpu_dst, cpu_src2);
3982 gen_movl_TN_reg(rd, cpu_dst);
3983 }
3984 case 0x2f: /* V9 movr */
3985 {
3986 int cond = GET_FIELD_SP(insn, 10, 12);
3987 DisasCompare cmp;
3988
3989 gen_compare_reg(&cmp, cond, cpu_src1);
3990
3991 /* The get_src2 above loaded the normal 13-bit
3992 immediate field, not the 10-bit field we have
3993 in movr. But it did handle the reg case. */
3994 if (IS_IMM) {
3995 simm = GET_FIELD_SPs(insn, 0, 9);
3996 tcg_gen_movi_tl(cpu_src2, simm);
3997 }
3998
3999 gen_movl_reg_TN(rd, cpu_dst);
4000 tcg_gen_movcond_tl(cmp.cond, cpu_dst,
4001 cmp.c1, cmp.c2,
4002 cpu_src2, cpu_dst);
4003 free_compare(&cmp);
4004 gen_movl_TN_reg(rd, cpu_dst);
4005 break;
4006 }
4007 #endif
4008 default:
4009 goto illegal_insn;
4010 }
4011 }
4012 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4013 #ifdef TARGET_SPARC64
4014 int opf = GET_FIELD_SP(insn, 5, 13);
4015 rs1 = GET_FIELD(insn, 13, 17);
4016 rs2 = GET_FIELD(insn, 27, 31);
4017 if (gen_trap_ifnofpu(dc)) {
4018 goto jmp_insn;
4019 }
4020
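            /* VIS edge instructions compute a byte mask for the edges of an
               unaligned block; the "cc" variants also set the integer
               condition codes from s1 - s2 (see gen_edge above). */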
4021 switch (opf) {
4022 case 0x000: /* VIS I edge8cc */
4023 CHECK_FPU_FEATURE(dc, VIS1);
4024 gen_movl_reg_TN(rs1, cpu_src1);
4025 gen_movl_reg_TN(rs2, cpu_src2);
4026 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4027 gen_movl_TN_reg(rd, cpu_dst);
4028 break;
4029 case 0x001: /* VIS II edge8n */
4030 CHECK_FPU_FEATURE(dc, VIS2);
4031 gen_movl_reg_TN(rs1, cpu_src1);
4032 gen_movl_reg_TN(rs2, cpu_src2);
4033 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4034 gen_movl_TN_reg(rd, cpu_dst);
4035 break;
4036 case 0x002: /* VIS I edge8lcc */
4037 CHECK_FPU_FEATURE(dc, VIS1);
4038 gen_movl_reg_TN(rs1, cpu_src1);
4039 gen_movl_reg_TN(rs2, cpu_src2);
4040 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4041 gen_movl_TN_reg(rd, cpu_dst);
4042 break;
4043 case 0x003: /* VIS II edge8ln */
4044 CHECK_FPU_FEATURE(dc, VIS2);
4045 gen_movl_reg_TN(rs1, cpu_src1);
4046 gen_movl_reg_TN(rs2, cpu_src2);
4047 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4048 gen_movl_TN_reg(rd, cpu_dst);
4049 break;
4050 case 0x004: /* VIS I edge16cc */
4051 CHECK_FPU_FEATURE(dc, VIS1);
4052 gen_movl_reg_TN(rs1, cpu_src1);
4053 gen_movl_reg_TN(rs2, cpu_src2);
4054 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4055 gen_movl_TN_reg(rd, cpu_dst);
4056 break;
4057 case 0x005: /* VIS II edge16n */
4058 CHECK_FPU_FEATURE(dc, VIS2);
4059 gen_movl_reg_TN(rs1, cpu_src1);
4060 gen_movl_reg_TN(rs2, cpu_src2);
4061 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4062 gen_movl_TN_reg(rd, cpu_dst);
4063 break;
4064 case 0x006: /* VIS I edge16lcc */
4065 CHECK_FPU_FEATURE(dc, VIS1);
4066 gen_movl_reg_TN(rs1, cpu_src1);
4067 gen_movl_reg_TN(rs2, cpu_src2);
4068 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4069 gen_movl_TN_reg(rd, cpu_dst);
4070 break;
4071 case 0x007: /* VIS II edge16ln */
4072 CHECK_FPU_FEATURE(dc, VIS2);
4073 gen_movl_reg_TN(rs1, cpu_src1);
4074 gen_movl_reg_TN(rs2, cpu_src2);
4075 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4076 gen_movl_TN_reg(rd, cpu_dst);
4077 break;
4078 case 0x008: /* VIS I edge32cc */
4079 CHECK_FPU_FEATURE(dc, VIS1);
4080 gen_movl_reg_TN(rs1, cpu_src1);
4081 gen_movl_reg_TN(rs2, cpu_src2);
4082 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4083 gen_movl_TN_reg(rd, cpu_dst);
4084 break;
4085 case 0x009: /* VIS II edge32n */
4086 CHECK_FPU_FEATURE(dc, VIS2);
4087 gen_movl_reg_TN(rs1, cpu_src1);
4088 gen_movl_reg_TN(rs2, cpu_src2);
4089 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4090 gen_movl_TN_reg(rd, cpu_dst);
4091 break;
4092 case 0x00a: /* VIS I edge32lcc */
4093 CHECK_FPU_FEATURE(dc, VIS1);
4094 gen_movl_reg_TN(rs1, cpu_src1);
4095 gen_movl_reg_TN(rs2, cpu_src2);
4096 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4097 gen_movl_TN_reg(rd, cpu_dst);
4098 break;
4099 case 0x00b: /* VIS II edge32ln */
4100 CHECK_FPU_FEATURE(dc, VIS2);
4101 gen_movl_reg_TN(rs1, cpu_src1);
4102 gen_movl_reg_TN(rs2, cpu_src2);
4103 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4104 gen_movl_TN_reg(rd, cpu_dst);
4105 break;
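            /* VIS array instructions convert 3-D fixed-point coordinates
               into a blocked-byte address; array16/array32 simply scale
               the array8 result by the element size. */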
4106 case 0x010: /* VIS I array8 */
4107 CHECK_FPU_FEATURE(dc, VIS1);
4108 cpu_src1 = get_src1(insn, cpu_src1);
4109 gen_movl_reg_TN(rs2, cpu_src2);
4110 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4111 gen_movl_TN_reg(rd, cpu_dst);
4112 break;
4113 case 0x012: /* VIS I array16 */
4114 CHECK_FPU_FEATURE(dc, VIS1);
4115 cpu_src1 = get_src1(insn, cpu_src1);
4116 gen_movl_reg_TN(rs2, cpu_src2);
4117 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4118 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4119 gen_movl_TN_reg(rd, cpu_dst);
4120 break;
4121 case 0x014: /* VIS I array32 */
4122 CHECK_FPU_FEATURE(dc, VIS1);
4123 cpu_src1 = get_src1(insn, cpu_src1);
4124 gen_movl_reg_TN(rs2, cpu_src2);
4125 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4126 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4127 gen_movl_TN_reg(rd, cpu_dst);
4128 break;
4129 case 0x018: /* VIS I alignaddr */
4130 CHECK_FPU_FEATURE(dc, VIS1);
4131 cpu_src1 = get_src1(insn, cpu_src1);
4132 gen_movl_reg_TN(rs2, cpu_src2);
4133 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4134 gen_movl_TN_reg(rd, cpu_dst);
4135 break;
4136 case 0x01a: /* VIS I alignaddrl */
4137 CHECK_FPU_FEATURE(dc, VIS1);
4138 cpu_src1 = get_src1(insn, cpu_src1);
4139 gen_movl_reg_TN(rs2, cpu_src2);
4140 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4141 gen_movl_TN_reg(rd, cpu_dst);
4142 break;
4143 case 0x019: /* VIS II bmask */
4144 CHECK_FPU_FEATURE(dc, VIS2);
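            /* bmask: dst = s1 + s2, also deposited into GSR.mask
               (bits 63:32) for use by bshuffle. */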
4145 cpu_src1 = get_src1(insn, cpu_src1);
4146 cpu_src2 = get_src2(insn, cpu_src2);
4147 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4148 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4149 gen_movl_TN_reg(rd, cpu_dst);
4150 break;
4151 case 0x020: /* VIS I fcmple16 */
4152 CHECK_FPU_FEATURE(dc, VIS1);
4153 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4154 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4155 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4156 gen_movl_TN_reg(rd, cpu_dst);
4157 break;
4158 case 0x022: /* VIS I fcmpne16 */
4159 CHECK_FPU_FEATURE(dc, VIS1);
4160 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4161 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4162 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4163 gen_movl_TN_reg(rd, cpu_dst);
4164 break;
4165 case 0x024: /* VIS I fcmple32 */
4166 CHECK_FPU_FEATURE(dc, VIS1);
4167 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4168 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4169 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4170 gen_movl_TN_reg(rd, cpu_dst);
4171 break;
4172 case 0x026: /* VIS I fcmpne32 */
4173 CHECK_FPU_FEATURE(dc, VIS1);
4174 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4175 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4176 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4177 gen_movl_TN_reg(rd, cpu_dst);
4178 break;
4179 case 0x028: /* VIS I fcmpgt16 */
4180 CHECK_FPU_FEATURE(dc, VIS1);
4181 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4182 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4183 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4184 gen_movl_TN_reg(rd, cpu_dst);
4185 break;
4186 case 0x02a: /* VIS I fcmpeq16 */
4187 CHECK_FPU_FEATURE(dc, VIS1);
4188 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4189 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4190 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4191 gen_movl_TN_reg(rd, cpu_dst);
4192 break;
4193 case 0x02c: /* VIS I fcmpgt32 */
4194 CHECK_FPU_FEATURE(dc, VIS1);
4195 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4196 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4197 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4198 gen_movl_TN_reg(rd, cpu_dst);
4199 break;
4200 case 0x02e: /* VIS I fcmpeq32 */
4201 CHECK_FPU_FEATURE(dc, VIS1);
4202 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4203 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4204 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4205 gen_movl_TN_reg(rd, cpu_dst);
4206 break;
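/* The partitioned multiplies are left to helpers; gen_ne_fop_DDD just
   loads the two double FP sources, invokes the helper and stores the
   double result. */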
4207 case 0x031: /* VIS I fmul8x16 */
4208 CHECK_FPU_FEATURE(dc, VIS1);
4209 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4210 break;
4211 case 0x033: /* VIS I fmul8x16au */
4212 CHECK_FPU_FEATURE(dc, VIS1);
4213 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4214 break;
4215 case 0x035: /* VIS I fmul8x16al */
4216 CHECK_FPU_FEATURE(dc, VIS1);
4217 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4218 break;
4219 case 0x036: /* VIS I fmul8sux16 */
4220 CHECK_FPU_FEATURE(dc, VIS1);
4221 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4222 break;
4223 case 0x037: /* VIS I fmul8ulx16 */
4224 CHECK_FPU_FEATURE(dc, VIS1);
4225 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4226 break;
4227 case 0x038: /* VIS I fmuld8sux16 */
4228 CHECK_FPU_FEATURE(dc, VIS1);
4229 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4230 break;
4231 case 0x039: /* VIS I fmuld8ulx16 */
4232 CHECK_FPU_FEATURE(dc, VIS1);
4233 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4234 break;
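/* The pack operations depend on the scale field of GSR, so they either
   go through gen_gsr_fop_DDD or pass cpu_gsr to the helper explicitly,
   rather than using the GSR-less gen_ne_fop_* wrappers. */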
4235 case 0x03a: /* VIS I fpack32 */
4236 CHECK_FPU_FEATURE(dc, VIS1);
4237 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4238 break;
4239 case 0x03b: /* VIS I fpack16 */
4240 CHECK_FPU_FEATURE(dc, VIS1);
4241 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4242 cpu_dst_32 = gen_dest_fpr_F();
4243 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4244 gen_store_fpr_F(dc, rd, cpu_dst_32);
4245 break;
4246 case 0x03d: /* VIS I fpackfix */
4247 CHECK_FPU_FEATURE(dc, VIS1);
4248 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4249 cpu_dst_32 = gen_dest_fpr_F();
4250 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4251 gen_store_fpr_F(dc, rd, cpu_dst_32);
4252 break;
4253 case 0x03e: /* VIS I pdist */
4254 CHECK_FPU_FEATURE(dc, VIS1);
4255 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4256 break;
4257 case 0x048: /* VIS I faligndata */
4258 CHECK_FPU_FEATURE(dc, VIS1);
4259 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4260 break;
4261 case 0x04b: /* VIS I fpmerge */
4262 CHECK_FPU_FEATURE(dc, VIS1);
4263 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4264 break;
4265 case 0x04c: /* VIS II bshuffle */
4266 CHECK_FPU_FEATURE(dc, VIS2);
4267 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4268 break;
4269 case 0x04d: /* VIS I fexpand */
4270 CHECK_FPU_FEATURE(dc, VIS1);
4271 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4272 break;
4273 case 0x050: /* VIS I fpadd16 */
4274 CHECK_FPU_FEATURE(dc, VIS1);
4275 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4276 break;
4277 case 0x051: /* VIS I fpadd16s */
4278 CHECK_FPU_FEATURE(dc, VIS1);
4279 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4280 break;
4281 case 0x052: /* VIS I fpadd32 */
4282 CHECK_FPU_FEATURE(dc, VIS1);
4283 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4284 break;
4285 case 0x053: /* VIS I fpadd32s */
4286 CHECK_FPU_FEATURE(dc, VIS1);
4287 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4288 break;
4289 case 0x054: /* VIS I fpsub16 */
4290 CHECK_FPU_FEATURE(dc, VIS1);
4291 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4292 break;
4293 case 0x055: /* VIS I fpsub16s */
4294 CHECK_FPU_FEATURE(dc, VIS1);
4295 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4296 break;
4297 case 0x056: /* VIS I fpsub32 */
4298 CHECK_FPU_FEATURE(dc, VIS1);
4299 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4300 break;
4301 case 0x057: /* VIS I fpsub32s */
4302 CHECK_FPU_FEATURE(dc, VIS1);
4303 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4304 break;
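/* The VIS boolean operations map one-to-one onto TCG logical ops:
   fzero/fone materialize the constant functions, fsrc and fnot are
   plain moves and complements, and the trailing "s" selects the 32-bit
   single variant instead of the 64-bit double one. */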
4305 case 0x060: /* VIS I fzero */
4306 CHECK_FPU_FEATURE(dc, VIS1);
4307 cpu_dst_64 = gen_dest_fpr_D();
4308 tcg_gen_movi_i64(cpu_dst_64, 0);
4309 gen_store_fpr_D(dc, rd, cpu_dst_64);
4310 break;
4311 case 0x061: /* VIS I fzeros */
4312 CHECK_FPU_FEATURE(dc, VIS1);
4313 cpu_dst_32 = gen_dest_fpr_F();
4314 tcg_gen_movi_i32(cpu_dst_32, 0);
4315 gen_store_fpr_F(dc, rd, cpu_dst_32);
4316 break;
4317 case 0x062: /* VIS I fnor */
4318 CHECK_FPU_FEATURE(dc, VIS1);
4319 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4320 break;
4321 case 0x063: /* VIS I fnors */
4322 CHECK_FPU_FEATURE(dc, VIS1);
4323 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4324 break;
4325 case 0x064: /* VIS I fandnot2 */
4326 CHECK_FPU_FEATURE(dc, VIS1);
4327 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4328 break;
4329 case 0x065: /* VIS I fandnot2s */
4330 CHECK_FPU_FEATURE(dc, VIS1);
4331 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4332 break;
4333 case 0x066: /* VIS I fnot2 */
4334 CHECK_FPU_FEATURE(dc, VIS1);
4335 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4336 break;
4337 case 0x067: /* VIS I fnot2s */
4338 CHECK_FPU_FEATURE(dc, VIS1);
4339 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4340 break;
4341 case 0x068: /* VIS I fandnot1 */
4342 CHECK_FPU_FEATURE(dc, VIS1);
4343 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4344 break;
4345 case 0x069: /* VIS I fandnot1s */
4346 CHECK_FPU_FEATURE(dc, VIS1);
4347 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4348 break;
4349 case 0x06a: /* VIS I fnot1 */
4350 CHECK_FPU_FEATURE(dc, VIS1);
4351 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4352 break;
4353 case 0x06b: /* VIS I fnot1s */
4354 CHECK_FPU_FEATURE(dc, VIS1);
4355 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4356 break;
4357 case 0x06c: /* VIS I fxor */
4358 CHECK_FPU_FEATURE(dc, VIS1);
4359 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4360 break;
4361 case 0x06d: /* VIS I fxors */
4362 CHECK_FPU_FEATURE(dc, VIS1);
4363 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4364 break;
4365 case 0x06e: /* VIS I fnand */
4366 CHECK_FPU_FEATURE(dc, VIS1);
4367 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4368 break;
4369 case 0x06f: /* VIS I fnands */
4370 CHECK_FPU_FEATURE(dc, VIS1);
4371 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4372 break;
4373 case 0x070: /* VIS I fand */
4374 CHECK_FPU_FEATURE(dc, VIS1);
4375 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4376 break;
4377 case 0x071: /* VIS I fands */
4378 CHECK_FPU_FEATURE(dc, VIS1);
4379 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4380 break;
4381 case 0x072: /* VIS I fxnor */
4382 CHECK_FPU_FEATURE(dc, VIS1);
4383 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4384 break;
4385 case 0x073: /* VIS I fxnors */
4386 CHECK_FPU_FEATURE(dc, VIS1);
4387 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4388 break;
4389 case 0x074: /* VIS I fsrc1 */
4390 CHECK_FPU_FEATURE(dc, VIS1);
4391 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4392 gen_store_fpr_D(dc, rd, cpu_src1_64);
4393 break;
4394 case 0x075: /* VIS I fsrc1s */
4395 CHECK_FPU_FEATURE(dc, VIS1);
4396 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4397 gen_store_fpr_F(dc, rd, cpu_src1_32);
4398 break;
4399 case 0x076: /* VIS I fornot2 */
4400 CHECK_FPU_FEATURE(dc, VIS1);
4401 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4402 break;
4403 case 0x077: /* VIS I fornot2s */
4404 CHECK_FPU_FEATURE(dc, VIS1);
4405 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4406 break;
4407 case 0x078: /* VIS I fsrc2 */
4408 CHECK_FPU_FEATURE(dc, VIS1);
4409 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4410 gen_store_fpr_D(dc, rd, cpu_src1_64);
4411 break;
4412 case 0x079: /* VIS I fsrc2s */
4413 CHECK_FPU_FEATURE(dc, VIS1);
4414 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4415 gen_store_fpr_F(dc, rd, cpu_src1_32);
4416 break;
4417 case 0x07a: /* VIS I fornot1 */
4418 CHECK_FPU_FEATURE(dc, VIS1);
4419 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4420 break;
4421 case 0x07b: /* VIS I fornot1s */
4422 CHECK_FPU_FEATURE(dc, VIS1);
4423 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4424 break;
4425 case 0x07c: /* VIS I for */
4426 CHECK_FPU_FEATURE(dc, VIS1);
4427 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4428 break;
4429 case 0x07d: /* VIS I fors */
4430 CHECK_FPU_FEATURE(dc, VIS1);
4431 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4432 break;
4433 case 0x07e: /* VIS I fone */
4434 CHECK_FPU_FEATURE(dc, VIS1);
4435 cpu_dst_64 = gen_dest_fpr_D();
4436 tcg_gen_movi_i64(cpu_dst_64, -1);
4437 gen_store_fpr_D(dc, rd, cpu_dst_64);
4438 break;
4439 case 0x07f: /* VIS I fones */
4440 CHECK_FPU_FEATURE(dc, VIS1);
4441 cpu_dst_32 = gen_dest_fpr_F();
4442 tcg_gen_movi_i32(cpu_dst_32, -1);
4443 gen_store_fpr_F(dc, rd, cpu_dst_32);
4444 break;
4445 case 0x080: /* VIS I shutdown */
4446 case 0x081: /* VIS II siam */
4447 // XXX: not implemented; treated as illegal for now
4448 goto illegal_insn;
4449 default:
4450 goto illegal_insn;
4451 }
4452 #else
4453 goto ncp_insn;
4454 #endif
4455 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4456 #ifdef TARGET_SPARC64
4457 goto illegal_insn;
4458 #else
4459 goto ncp_insn;
4460 #endif
4461 #ifdef TARGET_SPARC64
4462 } else if (xop == 0x39) { /* V9 return */
4463 TCGv_i32 r_const;
4464
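/* "return" computes the target address, pops the register window via
   the restore helper, verifies that the target is 4-byte aligned, and
   installs it as a dynamic npc (a delayed control transfer, like
   jmpl). */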
4465 save_state(dc);
4466 cpu_src1 = get_src1(insn, cpu_src1);
4467 if (IS_IMM) { /* immediate */
4468 simm = GET_FIELDs(insn, 19, 31);
4469 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4470 } else { /* register */
4471 rs2 = GET_FIELD(insn, 27, 31);
4472 if (rs2) {
4473 gen_movl_reg_TN(rs2, cpu_src2);
4474 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4475 } else
4476 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4477 }
4478 gen_helper_restore(cpu_env);
4479 gen_mov_pc_npc(dc);
4480 r_const = tcg_const_i32(3);
4481 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4482 tcg_temp_free_i32(r_const);
4483 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4484 dc->npc = DYNAMIC_PC;
4485 goto jmp_insn;
4486 #endif
4487 } else {
4488 cpu_src1 = get_src1(insn, cpu_src1);
4489 if (IS_IMM) { /* immediate */
4490 simm = GET_FIELDs(insn, 19, 31);
4491 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4492 } else { /* register */
4493 rs2 = GET_FIELD(insn, 27, 31);
4494 if (rs2) {
4495 gen_movl_reg_TN(rs2, cpu_src2);
4496 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4497 } else
4498 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4499 }
4500 switch (xop) {
4501 case 0x38: /* jmpl */
4502 {
4503 TCGv r_pc;
4504 TCGv_i32 r_const;
4505
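/* jmpl saves its own address (dc->pc) into rd, then routes the
   computed target through npc so the delay slot executes first. */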
4506 r_pc = tcg_const_tl(dc->pc);
4507 gen_movl_TN_reg(rd, r_pc);
4508 tcg_temp_free(r_pc);
4509 gen_mov_pc_npc(dc);
4510 r_const = tcg_const_i32(3);
4511 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4512 tcg_temp_free_i32(r_const);
4513 gen_address_mask(dc, cpu_dst);
4514 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4515 dc->npc = DYNAMIC_PC;
4516 }
4517 goto jmp_insn;
4518 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4519 case 0x39: /* rett, V9 return */
4520 {
4521 TCGv_i32 r_const;
4522
4523 if (!supervisor(dc))
4524 goto priv_insn;
4525 gen_mov_pc_npc(dc);
4526 r_const = tcg_const_i32(3);
4527 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4528 tcg_temp_free_i32(r_const);
4529 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4530 dc->npc = DYNAMIC_PC;
4531 gen_helper_rett(cpu_env);
4532 }
4533 goto jmp_insn;
4534 #endif
4535 case 0x3b: /* flush */
4536 if (!(dc->def->features & CPU_FEATURE_FLUSH))
4537 goto unimp_flush;
4538 /* nop */
4539 break;
4540 case 0x3c: /* save */
4541 save_state(dc);
4542 gen_helper_save(cpu_env);
4543 gen_movl_TN_reg(rd, cpu_dst);
4544 break;
4545 case 0x3d: /* restore */
4546 save_state(dc);
4547 gen_helper_restore(cpu_env);
4548 gen_movl_TN_reg(rd, cpu_dst);
4549 break;
4550 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4551 case 0x3e: /* V9 done/retry */
4552 {
4553 switch (rd) {
4554 case 0:
4555 if (!supervisor(dc))
4556 goto priv_insn;
4557 dc->npc = DYNAMIC_PC;
4558 dc->pc = DYNAMIC_PC;
4559 gen_helper_done(cpu_env);
4560 goto jmp_insn;
4561 case 1:
4562 if (!supervisor(dc))
4563 goto priv_insn;
4564 dc->npc = DYNAMIC_PC;
4565 dc->pc = DYNAMIC_PC;
4566 gen_helper_retry(cpu_env);
4567 goto jmp_insn;
4568 default:
4569 goto illegal_insn;
4570 }
4571 }
4572 break;
4573 #endif
4574 default:
4575 goto illegal_insn;
4576 }
4577 }
4578 break;
4579 }
4580 break;
4581 case 3: /* load/store instructions */
4582 {
4583 unsigned int xop = GET_FIELD(insn, 7, 12);
4584
4585 /* flush pending conditional evaluations before exposing
4586 cpu state */
4587 if (dc->cc_op != CC_OP_FLAGS) {
4588 dc->cc_op = CC_OP_FLAGS;
4589 gen_helper_compute_psr(cpu_env);
4590 }
4591 cpu_src1 = get_src1(insn, cpu_src1);
4592 if (xop == 0x3c || xop == 0x3e) { // V9 casa/casxa
4593 rs2 = GET_FIELD(insn, 27, 31);
4594 gen_movl_reg_TN(rs2, cpu_src2);
4595 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4596 } else if (IS_IMM) { /* immediate */
4597 simm = GET_FIELDs(insn, 19, 31);
4598 tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
4599 } else { /* register */
4600 rs2 = GET_FIELD(insn, 27, 31);
4601 if (rs2 != 0) {
4602 gen_movl_reg_TN(rs2, cpu_src2);
4603 tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
4604 } else
4605 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4606 }
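/* cpu_addr now holds the effective address: rs1 + simm13 for the
   immediate form, rs1 + rs2 for the register form (with the add elided
   when rs2 is %g0), while casa/casxa take the address from rs1 alone
   and keep rs2 as the comparison value. E.g. "ld [%i0 + 8], %o0" takes
   the immediate path. */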
4607 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4608 (xop > 0x17 && xop <= 0x1d ) ||
4609 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4610 switch (xop) {
4611 case 0x0: /* ld, V9 lduw, load unsigned word */
4612 gen_address_mask(dc, cpu_addr);
4613 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4614 break;
4615 case 0x1: /* ldub, load unsigned byte */
4616 gen_address_mask(dc, cpu_addr);
4617 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4618 break;
4619 case 0x2: /* lduh, load unsigned halfword */
4620 gen_address_mask(dc, cpu_addr);
4621 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4622 break;
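/* ldd requires an even rd: the most significant word of the 64-bit
   load goes to rd and the least significant word to rd + 1, so e.g.
   "ldd [%o0], %o2" fills %o2 with the high word and %o3 with the low
   word. */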
4623 case 0x3: /* ldd, load double word */
4624 if (rd & 1)
4625 goto illegal_insn;
4626 else {
4627 TCGv_i32 r_const;
4628
4629 save_state(dc);
4630 r_const = tcg_const_i32(7);
4631 /* XXX remove alignment check */
4632 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4633 tcg_temp_free_i32(r_const);
4634 gen_address_mask(dc, cpu_addr);
4635 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4636 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
4637 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
4638 gen_movl_TN_reg(rd + 1, cpu_tmp0);
4639 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
4640 tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
4641 tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
4642 }
4643 break;
4644 case 0x9: /* ldsb, load signed byte */
4645 gen_address_mask(dc, cpu_addr);
4646 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4647 break;
4648 case 0xa: /* ldsh, load signed halfword */
4649 gen_address_mask(dc, cpu_addr);
4650 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4651 break;
4652 case 0xd: /* ldstub -- XXX: should be atomic */
4653 {
4654 TCGv r_const;
4655
4656 gen_address_mask(dc, cpu_addr);
4657 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4658 r_const = tcg_const_tl(0xff);
4659 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4660 tcg_temp_free(r_const);
4661 }
4662 break;
4663 case 0x0f: /* swap, swap register with memory -- XXX:
4664 should be atomic */
4665 CHECK_IU_FEATURE(dc, SWAP);
4666 gen_movl_reg_TN(rd, cpu_val);
4667 gen_address_mask(dc, cpu_addr);
4668 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4669 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4670 tcg_gen_mov_tl(cpu_val, cpu_tmp0);
4671 break;
4672 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4673 case 0x10: /* lda, V9 lduwa, load word alternate */
4674 #ifndef TARGET_SPARC64
4675 if (IS_IMM)
4676 goto illegal_insn;
4677 if (!supervisor(dc))
4678 goto priv_insn;
4679 #endif
4680 save_state(dc);
4681 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4682 break;
4683 case 0x11: /* lduba, load unsigned byte alternate */
4684 #ifndef TARGET_SPARC64
4685 if (IS_IMM)
4686 goto illegal_insn;
4687 if (!supervisor(dc))
4688 goto priv_insn;
4689 #endif
4690 save_state(dc);
4691 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4692 break;
4693 case 0x12: /* lduha, load unsigned halfword alternate */
4694 #ifndef TARGET_SPARC64
4695 if (IS_IMM)
4696 goto illegal_insn;
4697 if (!supervisor(dc))
4698 goto priv_insn;
4699 #endif
4700 save_state(dc);
4701 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4702 break;
4703 case 0x13: /* ldda, load double word alternate */
4704 #ifndef TARGET_SPARC64
4705 if (IS_IMM)
4706 goto illegal_insn;
4707 if (!supervisor(dc))
4708 goto priv_insn;
4709 #endif
4710 if (rd & 1)
4711 goto illegal_insn;
4712 save_state(dc);
4713 gen_ldda_asi(cpu_val, cpu_addr, insn, rd);
4714 goto skip_move;
4715 case 0x19: /* ldsba, load signed byte alternate */
4716 #ifndef TARGET_SPARC64
4717 if (IS_IMM)
4718 goto illegal_insn;
4719 if (!supervisor(dc))
4720 goto priv_insn;
4721 #endif
4722 save_state(dc);
4723 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4724 break;
4725 case 0x1a: /* ldsha, load signed halfword alternate */
4726 #ifndef TARGET_SPARC64
4727 if (IS_IMM)
4728 goto illegal_insn;
4729 if (!supervisor(dc))
4730 goto priv_insn;
4731 #endif
4732 save_state(dc);
4733 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4734 break;
4735 case 0x1d: /* ldstuba -- XXX: should be atomic */
4736 #ifndef TARGET_SPARC64
4737 if (IS_IMM)
4738 goto illegal_insn;
4739 if (!supervisor(dc))
4740 goto priv_insn;
4741 #endif
4742 save_state(dc);
4743 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4744 break;
4745 case 0x1f: /* swapa, swap reg with alt. memory -- XXX:
4746 should be atomic */
4747 CHECK_IU_FEATURE(dc, SWAP);
4748 #ifndef TARGET_SPARC64
4749 if (IS_IMM)
4750 goto illegal_insn;
4751 if (!supervisor(dc))
4752 goto priv_insn;
4753 #endif
4754 save_state(dc);
4755 gen_movl_reg_TN(rd, cpu_val);
4756 gen_swap_asi(cpu_val, cpu_addr, insn);
4757 break;
4758
4759 #ifndef TARGET_SPARC64
4760 case 0x30: /* ldc */
4761 case 0x31: /* ldcsr */
4762 case 0x33: /* lddc */
4763 goto ncp_insn;
4764 #endif
4765 #endif
4766 #ifdef TARGET_SPARC64
4767 case 0x08: /* V9 ldsw */
4768 gen_address_mask(dc, cpu_addr);
4769 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4770 break;
4771 case 0x0b: /* V9 ldx */
4772 gen_address_mask(dc, cpu_addr);
4773 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4774 break;
4775 case 0x18: /* V9 ldswa */
4776 save_state(dc);
4777 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4778 break;
4779 case 0x1b: /* V9 ldxa */
4780 save_state(dc);
4781 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4782 break;
4783 case 0x2d: /* V9 prefetch, no effect */
4784 goto skip_move;
4785 case 0x30: /* V9 ldfa */
4786 if (gen_trap_ifnofpu(dc)) {
4787 goto jmp_insn;
4788 }
4789 save_state(dc);
4790 gen_ldf_asi(cpu_addr, insn, 4, rd);
4791 gen_update_fprs_dirty(rd);
4792 goto skip_move;
4793 case 0x33: /* V9 lddfa */
4794 if (gen_trap_ifnofpu(dc)) {
4795 goto jmp_insn;
4796 }
4797 save_state(dc);
4798 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4799 gen_update_fprs_dirty(DFPREG(rd));
4800 goto skip_move;
4801 case 0x3d: /* V9 prefetcha, no effect */
4802 goto skip_move;
4803 case 0x32: /* V9 ldqfa */
4804 CHECK_FPU_FEATURE(dc, FLOAT128);
4805 if (gen_trap_ifnofpu(dc)) {
4806 goto jmp_insn;
4807 }
4808 save_state(dc);
4809 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4810 gen_update_fprs_dirty(QFPREG(rd));
4811 goto skip_move;
4812 #endif
4813 default:
4814 goto illegal_insn;
4815 }
4816 gen_movl_TN_reg(rd, cpu_val);
4817 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4818 skip_move: ;
4819 #endif
4820 } else if (xop >= 0x20 && xop < 0x24) {
4821 if (gen_trap_ifnofpu(dc)) {
4822 goto jmp_insn;
4823 }
4824 save_state(dc);
4825 switch (xop) {
4826 case 0x20: /* ldf, load fpreg */
4827 gen_address_mask(dc, cpu_addr);
4828 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4829 cpu_dst_32 = gen_dest_fpr_F();
4830 tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
4831 gen_store_fpr_F(dc, rd, cpu_dst_32);
4832 break;
4833 case 0x21: /* ldfsr, V9 ldxfsr */
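/* On V9 the rd field selects the width: rd == 1 is the 64-bit ldxfsr,
   rd == 0 the 32-bit ldfsr. */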
4834 #ifdef TARGET_SPARC64
4835 gen_address_mask(dc, cpu_addr);
4836 if (rd == 1) {
4837 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4838 gen_helper_ldxfsr(cpu_env, cpu_tmp64);
4839 } else {
4840 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4841 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
4842 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4843 }
4844 #else
4845 {
4846 tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
4847 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4848 }
4849 #endif
4850 break;
4851 case 0x22: /* ldqf, load quad fpreg */
4852 {
4853 TCGv_i32 r_const;
4854
4855 CHECK_FPU_FEATURE(dc, FLOAT128);
4856 r_const = tcg_const_i32(dc->mem_idx);
4857 gen_address_mask(dc, cpu_addr);
4858 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4859 tcg_temp_free_i32(r_const);
4860 gen_op_store_QT0_fpr(QFPREG(rd));
4861 gen_update_fprs_dirty(QFPREG(rd));
4862 }
4863 break;
4864 case 0x23: /* lddf, load double fpreg */
4865 gen_address_mask(dc, cpu_addr);
4866 cpu_dst_64 = gen_dest_fpr_D();
4867 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4868 gen_store_fpr_D(dc, rd, cpu_dst_64);
4869 break;
4870 default:
4871 goto illegal_insn;
4872 }
4873 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4874 xop == 0xe || xop == 0x1e) {
4875 gen_movl_reg_TN(rd, cpu_val);
4876 switch (xop) {
4877 case 0x4: /* st, store word */
4878 gen_address_mask(dc, cpu_addr);
4879 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4880 break;
4881 case 0x5: /* stb, store byte */
4882 gen_address_mask(dc, cpu_addr);
4883 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4884 break;
4885 case 0x6: /* sth, store halfword */
4886 gen_address_mask(dc, cpu_addr);
4887 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4888 break;
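/* std mirrors ldd: rd must be even, and the even register supplies the
   high word while rd + 1 supplies the low word of the single 64-bit
   store. */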
4889 case 0x7: /* std, store double word */
4890 if (rd & 1)
4891 goto illegal_insn;
4892 else {
4893 TCGv_i32 r_const;
4894
4895 save_state(dc);
4896 gen_address_mask(dc, cpu_addr);
4897 r_const = tcg_const_i32(7);
4898 /* XXX remove alignment check */
4899 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4900 tcg_temp_free_i32(r_const);
4901 gen_movl_reg_TN(rd + 1, cpu_tmp0);
4902 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, cpu_val);
4903 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
4904 }
4905 break;
4906 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4907 case 0x14: /* sta, V9 stwa, store word alternate */
4908 #ifndef TARGET_SPARC64
4909 if (IS_IMM)
4910 goto illegal_insn;
4911 if (!supervisor(dc))
4912 goto priv_insn;
4913 #endif
4914 save_state(dc);
4915 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4916 dc->npc = DYNAMIC_PC;
4917 break;
4918 case 0x15: /* stba, store byte alternate */
4919 #ifndef TARGET_SPARC64
4920 if (IS_IMM)
4921 goto illegal_insn;
4922 if (!supervisor(dc))
4923 goto priv_insn;
4924 #endif
4925 save_state(dc);
4926 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4927 dc->npc = DYNAMIC_PC;
4928 break;
4929 case 0x16: /* stha, store halfword alternate */
4930 #ifndef TARGET_SPARC64
4931 if (IS_IMM)
4932 goto illegal_insn;
4933 if (!supervisor(dc))
4934 goto priv_insn;
4935 #endif
4936 save_state(dc);
4937 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4938 dc->npc = DYNAMIC_PC;
4939 break;
4940 case 0x17: /* stda, store double word alternate */
4941 #ifndef TARGET_SPARC64
4942 if (IS_IMM)
4943 goto illegal_insn;
4944 if (!supervisor(dc))
4945 goto priv_insn;
4946 #endif
4947 if (rd & 1)
4948 goto illegal_insn;
4949 else {
4950 save_state(dc);
4951 gen_stda_asi(cpu_val, cpu_addr, insn, rd);
4952 }
4953 break;
4954 #endif
4955 #ifdef TARGET_SPARC64
4956 case 0x0e: /* V9 stx */
4957 gen_address_mask(dc, cpu_addr);
4958 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4959 break;
4960 case 0x1e: /* V9 stxa */
4961 save_state(dc);
4962 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4963 dc->npc = DYNAMIC_PC;
4964 break;
4965 #endif
4966 default:
4967 goto illegal_insn;
4968 }
4969 } else if (xop > 0x23 && xop < 0x28) {
4970 if (gen_trap_ifnofpu(dc)) {
4971 goto jmp_insn;
4972 }
4973 save_state(dc);
4974 switch (xop) {
4975 case 0x24: /* stf, store fpreg */
4976 gen_address_mask(dc, cpu_addr);
4977 cpu_src1_32 = gen_load_fpr_F(dc, rd);
4978 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
4979 tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
4980 break;
4981 case 0x25: /* stfsr, V9 stxfsr */
4982 #ifdef TARGET_SPARC64
4983 gen_address_mask(dc, cpu_addr);
4984 tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
4985 if (rd == 1)
4986 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
4987 else
4988 tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
4989 #else
4990 tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
4991 tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
4992 #endif
4993 break;
4994 case 0x26:
4995 #ifdef TARGET_SPARC64
4996 /* V9 stqf, store quad fpreg */
4997 {
4998 TCGv_i32 r_const;
4999
5000 CHECK_FPU_FEATURE(dc, FLOAT128);
5001 gen_op_load_fpr_QT0(QFPREG(rd));
5002 r_const = tcg_const_i32(dc->mem_idx);
5003 gen_address_mask(dc, cpu_addr);
5004 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5005 tcg_temp_free_i32(r_const);
5006 }
5007 break;
5008 #else /* !TARGET_SPARC64 */
5009 /* stdfq, store floating point queue */
5010 #if defined(CONFIG_USER_ONLY)
5011 goto illegal_insn;
5012 #else
5013 if (!supervisor(dc))
5014 goto priv_insn;
5015 if (gen_trap_ifnofpu(dc)) {
5016 goto jmp_insn;
5017 }
5018 goto nfq_insn;
5019 #endif
5020 #endif
5021 case 0x27: /* stdf, store double fpreg */
5022 gen_address_mask(dc, cpu_addr);
5023 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5024 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5025 break;
5026 default:
5027 goto illegal_insn;
5028 }
5029 } else if (xop > 0x33 && xop < 0x3f) {
5030 save_state(dc);
5031 switch (xop) {
5032 #ifdef TARGET_SPARC64
5033 case 0x34: /* V9 stfa */
5034 if (gen_trap_ifnofpu(dc)) {
5035 goto jmp_insn;
5036 }
5037 gen_stf_asi(cpu_addr, insn, 4, rd);
5038 break;
5039 case 0x36: /* V9 stqfa */
5040 {
5041 TCGv_i32 r_const;
5042
5043 CHECK_FPU_FEATURE(dc, FLOAT128);
5044 if (gen_trap_ifnofpu(dc)) {
5045 goto jmp_insn;
5046 }
5047 r_const = tcg_const_i32(7);
5048 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5049 tcg_temp_free_i32(r_const);
5050 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5051 }
5052 break;
5053 case 0x37: /* V9 stdfa */
5054 if (gen_trap_ifnofpu(dc)) {
5055 goto jmp_insn;
5056 }
5057 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5058 break;
5059 case 0x3c: /* V9 casa */
5060 gen_cas_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
5061 gen_movl_TN_reg(rd, cpu_val);
5062 break;
5063 case 0x3e: /* V9 casxa */
5064 gen_casx_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
5065 gen_movl_TN_reg(rd, cpu_val);
5066 break;
5067 #else
5068 case 0x34: /* stc */
5069 case 0x35: /* stcsr */
5070 case 0x36: /* stdcq */
5071 case 0x37: /* stdc */
5072 goto ncp_insn;
5073 #endif
5074 default:
5075 goto illegal_insn;
5076 }
5077 } else
5078 goto illegal_insn;
5079 }
5080 break;
5081 }
5082 /* default epilogue for non-jump instructions: advance the pc/npc
pair, honouring any pending dynamic or conditional npc */
5083 if (dc->npc == DYNAMIC_PC) {
5084 dc->pc = DYNAMIC_PC;
5085 gen_op_next_insn();
5086 } else if (dc->npc == JUMP_PC) {
5087 /* we can do a static jump */
5088 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5089 dc->is_br = 1;
5090 } else {
5091 dc->pc = dc->npc;
5092 dc->npc = dc->npc + 4;
5093 }
5094 jmp_insn:
5095 goto egress;
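/* Each exception stub below flushes the cpu state, raises the
   corresponding trap and marks the TB as finished. */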
5096 illegal_insn:
5097 {
5098 TCGv_i32 r_const;
5099
5100 save_state(dc);
5101 r_const = tcg_const_i32(TT_ILL_INSN);
5102 gen_helper_raise_exception(cpu_env, r_const);
5103 tcg_temp_free_i32(r_const);
5104 dc->is_br = 1;
5105 }
5106 goto egress;
5107 unimp_flush:
5108 {
5109 TCGv_i32 r_const;
5110
5111 save_state(dc);
5112 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5113 gen_helper_raise_exception(cpu_env, r_const);
5114 tcg_temp_free_i32(r_const);
5115 dc->is_br = 1;
5116 }
5117 goto egress;
5118 #if !defined(CONFIG_USER_ONLY)
5119 priv_insn:
5120 {
5121 TCGv_i32 r_const;
5122
5123 save_state(dc);
5124 r_const = tcg_const_i32(TT_PRIV_INSN);
5125 gen_helper_raise_exception(cpu_env, r_const);
5126 tcg_temp_free_i32(r_const);
5127 dc->is_br = 1;
5128 }
5129 goto egress;
5130 #endif
5131 nfpu_insn:
5132 save_state(dc);
5133 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5134 dc->is_br = 1;
5135 goto egress;
5136 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5137 nfq_insn:
5138 save_state(dc);
5139 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5140 dc->is_br = 1;
5141 goto egress;
5142 #endif
5143 #ifndef TARGET_SPARC64
5144 ncp_insn:
5145 {
5146 TCGv_i32 r_const;
5147
5148 save_state(dc);
5149 r_const = tcg_const_i32(TT_NCP_INSN);
5150 gen_helper_raise_exception(cpu_env, r_const);
5151 tcg_temp_free_i32(r_const);
5152 dc->is_br = 1;
5153 }
5154 goto egress;
5155 #endif
5156 egress:
5157 tcg_temp_free(cpu_tmp1);
5158 tcg_temp_free(cpu_tmp2);
5159 if (dc->n_t32 != 0) {
5160 int i;
5161 for (i = dc->n_t32 - 1; i >= 0; --i) {
5162 tcg_temp_free_i32(dc->t32[i]);
5163 }
5164 dc->n_t32 = 0;
5165 }
5166 }
5167
5168 static inline void gen_intermediate_code_internal(TranslationBlock * tb,
5169 int spc, CPUSPARCState *env)
5170 {
5171 target_ulong pc_start, last_pc;
5172 uint16_t *gen_opc_end;
5173 DisasContext dc1, *dc = &dc1;
5174 CPUBreakpoint *bp;
5175 int j, lj = -1;
5176 int num_insns;
5177 int max_insns;
5178 unsigned int insn;
5179
5180 memset(dc, 0, sizeof(DisasContext));
5181 dc->tb = tb;
5182 pc_start = tb->pc;
5183 dc->pc = pc_start;
5184 last_pc = dc->pc;
5185 dc->npc = (target_ulong) tb->cs_base;
5186 dc->cc_op = CC_OP_DYNAMIC;
5187 dc->mem_idx = cpu_mmu_index(env);
5188 dc->def = env->def;
5189 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5190 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5191 dc->singlestep = (env->singlestep_enabled || singlestep);
5192 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
5193
5194 cpu_tmp0 = tcg_temp_new();
5195 cpu_tmp32 = tcg_temp_new_i32();
5196 cpu_tmp64 = tcg_temp_new_i64();
5197
5198 cpu_dst = tcg_temp_local_new();
5199
5200 // loads and stores
5201 cpu_val = tcg_temp_local_new();
5202 cpu_addr = tcg_temp_local_new();
5203
5204 num_insns = 0;
5205 max_insns = tb->cflags & CF_COUNT_MASK;
5206 if (max_insns == 0)
5207 max_insns = CF_COUNT_MASK;
5208 gen_icount_start();
5209 do {
5210 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
5211 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5212 if (bp->pc == dc->pc) {
5213 if (dc->pc != pc_start)
5214 save_state(dc);
5215 gen_helper_debug(cpu_env);
5216 tcg_gen_exit_tb(0);
5217 dc->is_br = 1;
5218 goto exit_gen_loop;
5219 }
5220 }
5221 }
5222 if (spc) {
5223 qemu_log("Search PC...\n");
5224 j = gen_opc_ptr - gen_opc_buf;
5225 if (lj < j) {
5226 lj++;
5227 while (lj < j)
5228 gen_opc_instr_start[lj++] = 0;
5229 gen_opc_pc[lj] = dc->pc;
5230 gen_opc_npc[lj] = dc->npc;
5231 gen_opc_instr_start[lj] = 1;
5232 gen_opc_icount[lj] = num_insns;
5233 }
5234 }
5235 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
5236 gen_io_start();
5237 last_pc = dc->pc;
5238 insn = cpu_ldl_code(env, dc->pc);
5239 disas_sparc_insn(dc, insn);
5240 num_insns++;
5241
5242 if (dc->is_br)
5243 break;
5244 /* if the next PC is different, we abort now */
5245 if (dc->pc != (last_pc + 4))
5246 break;
5247 /* if we reach a page boundary, we stop generation so that the
5248 PC of a TT_TFAULT exception is always in the right page */
5249 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5250 break;
5251 /* if single step mode, we generate only one instruction and
5252 generate an exception */
5253 if (dc->singlestep) {
5254 break;
5255 }
5256 } while ((gen_opc_ptr < gen_opc_end) &&
5257 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5258 num_insns < max_insns);
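/* Besides the explicit breaks above, translation is bounded by the
   opcode buffer, by a per-page budget (with a small safety margin) and
   by the icount limit. */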
5259
5260 exit_gen_loop:
5261 tcg_temp_free(cpu_addr);
5262 tcg_temp_free(cpu_val);
5263 tcg_temp_free(cpu_dst);
5264 tcg_temp_free_i64(cpu_tmp64);
5265 tcg_temp_free_i32(cpu_tmp32);
5266 tcg_temp_free(cpu_tmp0);
5267
5268 if (tb->cflags & CF_LAST_IO)
5269 gen_io_end();
5270 if (!dc->is_br) {
5271 if (dc->pc != DYNAMIC_PC &&
5272 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5273 /* static PC and NPC: we can use direct chaining */
5274 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5275 } else {
5276 if (dc->pc != DYNAMIC_PC)
5277 tcg_gen_movi_tl(cpu_pc, dc->pc);
5278 save_npc(dc);
5279 tcg_gen_exit_tb(0);
5280 }
5281 }
5282 gen_icount_end(tb, num_insns);
5283 *gen_opc_ptr = INDEX_op_end;
5284 if (spc) {
5285 j = gen_opc_ptr - gen_opc_buf;
5286 lj++;
5287 while (lj <= j)
5288 gen_opc_instr_start[lj++] = 0;
5289 #if 0
5290 log_page_dump();
5291 #endif
5292 gen_opc_jump_pc[0] = dc->jump_pc[0];
5293 gen_opc_jump_pc[1] = dc->jump_pc[1];
5294 } else {
5295 tb->size = last_pc + 4 - pc_start;
5296 tb->icount = num_insns;
5297 }
5298 #ifdef DEBUG_DISAS
5299 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5300 qemu_log("--------------\n");
5301 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5302 log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
5303 qemu_log("\n");
5304 }
5305 #endif
5306 }
5307
5308 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5309 {
5310 gen_intermediate_code_internal(tb, 0, env);
5311 }
5312
5313 void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
5314 {
5315 gen_intermediate_code_internal(tb, 1, env);
5316 }
5317
5318 void gen_intermediate_code_init(CPUSPARCState *env)
5319 {
5320 unsigned int i;
5321 static int inited;
5322 static const char * const gregnames[8] = {
5323 NULL, // g0 not used
5324 "g1",
5325 "g2",
5326 "g3",
5327 "g4",
5328 "g5",
5329 "g6",
5330 "g7",
5331 };
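/* The FP register file is modelled as TARGET_DPREGS 64-bit values,
   each backing a pair of 32-bit registers, hence only the even
   single-precision names appear here. */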
5332 static const char * const fregnames[32] = {
5333 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5334 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5335 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5336 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5337 };
5338
5339 /* init various static tables */
5340 if (!inited) {
5341 inited = 1;
5342
5343 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5344 cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
5345 offsetof(CPUSPARCState, regwptr),
5346 "regwptr");
5347 #ifdef TARGET_SPARC64
5348 cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
5349 "xcc");
5350 cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
5351 "asi");
5352 cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
5353 "fprs");
5354 cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
5355 "gsr");
5356 cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
5357 offsetof(CPUSPARCState, tick_cmpr),
5358 "tick_cmpr");
5359 cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
5360 offsetof(CPUSPARCState, stick_cmpr),
5361 "stick_cmpr");
5362 cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
5363 offsetof(CPUSPARCState, hstick_cmpr),
5364 "hstick_cmpr");
5365 cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
5366 "hintp");
5367 cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
5368 "htba");
5369 cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
5370 "hver");
5371 cpu_ssr = tcg_global_mem_new(TCG_AREG0,
5372 offsetof(CPUSPARCState, ssr), "ssr");
5373 cpu_ver = tcg_global_mem_new(TCG_AREG0,
5374 offsetof(CPUSPARCState, version), "ver");
5375 cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
5376 offsetof(CPUSPARCState, softint),
5377 "softint");
5378 #else
5379 cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
5380 "wim");
5381 #endif
5382 cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
5383 "cond");
5384 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
5385 "cc_src");
5386 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
5387 offsetof(CPUSPARCState, cc_src2),
5388 "cc_src2");
5389 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
5390 "cc_dst");
5391 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
5392 "cc_op");
5393 cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
5394 "psr");
5395 cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
5396 "fsr");
5397 cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
5398 "pc");
5399 cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
5400 "npc");
5401 cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
5402 #ifndef CONFIG_USER_ONLY
5403 cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
5404 "tbr");
5405 #endif
5406 for (i = 1; i < 8; i++) {
5407 cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
5408 offsetof(CPUSPARCState, gregs[i]),
5409 gregnames[i]);
5410 }
5411 for (i = 0; i < TARGET_DPREGS; i++) {
5412 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
5413 offsetof(CPUSPARCState, fpr[i]),
5414 fregnames[i]);
5415 }
5416
5417 /* register helpers */
5418
5419 #define GEN_HELPER 2
5420 #include "helper.h"
5421 }
5422 }
5423
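/* Rewind pc/npc from the side tables filled in during translation;
   called when an exception must be resolved to a precise guest PC in
   the middle of a TB. */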
5424 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
5425 {
5426 target_ulong npc;
5427 env->pc = gen_opc_pc[pc_pos];
5428 npc = gen_opc_npc[pc_pos];
5429 if (npc == DYNAMIC_PC) {
5430 /* dynamic NPC: already stored */
5431 } else if (npc == JUMP_PC) {
5432 /* jump PC: use 'cond' and the jump targets of the translation */
5433 if (env->cond) {
5434 env->npc = gen_opc_jump_pc[0];
5435 } else {
5436 env->npc = gen_opc_jump_pc[1];
5437 }
5438 } else {
5439 env->npc = npc;
5440 }
5441
5442 /* flush pending conditional evaluations before exposing cpu state */
5443 if (CC_OP != CC_OP_FLAGS) {
5444 helper_compute_psr(env);
5445 }
5446 }