/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This macro uses non-native bit order (bit 0 is the MSB)
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
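
/* Worked example: in a Bicc instruction the 4-bit condition field lives in
   bits 28..25 (conventional numbering).  With the big-endian numbering above
   that is GET_FIELD(insn, 3, 6), i.e. (insn >> 25) & 0xf; the same field via
   the manual's numbering is GET_FIELD_SP(insn, 25, 28).  */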

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
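
/* On SPARC64 the odd bit of the 5-bit register field selects the upper bank:
   e.g. double register %d32 is encoded as field value 1, and DFPREG(1) folds
   that low bit into bit 5 to yield 32.  */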

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
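
/* For instance, a 13-bit immediate of 0x1fff sign-extends to -1:
   sign_extend(0x1fff, 13) shifts left and then arithmetically right by 19,
   replicating the top bit of the field.  */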

#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
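
/* The two bits set above correspond to FPRS.DL (dirty lower, registers below
   %f32) and FPRS.DU (dirty upper) in the V9 FPRS register.  */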

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

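/* %g0 is hardwired to zero: reads materialise a fresh zeroed temp and stores
   are simply dropped, so the helpers below never touch cpu_regs[0].  */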
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}

static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
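
/* Direct chaining is only safe in system mode while both target PCs stay on
   the page the TB was translated from, since guest mappings can change; in
   user mode any target is acceptable.  Passing (uintptr_t)tb + tb_num to
   exit_tb encodes, in the pointer's low bits, which goto_tb slot the main
   loop should patch once the target TB exists.  */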

// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
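
/* These four isolate the V8 PSR icc bits, which sit at N=23, Z=22, V=21 and
   C=20 (the *_SHIFT constants above), each shifted down to bit 0.  */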

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

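/* ADDX/SUBX (add/subtract with the icc carry) pick the cheapest way to
   recover C from the last cc-setting operation: a known-zero carry
   degenerates to a plain add/sub, the unsigned wrap test above (for
   r = a + b, carry out iff r < a) recomputes it from the saved operands,
   and anything else falls back to the compute_C_icc helper.  */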
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the subtraction that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

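/* MULScc: one step of the V8 iterative multiply.  The multiplier sits in %y;
   each step conditionally adds rs2 (when Y's LSB is set) to the right-shifted
   rs1 (with N^V entering the vacated MSB), while Y shifts right taking rs1's
   old LSB.  */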
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

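/* Annulled conditional branch: when taken, the delay slot at npc executes and
   control continues at the target; when not taken, the delay slot is annulled
   and execution resumes at npc + 4.  */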
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

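/* Non-annulled conditional branch: the delay slot always executes, so only
   npc is in doubt.  While npc is still static the choice is deferred via
   JUMP_PC/jump_pc[]; otherwise it is resolved immediately with a movcond.  */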
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

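/* Materialise a pending JUMP_PC: npc = cpu_cond ? jump_pc[0] : jump_pc[1].  */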
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }
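    /* The offsets above shift from fcc0 (FSR bits 11:10) to the V9 fields
       fcc1, fcc2 and fcc3 at bits 33:32, 35:34 and 37:36 respectively.  */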

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

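/* Bicc/FBfcc: the annul bit also changes the unconditional cases.  "bn,a"
   skips the delay slot outright, and "ba,a" annuls the delay slot and jumps
   straight to the target; both special cases are folded at translation
   time below.  */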
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif

static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
2142 r_val = tcg_temp_new_i64();
2143 tcg_gen_extu_tl_i64(r_val, src);
2144 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2145 tcg_temp_free_i64(r_val);
2146 tcg_temp_free_i32(r_size);
2147 tcg_temp_free_i32(r_asi);
2148 tcg_gen_trunc_i64_tl(dst, t64);
2149 tcg_temp_free_i64(t64);
2150 }
2151
2152 static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2153 int insn, int rd)
2154 {
2155 TCGv_i32 r_asi, r_size, r_sign;
2156 TCGv t;
2157 TCGv_i64 t64;
2158
2159 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2160 r_size = tcg_const_i32(8);
2161 r_sign = tcg_const_i32(0);
2162 t64 = tcg_temp_new_i64();
2163 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
2164 tcg_temp_free_i32(r_sign);
2165 tcg_temp_free_i32(r_size);
2166 tcg_temp_free_i32(r_asi);
2167
2168 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2169 whereby "rd + 1" elicits "error: array subscript is above array".
2170 Since we have already asserted that rd is even, the semantics
2171 are unchanged. */
2172 t = gen_dest_gpr(dc, rd | 1);
2173 tcg_gen_trunc_i64_tl(t, t64);
2174 gen_store_gpr(dc, rd | 1, t);
2175
2176 tcg_gen_shri_i64(t64, t64, 32);
2177 tcg_gen_trunc_i64_tl(hi, t64);
2178 tcg_temp_free_i64(t64);
2179 gen_store_gpr(dc, rd, hi);
2180 }
2181
2182 static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2183 int insn, int rd)
2184 {
2185 TCGv_i32 r_asi, r_size;
2186 TCGv lo = gen_load_gpr(dc, rd + 1);
2187 TCGv_i64 t64 = tcg_temp_new_i64();
2188
2189 tcg_gen_concat_tl_i64(t64, lo, hi);
2190 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2191 r_size = tcg_const_i32(8);
2192 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
2193 tcg_temp_free_i32(r_size);
2194 tcg_temp_free_i32(r_asi);
2195 tcg_temp_free_i64(t64);
2196 }
2197 #endif
2198
2199 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2200 static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
2201 TCGv val2, int insn, int rd)
2202 {
2203 TCGv val1 = gen_load_gpr(dc, rd);
2204 TCGv dst = gen_dest_gpr(dc, rd);
2205 #ifdef TARGET_SPARC64
2206 TCGv_i32 r_asi = gen_get_asi(insn, addr);
2207 #else
2208 TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2209 #endif
2210
2211 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2212 tcg_temp_free_i32(r_asi);
2213 gen_store_gpr(dc, rd, dst);
2214 }
2215
2216 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2217 {
2218 TCGv_i64 r_val;
2219 TCGv_i32 r_asi, r_size;
2220
2221 gen_ld_asi(dst, addr, insn, 1, 0);
2222
2223 r_val = tcg_const_i64(0xffULL);
2224 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2225 r_size = tcg_const_i32(1);
2226 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2227 tcg_temp_free_i32(r_size);
2228 tcg_temp_free_i32(r_asi);
2229 tcg_temp_free_i64(r_val);
2230 }
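
/* LDSTUB is SPARC's test-and-set primitive: it reads a byte and sets the
   location to 0xff, the classic building block for spin locks (loop while
   the loaded value is 0xff). As with SWAPA above, the load/store pair
   emitted here is not a single atomic operation. */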
2231 #endif
2232
2233 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2234 {
2235 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2236 return gen_load_gpr(dc, rs1);
2237 }
2238
2239 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2240 {
2241 if (IS_IMM) { /* immediate */
2242 target_long simm = GET_FIELDs(insn, 19, 31);
2243 TCGv t = get_temp_tl(dc);
2244 tcg_gen_movi_tl(t, simm);
2245 return t;
2246 } else { /* register */
2247 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2248 return gen_load_gpr(dc, rs2);
2249 }
2250 }
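
/* Decoding example: in "add %g1, 0x7ff, %g2" the i bit (IS_IMM) is set,
   so get_src2 materializes the sign-extended 13-bit immediate (range
   -4096..4095) in a temporary; with i clear, bits 4:0 select rs2 and the
   register value is returned directly. */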
2251
2252 #ifdef TARGET_SPARC64
2253 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2254 {
2255 TCGv_i32 c32, zero, dst, s1, s2;
2256
2257 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2258 or fold the comparison down to 32 bits and use movcond_i32. Choose
2259        the latter. */
2260 c32 = tcg_temp_new_i32();
2261 if (cmp->is_bool) {
2262 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2263 } else {
2264 TCGv_i64 c64 = tcg_temp_new_i64();
2265 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2266 tcg_gen_extrl_i64_i32(c32, c64);
2267 tcg_temp_free_i64(c64);
2268 }
2269
2270 s1 = gen_load_fpr_F(dc, rs);
2271 s2 = gen_load_fpr_F(dc, rd);
2272 dst = gen_dest_fpr_F(dc);
2273 zero = tcg_const_i32(0);
2274
2275 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2276
2277 tcg_temp_free_i32(c32);
2278 tcg_temp_free_i32(zero);
2279 gen_store_fpr_F(dc, rd, dst);
2280 }
2281
2282 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2283 {
2284 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2285 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2286 gen_load_fpr_D(dc, rs),
2287 gen_load_fpr_D(dc, rd));
2288 gen_store_fpr_D(dc, rd, dst);
2289 }
2290
2291 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2292 {
2293 int qd = QFPREG(rd);
2294 int qs = QFPREG(rs);
2295
2296 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2297 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2298 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2299 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2300
2301 gen_update_fprs_dirty(qd);
2302 }
2303
2304 #ifndef CONFIG_USER_ONLY
2305 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2306 {
2307 TCGv_i32 r_tl = tcg_temp_new_i32();
2308
2309 /* load env->tl into r_tl */
2310 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2311
2312 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2313 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2314
2315 /* calculate offset to current trap state from env->ts, reuse r_tl */
2316 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2317 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2318
2319 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2320 {
2321 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2322 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2323 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2324 tcg_temp_free_ptr(r_tl_tmp);
2325 }
2326
2327 tcg_temp_free_i32(r_tl);
2328 }
2329 #endif
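
/* Worked example: with env->tl == 2 (unchanged by the MAXTL_MASK clamp),
   the code above computes r_tsptr = env->ts + 2 * sizeof(trap_state),
   i.e. a pointer to env->ts[2], the trap state for the current level. */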
2330
2331 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2332 int width, bool cc, bool left)
2333 {
2334 TCGv lo1, lo2, t1, t2;
2335 uint64_t amask, tabl, tabr;
2336 int shift, imask, omask;
2337
2338 if (cc) {
2339 tcg_gen_mov_tl(cpu_cc_src, s1);
2340 tcg_gen_mov_tl(cpu_cc_src2, s2);
2341 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2342 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2343 dc->cc_op = CC_OP_SUB;
2344 }
2345
2346 /* Theory of operation: there are two tables, left and right (not to
2347 be confused with the left and right versions of the opcode). These
2348 are indexed by the low 3 bits of the inputs. To make things "easy",
2349 these tables are loaded into two constants, TABL and TABR below.
2350 The operation index = (input & imask) << shift calculates the index
2351 into the constant, while val = (table >> index) & omask calculates
2352 the value we're looking for. */
2353 switch (width) {
2354 case 8:
2355 imask = 0x7;
2356 shift = 3;
2357 omask = 0xff;
2358 if (left) {
2359 tabl = 0x80c0e0f0f8fcfeffULL;
2360 tabr = 0xff7f3f1f0f070301ULL;
2361 } else {
2362 tabl = 0x0103070f1f3f7fffULL;
2363 tabr = 0xfffefcf8f0e0c080ULL;
2364 }
2365 break;
2366 case 16:
2367 imask = 0x6;
2368 shift = 1;
2369 omask = 0xf;
2370 if (left) {
2371 tabl = 0x8cef;
2372 tabr = 0xf731;
2373 } else {
2374 tabl = 0x137f;
2375 tabr = 0xfec8;
2376 }
2377 break;
2378 case 32:
2379 imask = 0x4;
2380 shift = 0;
2381 omask = 0x3;
2382 if (left) {
2383 tabl = (2 << 2) | 3;
2384 tabr = (3 << 2) | 1;
2385 } else {
2386 tabl = (1 << 2) | 3;
2387 tabr = (3 << 2) | 2;
2388 }
2389 break;
2390 default:
2391 abort();
2392 }
2393
2394 lo1 = tcg_temp_new();
2395 lo2 = tcg_temp_new();
2396 tcg_gen_andi_tl(lo1, s1, imask);
2397 tcg_gen_andi_tl(lo2, s2, imask);
2398 tcg_gen_shli_tl(lo1, lo1, shift);
2399 tcg_gen_shli_tl(lo2, lo2, shift);
2400
2401 t1 = tcg_const_tl(tabl);
2402 t2 = tcg_const_tl(tabr);
2403 tcg_gen_shr_tl(lo1, t1, lo1);
2404 tcg_gen_shr_tl(lo2, t2, lo2);
2405 tcg_gen_andi_tl(dst, lo1, omask);
2406 tcg_gen_andi_tl(lo2, lo2, omask);
2407
2408 amask = -8;
2409 if (AM_CHECK(dc)) {
2410 amask &= 0xffffffffULL;
2411 }
2412 tcg_gen_andi_tl(s1, s1, amask);
2413 tcg_gen_andi_tl(s2, s2, amask);
2414
2415 /* We want to compute
2416 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2417 We've already done dst = lo1, so this reduces to
2418 dst &= (s1 == s2 ? -1 : lo2)
2419 Which we perform by
2420 lo2 |= -(s1 == s2)
2421 dst &= lo2
2422 */
2423 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2424 tcg_gen_neg_tl(t1, t1);
2425 tcg_gen_or_tl(lo2, lo2, t1);
2426 tcg_gen_and_tl(dst, dst, lo2);
2427
2428 tcg_temp_free(lo1);
2429 tcg_temp_free(lo2);
2430 tcg_temp_free(t1);
2431 tcg_temp_free(t2);
2432 }
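
/* Worked example for the width == 8, big-endian (left == false) case:
   for s1 with low bits 3, index = 3 * 8 and (tabl >> 24) & 0xff = 0x1f,
   the left-edge mask covering bytes 3..7; for s2 with low bits 6,
   (tabr >> 48) & 0xff = 0xfe, the right-edge mask covering bytes 0..6. */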
2433
2434 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2435 {
2436 TCGv tmp = tcg_temp_new();
2437
2438 tcg_gen_add_tl(tmp, s1, s2);
2439 tcg_gen_andi_tl(dst, tmp, -8);
2440 if (left) {
2441 tcg_gen_neg_tl(tmp, tmp);
2442 }
2443 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2444
2445 tcg_temp_free(tmp);
2446 }
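
/* Worked example: with s1 + s2 = 0x1003, alignaddr writes 0x1000 to dst
   and 3 to GSR.align; the "left" variant (alignaddrl) deposits the two's
   complement instead: (-0x1003) & 7 = 5. */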
2447
2448 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2449 {
2450 TCGv t1, t2, shift;
2451
2452 t1 = tcg_temp_new();
2453 t2 = tcg_temp_new();
2454 shift = tcg_temp_new();
2455
2456 tcg_gen_andi_tl(shift, gsr, 7);
2457 tcg_gen_shli_tl(shift, shift, 3);
2458 tcg_gen_shl_tl(t1, s1, shift);
2459
2460 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2461 shift of (up to 63) followed by a constant shift of 1. */
2462 tcg_gen_xori_tl(shift, shift, 63);
2463 tcg_gen_shr_tl(t2, s2, shift);
2464 tcg_gen_shri_tl(t2, t2, 1);
2465
2466 tcg_gen_or_tl(dst, t1, t2);
2467
2468 tcg_temp_free(t1);
2469 tcg_temp_free(t2);
2470 tcg_temp_free(shift);
2471 }
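
/* Worked example: with GSR.align == 3 the code computes shift = 24, so
   dst = (s1 << 24) | (s2 >> 40). The xor-with-63 plus the extra shift by
   1 evaluates s2 >> (64 - shift) without ever shifting by 64: for
   GSR.align == 0 it degenerates to (s2 >> 63) >> 1 == 0. */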
2472 #endif
2473
2474 #define CHECK_IU_FEATURE(dc, FEATURE) \
2475 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2476 goto illegal_insn;
2477 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2478 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2479 goto nfpu_insn;
2480
2481 /* before an instruction, dc->pc must be static */
2482 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2483 {
2484 unsigned int opc, rs1, rs2, rd;
2485 TCGv cpu_src1, cpu_src2;
2486 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2487 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2488 target_long simm;
2489
2490 opc = GET_FIELD(insn, 0, 1);
2491 rd = GET_FIELD(insn, 2, 6);
2492
2493 switch (opc) {
2494 case 0: /* branches/sethi */
2495 {
2496 unsigned int xop = GET_FIELD(insn, 7, 9);
2497 int32_t target;
2498 switch (xop) {
2499 #ifdef TARGET_SPARC64
2500 case 0x1: /* V9 BPcc */
2501 {
2502 int cc;
2503
2504 target = GET_FIELD_SP(insn, 0, 18);
2505 target = sign_extend(target, 19);
2506 target <<= 2;
2507 cc = GET_FIELD_SP(insn, 20, 21);
2508 if (cc == 0)
2509 do_branch(dc, target, insn, 0);
2510 else if (cc == 2)
2511 do_branch(dc, target, insn, 1);
2512 else
2513 goto illegal_insn;
2514 goto jmp_insn;
2515 }
2516 case 0x3: /* V9 BPr */
2517 {
2518 target = GET_FIELD_SP(insn, 0, 13) |
2519 (GET_FIELD_SP(insn, 20, 21) << 14);
2520 target = sign_extend(target, 16);
2521 target <<= 2;
2522 cpu_src1 = get_src1(dc, insn);
2523 do_branch_reg(dc, target, insn, cpu_src1);
2524 goto jmp_insn;
2525 }
2526 case 0x5: /* V9 FBPcc */
2527 {
2528 int cc = GET_FIELD_SP(insn, 20, 21);
2529 if (gen_trap_ifnofpu(dc)) {
2530 goto jmp_insn;
2531 }
2532 target = GET_FIELD_SP(insn, 0, 18);
2533 target = sign_extend(target, 19);
2534 target <<= 2;
2535 do_fbranch(dc, target, insn, cc);
2536 goto jmp_insn;
2537 }
2538 #else
2539 case 0x7: /* CBN+x */
2540 {
2541 goto ncp_insn;
2542 }
2543 #endif
2544 case 0x2: /* BN+x */
2545 {
2546 target = GET_FIELD(insn, 10, 31);
2547 target = sign_extend(target, 22);
2548 target <<= 2;
2549 do_branch(dc, target, insn, 0);
2550 goto jmp_insn;
2551 }
2552 case 0x6: /* FBN+x */
2553 {
2554 if (gen_trap_ifnofpu(dc)) {
2555 goto jmp_insn;
2556 }
2557 target = GET_FIELD(insn, 10, 31);
2558 target = sign_extend(target, 22);
2559 target <<= 2;
2560 do_fbranch(dc, target, insn, 0);
2561 goto jmp_insn;
2562 }
2563 case 0x4: /* SETHI */
2564 /* Special-case %g0 because that's the canonical nop. */
2565 if (rd) {
2566 uint32_t value = GET_FIELD(insn, 10, 31);
2567 TCGv t = gen_dest_gpr(dc, rd);
2568 tcg_gen_movi_tl(t, value << 10);
2569 gen_store_gpr(dc, rd, t);
2570 }
2571 break;
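            /* Example: "sethi %hi(0x12345678), %g1" encodes imm22 = 0x48d15
               and writes 0x48d15 << 10 = 0x12345400 to %g1; the low 10 bits
               are typically filled in by a following "or" with %lo(...). */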
2572 case 0x0: /* UNIMPL */
2573 default:
2574 goto illegal_insn;
2575 }
2576 break;
2577 }
2578 break;
2579     case 1:                     /* CALL */
2580 {
2581 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2582 TCGv o7 = gen_dest_gpr(dc, 15);
2583
2584 tcg_gen_movi_tl(o7, dc->pc);
2585 gen_store_gpr(dc, 15, o7);
2586 target += dc->pc;
2587 gen_mov_pc_npc(dc);
2588 #ifdef TARGET_SPARC64
2589 if (unlikely(AM_CHECK(dc))) {
2590 target &= 0xffffffffULL;
2591 }
2592 #endif
2593 dc->npc = target;
2594 }
2595 goto jmp_insn;
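        /* Example: a CALL at pc 0x1000 with disp30 == 0x10 stores 0x1000
           into %o7 and sets npc to 0x1000 + (0x10 << 2) = 0x1040; the
           delay-slot insn at the old npc runs first (gen_mov_pc_npc). */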
2596 case 2: /* FPU & Logical Operations */
2597 {
2598 unsigned int xop = GET_FIELD(insn, 7, 12);
2599 TCGv cpu_dst = get_temp_tl(dc);
2600 TCGv cpu_tmp0;
2601
2602 if (xop == 0x3a) { /* generate trap */
2603 int cond = GET_FIELD(insn, 3, 6);
2604 TCGv_i32 trap;
2605 TCGLabel *l1 = NULL;
2606 int mask;
2607
2608 if (cond == 0) {
2609 /* Trap never. */
2610 break;
2611 }
2612
2613 save_state(dc);
2614
2615 if (cond != 8) {
2616 /* Conditional trap. */
2617 DisasCompare cmp;
2618 #ifdef TARGET_SPARC64
2619 /* V9 icc/xcc */
2620 int cc = GET_FIELD_SP(insn, 11, 12);
2621 if (cc == 0) {
2622 gen_compare(&cmp, 0, cond, dc);
2623 } else if (cc == 2) {
2624 gen_compare(&cmp, 1, cond, dc);
2625 } else {
2626 goto illegal_insn;
2627 }
2628 #else
2629 gen_compare(&cmp, 0, cond, dc);
2630 #endif
2631 l1 = gen_new_label();
2632 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2633 cmp.c1, cmp.c2, l1);
2634 free_compare(&cmp);
2635 }
2636
2637 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2638 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2639
2640 /* Don't use the normal temporaries, as they may well have
2641 gone out of scope with the branch above. While we're
2642 doing that we might as well pre-truncate to 32-bit. */
2643 trap = tcg_temp_new_i32();
2644
2645 rs1 = GET_FIELD_SP(insn, 14, 18);
2646 if (IS_IMM) {
2647 rs2 = GET_FIELD_SP(insn, 0, 6);
2648 if (rs1 == 0) {
2649 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2650 /* Signal that the trap value is fully constant. */
2651 mask = 0;
2652 } else {
2653 TCGv t1 = gen_load_gpr(dc, rs1);
2654 tcg_gen_trunc_tl_i32(trap, t1);
2655 tcg_gen_addi_i32(trap, trap, rs2);
2656 }
2657 } else {
2658 TCGv t1, t2;
2659 rs2 = GET_FIELD_SP(insn, 0, 4);
2660 t1 = gen_load_gpr(dc, rs1);
2661 t2 = gen_load_gpr(dc, rs2);
2662 tcg_gen_add_tl(t1, t1, t2);
2663 tcg_gen_trunc_tl_i32(trap, t1);
2664 }
2665 if (mask != 0) {
2666 tcg_gen_andi_i32(trap, trap, mask);
2667 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2668 }
2669
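                /* Illustrative: for "ta 0x6d" without hypervisor support,
                   mask is V8_TRAP_MASK and the trap number raised below is
                   (0x6d & V8_TRAP_MASK) + TT_TRAP, the software-trap base. */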
2670 gen_helper_raise_exception(cpu_env, trap);
2671 tcg_temp_free_i32(trap);
2672
2673 if (cond == 8) {
2674 /* An unconditional trap ends the TB. */
2675 dc->is_br = 1;
2676 goto jmp_insn;
2677 } else {
2678 /* A conditional trap falls through to the next insn. */
2679 gen_set_label(l1);
2680 break;
2681 }
2682 } else if (xop == 0x28) {
2683 rs1 = GET_FIELD(insn, 13, 17);
2684 switch(rs1) {
2685 case 0: /* rdy */
2686 #ifndef TARGET_SPARC64
2687 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2688 manual, rdy on the microSPARC
2689 II */
2690 case 0x0f: /* stbar in the SPARCv8 manual,
2691 rdy on the microSPARC II */
2692 case 0x10 ... 0x1f: /* implementation-dependent in the
2693 SPARCv8 manual, rdy on the
2694 microSPARC II */
2695 /* Read Asr17 */
2696 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2697 TCGv t = gen_dest_gpr(dc, rd);
2698 /* Read Asr17 for a Leon3 monoprocessor */
2699 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2700 gen_store_gpr(dc, rd, t);
2701 break;
2702 }
2703 #endif
2704 gen_store_gpr(dc, rd, cpu_y);
2705 break;
2706 #ifdef TARGET_SPARC64
2707 case 0x2: /* V9 rdccr */
2708 update_psr(dc);
2709 gen_helper_rdccr(cpu_dst, cpu_env);
2710 gen_store_gpr(dc, rd, cpu_dst);
2711 break;
2712 case 0x3: /* V9 rdasi */
2713 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2714 gen_store_gpr(dc, rd, cpu_dst);
2715 break;
2716 case 0x4: /* V9 rdtick */
2717 {
2718 TCGv_ptr r_tickptr;
2719 TCGv_i32 r_const;
2720
2721 r_tickptr = tcg_temp_new_ptr();
2722 r_const = tcg_const_i32(dc->mem_idx);
2723 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2724 offsetof(CPUSPARCState, tick));
2725 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2726 r_const);
2727 tcg_temp_free_ptr(r_tickptr);
2728 tcg_temp_free_i32(r_const);
2729 gen_store_gpr(dc, rd, cpu_dst);
2730 }
2731 break;
2732 case 0x5: /* V9 rdpc */
2733 {
2734 TCGv t = gen_dest_gpr(dc, rd);
2735 if (unlikely(AM_CHECK(dc))) {
2736 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2737 } else {
2738 tcg_gen_movi_tl(t, dc->pc);
2739 }
2740 gen_store_gpr(dc, rd, t);
2741 }
2742 break;
2743 case 0x6: /* V9 rdfprs */
2744 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2745 gen_store_gpr(dc, rd, cpu_dst);
2746 break;
2747 case 0xf: /* V9 membar */
2748 break; /* no effect */
2749 case 0x13: /* Graphics Status */
2750 if (gen_trap_ifnofpu(dc)) {
2751 goto jmp_insn;
2752 }
2753 gen_store_gpr(dc, rd, cpu_gsr);
2754 break;
2755 case 0x16: /* Softint */
2756 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2757 gen_store_gpr(dc, rd, cpu_dst);
2758 break;
2759 case 0x17: /* Tick compare */
2760 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2761 break;
2762 case 0x18: /* System tick */
2763 {
2764 TCGv_ptr r_tickptr;
2765 TCGv_i32 r_const;
2766
2767 r_tickptr = tcg_temp_new_ptr();
2768 r_const = tcg_const_i32(dc->mem_idx);
2769 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2770 offsetof(CPUSPARCState, stick));
2771 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
2772 r_const);
2773 tcg_temp_free_ptr(r_tickptr);
2774 tcg_temp_free_i32(r_const);
2775 gen_store_gpr(dc, rd, cpu_dst);
2776 }
2777 break;
2778 case 0x19: /* System tick compare */
2779 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2780 break;
2781 case 0x10: /* Performance Control */
2782 case 0x11: /* Performance Instrumentation Counter */
2783 case 0x12: /* Dispatch Control */
2784 case 0x14: /* Softint set, WO */
2785 case 0x15: /* Softint clear, WO */
2786 #endif
2787 default:
2788 goto illegal_insn;
2789 }
2790 #if !defined(CONFIG_USER_ONLY)
2791 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2792 #ifndef TARGET_SPARC64
2793 if (!supervisor(dc)) {
2794 goto priv_insn;
2795 }
2796 update_psr(dc);
2797 gen_helper_rdpsr(cpu_dst, cpu_env);
2798 #else
2799 CHECK_IU_FEATURE(dc, HYPV);
2800 if (!hypervisor(dc))
2801 goto priv_insn;
2802 rs1 = GET_FIELD(insn, 13, 17);
2803 switch (rs1) {
2804 case 0: // hpstate
2805 // gen_op_rdhpstate();
2806 break;
2807 case 1: // htstate
2808 // gen_op_rdhtstate();
2809 break;
2810 case 3: // hintp
2811 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2812 break;
2813 case 5: // htba
2814 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2815 break;
2816 case 6: // hver
2817 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2818 break;
2819 case 31: // hstick_cmpr
2820 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2821 break;
2822 default:
2823 goto illegal_insn;
2824 }
2825 #endif
2826 gen_store_gpr(dc, rd, cpu_dst);
2827 break;
2828 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2829 if (!supervisor(dc)) {
2830 goto priv_insn;
2831 }
2832 cpu_tmp0 = get_temp_tl(dc);
2833 #ifdef TARGET_SPARC64
2834 rs1 = GET_FIELD(insn, 13, 17);
2835 switch (rs1) {
2836 case 0: // tpc
2837 {
2838 TCGv_ptr r_tsptr;
2839
2840 r_tsptr = tcg_temp_new_ptr();
2841 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2842 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2843 offsetof(trap_state, tpc));
2844 tcg_temp_free_ptr(r_tsptr);
2845 }
2846 break;
2847 case 1: // tnpc
2848 {
2849 TCGv_ptr r_tsptr;
2850
2851 r_tsptr = tcg_temp_new_ptr();
2852 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2853 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2854 offsetof(trap_state, tnpc));
2855 tcg_temp_free_ptr(r_tsptr);
2856 }
2857 break;
2858 case 2: // tstate
2859 {
2860 TCGv_ptr r_tsptr;
2861
2862 r_tsptr = tcg_temp_new_ptr();
2863 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2864 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2865 offsetof(trap_state, tstate));
2866 tcg_temp_free_ptr(r_tsptr);
2867 }
2868 break;
2869 case 3: // tt
2870 {
2871 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2872
2873 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2874 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2875 offsetof(trap_state, tt));
2876 tcg_temp_free_ptr(r_tsptr);
2877 }
2878 break;
2879 case 4: // tick
2880 {
2881 TCGv_ptr r_tickptr;
2882 TCGv_i32 r_const;
2883
2884 r_tickptr = tcg_temp_new_ptr();
2885 r_const = tcg_const_i32(dc->mem_idx);
2886 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2887 offsetof(CPUSPARCState, tick));
2888 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
2889 r_tickptr, r_const);
2890 tcg_temp_free_ptr(r_tickptr);
2891 tcg_temp_free_i32(r_const);
2892 }
2893 break;
2894 case 5: // tba
2895 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2896 break;
2897 case 6: // pstate
2898 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2899 offsetof(CPUSPARCState, pstate));
2900 break;
2901 case 7: // tl
2902 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2903 offsetof(CPUSPARCState, tl));
2904 break;
2905 case 8: // pil
2906 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2907 offsetof(CPUSPARCState, psrpil));
2908 break;
2909 case 9: // cwp
2910 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2911 break;
2912 case 10: // cansave
2913 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2914 offsetof(CPUSPARCState, cansave));
2915 break;
2916 case 11: // canrestore
2917 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2918 offsetof(CPUSPARCState, canrestore));
2919 break;
2920 case 12: // cleanwin
2921 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2922 offsetof(CPUSPARCState, cleanwin));
2923 break;
2924 case 13: // otherwin
2925 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2926 offsetof(CPUSPARCState, otherwin));
2927 break;
2928 case 14: // wstate
2929 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2930 offsetof(CPUSPARCState, wstate));
2931 break;
2932 case 16: // UA2005 gl
2933 CHECK_IU_FEATURE(dc, GL);
2934 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2935 offsetof(CPUSPARCState, gl));
2936 break;
2937 case 26: // UA2005 strand status
2938 CHECK_IU_FEATURE(dc, HYPV);
2939 if (!hypervisor(dc))
2940 goto priv_insn;
2941 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2942 break;
2943 case 31: // ver
2944 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2945 break;
2946 case 15: // fq
2947 default:
2948 goto illegal_insn;
2949 }
2950 #else
2951 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2952 #endif
2953 gen_store_gpr(dc, rd, cpu_tmp0);
2954 break;
2955 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2956 #ifdef TARGET_SPARC64
2957 save_state(dc);
2958 gen_helper_flushw(cpu_env);
2959 #else
2960 if (!supervisor(dc))
2961 goto priv_insn;
2962 gen_store_gpr(dc, rd, cpu_tbr);
2963 #endif
2964 break;
2965 #endif
2966         } else if (xop == 0x34) {   /* FPU Operations (FPop1) */
2967 if (gen_trap_ifnofpu(dc)) {
2968 goto jmp_insn;
2969 }
2970 gen_op_clear_ieee_excp_and_FTT();
2971 rs1 = GET_FIELD(insn, 13, 17);
2972 rs2 = GET_FIELD(insn, 27, 31);
2973 xop = GET_FIELD(insn, 18, 26);
2974 save_state(dc);
2975 switch (xop) {
2976 case 0x1: /* fmovs */
2977 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2978 gen_store_fpr_F(dc, rd, cpu_src1_32);
2979 break;
2980 case 0x5: /* fnegs */
2981 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2982 break;
2983 case 0x9: /* fabss */
2984 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2985 break;
2986 case 0x29: /* fsqrts */
2987 CHECK_FPU_FEATURE(dc, FSQRT);
2988 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2989 break;
2990 case 0x2a: /* fsqrtd */
2991 CHECK_FPU_FEATURE(dc, FSQRT);
2992 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
2993 break;
2994 case 0x2b: /* fsqrtq */
2995 CHECK_FPU_FEATURE(dc, FLOAT128);
2996 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2997 break;
2998 case 0x41: /* fadds */
2999 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3000 break;
3001 case 0x42: /* faddd */
3002 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3003 break;
3004 case 0x43: /* faddq */
3005 CHECK_FPU_FEATURE(dc, FLOAT128);
3006 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3007 break;
3008 case 0x45: /* fsubs */
3009 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3010 break;
3011 case 0x46: /* fsubd */
3012 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3013 break;
3014 case 0x47: /* fsubq */
3015 CHECK_FPU_FEATURE(dc, FLOAT128);
3016 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3017 break;
3018 case 0x49: /* fmuls */
3019 CHECK_FPU_FEATURE(dc, FMUL);
3020 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3021 break;
3022 case 0x4a: /* fmuld */
3023 CHECK_FPU_FEATURE(dc, FMUL);
3024 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3025 break;
3026 case 0x4b: /* fmulq */
3027 CHECK_FPU_FEATURE(dc, FLOAT128);
3028 CHECK_FPU_FEATURE(dc, FMUL);
3029 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3030 break;
3031 case 0x4d: /* fdivs */
3032 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3033 break;
3034 case 0x4e: /* fdivd */
3035 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3036 break;
3037 case 0x4f: /* fdivq */
3038 CHECK_FPU_FEATURE(dc, FLOAT128);
3039 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3040 break;
3041 case 0x69: /* fsmuld */
3042 CHECK_FPU_FEATURE(dc, FSMULD);
3043 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3044 break;
3045 case 0x6e: /* fdmulq */
3046 CHECK_FPU_FEATURE(dc, FLOAT128);
3047 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3048 break;
3049 case 0xc4: /* fitos */
3050 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3051 break;
3052 case 0xc6: /* fdtos */
3053 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3054 break;
3055 case 0xc7: /* fqtos */
3056 CHECK_FPU_FEATURE(dc, FLOAT128);
3057 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3058 break;
3059 case 0xc8: /* fitod */
3060 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3061 break;
3062 case 0xc9: /* fstod */
3063 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3064 break;
3065 case 0xcb: /* fqtod */
3066 CHECK_FPU_FEATURE(dc, FLOAT128);
3067 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3068 break;
3069 case 0xcc: /* fitoq */
3070 CHECK_FPU_FEATURE(dc, FLOAT128);
3071 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3072 break;
3073 case 0xcd: /* fstoq */
3074 CHECK_FPU_FEATURE(dc, FLOAT128);
3075 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3076 break;
3077 case 0xce: /* fdtoq */
3078 CHECK_FPU_FEATURE(dc, FLOAT128);
3079 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3080 break;
3081 case 0xd1: /* fstoi */
3082 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3083 break;
3084 case 0xd2: /* fdtoi */
3085 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3086 break;
3087 case 0xd3: /* fqtoi */
3088 CHECK_FPU_FEATURE(dc, FLOAT128);
3089 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3090 break;
3091 #ifdef TARGET_SPARC64
3092 case 0x2: /* V9 fmovd */
3093 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3094 gen_store_fpr_D(dc, rd, cpu_src1_64);
3095 break;
3096 case 0x3: /* V9 fmovq */
3097 CHECK_FPU_FEATURE(dc, FLOAT128);
3098 gen_move_Q(rd, rs2);
3099 break;
3100 case 0x6: /* V9 fnegd */
3101 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3102 break;
3103 case 0x7: /* V9 fnegq */
3104 CHECK_FPU_FEATURE(dc, FLOAT128);
3105 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3106 break;
3107 case 0xa: /* V9 fabsd */
3108 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3109 break;
3110 case 0xb: /* V9 fabsq */
3111 CHECK_FPU_FEATURE(dc, FLOAT128);
3112 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3113 break;
3114 case 0x81: /* V9 fstox */
3115 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3116 break;
3117 case 0x82: /* V9 fdtox */
3118 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3119 break;
3120 case 0x83: /* V9 fqtox */
3121 CHECK_FPU_FEATURE(dc, FLOAT128);
3122 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3123 break;
3124 case 0x84: /* V9 fxtos */
3125 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3126 break;
3127 case 0x88: /* V9 fxtod */
3128 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3129 break;
3130 case 0x8c: /* V9 fxtoq */
3131 CHECK_FPU_FEATURE(dc, FLOAT128);
3132 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3133 break;
3134 #endif
3135 default:
3136 goto illegal_insn;
3137 }
3138         } else if (xop == 0x35) {   /* FPU Operations (FPop2: conditional moves, compares) */
3139 #ifdef TARGET_SPARC64
3140 int cond;
3141 #endif
3142 if (gen_trap_ifnofpu(dc)) {
3143 goto jmp_insn;
3144 }
3145 gen_op_clear_ieee_excp_and_FTT();
3146 rs1 = GET_FIELD(insn, 13, 17);
3147 rs2 = GET_FIELD(insn, 27, 31);
3148 xop = GET_FIELD(insn, 18, 26);
3149 save_state(dc);
3150
3151 #ifdef TARGET_SPARC64
3152 #define FMOVR(sz) \
3153 do { \
3154 DisasCompare cmp; \
3155 cond = GET_FIELD_SP(insn, 10, 12); \
3156 cpu_src1 = get_src1(dc, insn); \
3157 gen_compare_reg(&cmp, cond, cpu_src1); \
3158 gen_fmov##sz(dc, &cmp, rd, rs2); \
3159 free_compare(&cmp); \
3160 } while (0)
3161
3162 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3163 FMOVR(s);
3164 break;
3165 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3166 FMOVR(d);
3167 break;
3168 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3169 CHECK_FPU_FEATURE(dc, FLOAT128);
3170 FMOVR(q);
3171 break;
3172 }
3173 #undef FMOVR
3174 #endif
3175 switch (xop) {
3176 #ifdef TARGET_SPARC64
3177 #define FMOVCC(fcc, sz) \
3178 do { \
3179 DisasCompare cmp; \
3180 cond = GET_FIELD_SP(insn, 14, 17); \
3181 gen_fcompare(&cmp, fcc, cond); \
3182 gen_fmov##sz(dc, &cmp, rd, rs2); \
3183 free_compare(&cmp); \
3184 } while (0)
3185
3186 case 0x001: /* V9 fmovscc %fcc0 */
3187 FMOVCC(0, s);
3188 break;
3189 case 0x002: /* V9 fmovdcc %fcc0 */
3190 FMOVCC(0, d);
3191 break;
3192 case 0x003: /* V9 fmovqcc %fcc0 */
3193 CHECK_FPU_FEATURE(dc, FLOAT128);
3194 FMOVCC(0, q);
3195 break;
3196 case 0x041: /* V9 fmovscc %fcc1 */
3197 FMOVCC(1, s);
3198 break;
3199 case 0x042: /* V9 fmovdcc %fcc1 */
3200 FMOVCC(1, d);
3201 break;
3202 case 0x043: /* V9 fmovqcc %fcc1 */
3203 CHECK_FPU_FEATURE(dc, FLOAT128);
3204 FMOVCC(1, q);
3205 break;
3206 case 0x081: /* V9 fmovscc %fcc2 */
3207 FMOVCC(2, s);
3208 break;
3209 case 0x082: /* V9 fmovdcc %fcc2 */
3210 FMOVCC(2, d);
3211 break;
3212 case 0x083: /* V9 fmovqcc %fcc2 */
3213 CHECK_FPU_FEATURE(dc, FLOAT128);
3214 FMOVCC(2, q);
3215 break;
3216 case 0x0c1: /* V9 fmovscc %fcc3 */
3217 FMOVCC(3, s);
3218 break;
3219 case 0x0c2: /* V9 fmovdcc %fcc3 */
3220 FMOVCC(3, d);
3221 break;
3222 case 0x0c3: /* V9 fmovqcc %fcc3 */
3223 CHECK_FPU_FEATURE(dc, FLOAT128);
3224 FMOVCC(3, q);
3225 break;
3226 #undef FMOVCC
3227 #define FMOVCC(xcc, sz) \
3228 do { \
3229 DisasCompare cmp; \
3230 cond = GET_FIELD_SP(insn, 14, 17); \
3231 gen_compare(&cmp, xcc, cond, dc); \
3232 gen_fmov##sz(dc, &cmp, rd, rs2); \
3233 free_compare(&cmp); \
3234 } while (0)
3235
3236 case 0x101: /* V9 fmovscc %icc */
3237 FMOVCC(0, s);
3238 break;
3239 case 0x102: /* V9 fmovdcc %icc */
3240 FMOVCC(0, d);
3241 break;
3242 case 0x103: /* V9 fmovqcc %icc */
3243 CHECK_FPU_FEATURE(dc, FLOAT128);
3244 FMOVCC(0, q);
3245 break;
3246 case 0x181: /* V9 fmovscc %xcc */
3247 FMOVCC(1, s);
3248 break;
3249 case 0x182: /* V9 fmovdcc %xcc */
3250 FMOVCC(1, d);
3251 break;
3252 case 0x183: /* V9 fmovqcc %xcc */
3253 CHECK_FPU_FEATURE(dc, FLOAT128);
3254 FMOVCC(1, q);
3255 break;
3256 #undef FMOVCC
3257 #endif
3258 case 0x51: /* fcmps, V9 %fcc */
3259 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3260 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3261 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3262 break;
3263 case 0x52: /* fcmpd, V9 %fcc */
3264 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3265 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3266 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3267 break;
3268 case 0x53: /* fcmpq, V9 %fcc */
3269 CHECK_FPU_FEATURE(dc, FLOAT128);
3270 gen_op_load_fpr_QT0(QFPREG(rs1));
3271 gen_op_load_fpr_QT1(QFPREG(rs2));
3272 gen_op_fcmpq(rd & 3);
3273 break;
3274 case 0x55: /* fcmpes, V9 %fcc */
3275 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3276 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3277 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3278 break;
3279 case 0x56: /* fcmped, V9 %fcc */
3280 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3281 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3282 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3283 break;
3284 case 0x57: /* fcmpeq, V9 %fcc */
3285 CHECK_FPU_FEATURE(dc, FLOAT128);
3286 gen_op_load_fpr_QT0(QFPREG(rs1));
3287 gen_op_load_fpr_QT1(QFPREG(rs2));
3288 gen_op_fcmpeq(rd & 3);
3289 break;
3290 default:
3291 goto illegal_insn;
3292 }
3293 } else if (xop == 0x2) {
3294 TCGv dst = gen_dest_gpr(dc, rd);
3295 rs1 = GET_FIELD(insn, 13, 17);
3296 if (rs1 == 0) {
3297 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3298 if (IS_IMM) { /* immediate */
3299 simm = GET_FIELDs(insn, 19, 31);
3300 tcg_gen_movi_tl(dst, simm);
3301 gen_store_gpr(dc, rd, dst);
3302 } else { /* register */
3303 rs2 = GET_FIELD(insn, 27, 31);
3304 if (rs2 == 0) {
3305 tcg_gen_movi_tl(dst, 0);
3306 gen_store_gpr(dc, rd, dst);
3307 } else {
3308 cpu_src2 = gen_load_gpr(dc, rs2);
3309 gen_store_gpr(dc, rd, cpu_src2);
3310 }
3311 }
3312 } else {
3313 cpu_src1 = get_src1(dc, insn);
3314 if (IS_IMM) { /* immediate */
3315 simm = GET_FIELDs(insn, 19, 31);
3316 tcg_gen_ori_tl(dst, cpu_src1, simm);
3317 gen_store_gpr(dc, rd, dst);
3318 } else { /* register */
3319 rs2 = GET_FIELD(insn, 27, 31);
3320 if (rs2 == 0) {
3321 /* mov shortcut: or x, %g0, y -> mov x, y */
3322 gen_store_gpr(dc, rd, cpu_src1);
3323 } else {
3324 cpu_src2 = gen_load_gpr(dc, rs2);
3325 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3326 gen_store_gpr(dc, rd, dst);
3327 }
3328 }
3329 }
3330 #ifdef TARGET_SPARC64
3331 } else if (xop == 0x25) { /* sll, V9 sllx */
3332 cpu_src1 = get_src1(dc, insn);
3333 if (IS_IMM) { /* immediate */
3334 simm = GET_FIELDs(insn, 20, 31);
3335 if (insn & (1 << 12)) {
3336 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3337 } else {
3338 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3339 }
3340 } else { /* register */
3341 rs2 = GET_FIELD(insn, 27, 31);
3342 cpu_src2 = gen_load_gpr(dc, rs2);
3343 cpu_tmp0 = get_temp_tl(dc);
3344 if (insn & (1 << 12)) {
3345 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3346 } else {
3347 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3348 }
3349 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3350 }
3351 gen_store_gpr(dc, rd, cpu_dst);
3352 } else if (xop == 0x26) { /* srl, V9 srlx */
3353 cpu_src1 = get_src1(dc, insn);
3354 if (IS_IMM) { /* immediate */
3355 simm = GET_FIELDs(insn, 20, 31);
3356 if (insn & (1 << 12)) {
3357 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3358 } else {
3359 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3360 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3361 }
3362 } else { /* register */
3363 rs2 = GET_FIELD(insn, 27, 31);
3364 cpu_src2 = gen_load_gpr(dc, rs2);
3365 cpu_tmp0 = get_temp_tl(dc);
3366 if (insn & (1 << 12)) {
3367 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3368 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3369 } else {
3370 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3371 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3372 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3373 }
3374 }
3375 gen_store_gpr(dc, rd, cpu_dst);
3376 } else if (xop == 0x27) { /* sra, V9 srax */
3377 cpu_src1 = get_src1(dc, insn);
3378 if (IS_IMM) { /* immediate */
3379 simm = GET_FIELDs(insn, 20, 31);
3380 if (insn & (1 << 12)) {
3381 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3382 } else {
3383 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3384 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3385 }
3386 } else { /* register */
3387 rs2 = GET_FIELD(insn, 27, 31);
3388 cpu_src2 = gen_load_gpr(dc, rs2);
3389 cpu_tmp0 = get_temp_tl(dc);
3390 if (insn & (1 << 12)) {
3391 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3392 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3393 } else {
3394 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3395 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3396 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3397 }
3398 }
3399 gen_store_gpr(dc, rd, cpu_dst);
3400 #endif
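            /* Note on the V9 shifts above: insn bit 12 selects the 64-bit
               "x" form, e.g. "srlx %o0, 40, %o1" masks the count with 0x3f,
               while plain srl masks with 0x1f and first zero-extends the
               low 32 bits of the source. */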
3401 } else if (xop < 0x36) {
3402 if (xop < 0x20) {
3403 cpu_src1 = get_src1(dc, insn);
3404 cpu_src2 = get_src2(dc, insn);
3405 switch (xop & ~0x10) {
3406 case 0x0: /* add */
3407 if (xop & 0x10) {
3408 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3409 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3410 dc->cc_op = CC_OP_ADD;
3411 } else {
3412 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3413 }
3414 break;
3415 case 0x1: /* and */
3416 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3417 if (xop & 0x10) {
3418 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3419 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3420 dc->cc_op = CC_OP_LOGIC;
3421 }
3422 break;
3423 case 0x2: /* or */
3424 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3425 if (xop & 0x10) {
3426 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3427 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3428 dc->cc_op = CC_OP_LOGIC;
3429 }
3430 break;
3431 case 0x3: /* xor */
3432 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3433 if (xop & 0x10) {
3434 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3435 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3436 dc->cc_op = CC_OP_LOGIC;
3437 }
3438 break;
3439 case 0x4: /* sub */
3440 if (xop & 0x10) {
3441 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3442 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3443 dc->cc_op = CC_OP_SUB;
3444 } else {
3445 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3446 }
3447 break;
3448 case 0x5: /* andn */
3449 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3450 if (xop & 0x10) {
3451 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3452 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3453 dc->cc_op = CC_OP_LOGIC;
3454 }
3455 break;
3456 case 0x6: /* orn */
3457 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3458 if (xop & 0x10) {
3459 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3460 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3461 dc->cc_op = CC_OP_LOGIC;
3462 }
3463 break;
3464 case 0x7: /* xorn */
3465 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3466 if (xop & 0x10) {
3467 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3468 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3469 dc->cc_op = CC_OP_LOGIC;
3470 }
3471 break;
3472 case 0x8: /* addx, V9 addc */
3473 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3474 (xop & 0x10));
3475 break;
3476 #ifdef TARGET_SPARC64
3477 case 0x9: /* V9 mulx */
3478 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3479 break;
3480 #endif
3481 case 0xa: /* umul */
3482 CHECK_IU_FEATURE(dc, MUL);
3483 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3484 if (xop & 0x10) {
3485 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3486 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3487 dc->cc_op = CC_OP_LOGIC;
3488 }
3489 break;
3490 case 0xb: /* smul */
3491 CHECK_IU_FEATURE(dc, MUL);
3492 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3493 if (xop & 0x10) {
3494 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3495 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3496 dc->cc_op = CC_OP_LOGIC;
3497 }
3498 break;
3499 case 0xc: /* subx, V9 subc */
3500 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3501 (xop & 0x10));
3502 break;
3503 #ifdef TARGET_SPARC64
3504 case 0xd: /* V9 udivx */
3505 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3506 break;
3507 #endif
3508 case 0xe: /* udiv */
3509 CHECK_IU_FEATURE(dc, DIV);
3510 if (xop & 0x10) {
3511 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3512 cpu_src2);
3513 dc->cc_op = CC_OP_DIV;
3514 } else {
3515 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3516 cpu_src2);
3517 }
3518 break;
3519 case 0xf: /* sdiv */
3520 CHECK_IU_FEATURE(dc, DIV);
3521 if (xop & 0x10) {
3522 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3523 cpu_src2);
3524 dc->cc_op = CC_OP_DIV;
3525 } else {
3526 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3527 cpu_src2);
3528 }
3529 break;
3530 default:
3531 goto illegal_insn;
3532 }
3533 gen_store_gpr(dc, rd, cpu_dst);
3534 } else {
3535 cpu_src1 = get_src1(dc, insn);
3536 cpu_src2 = get_src2(dc, insn);
3537 switch (xop) {
3538 case 0x20: /* taddcc */
3539 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3540 gen_store_gpr(dc, rd, cpu_dst);
3541 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3542 dc->cc_op = CC_OP_TADD;
3543 break;
3544 case 0x21: /* tsubcc */
3545 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3546 gen_store_gpr(dc, rd, cpu_dst);
3547 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3548 dc->cc_op = CC_OP_TSUB;
3549 break;
3550 case 0x22: /* taddcctv */
3551 gen_helper_taddcctv(cpu_dst, cpu_env,
3552 cpu_src1, cpu_src2);
3553 gen_store_gpr(dc, rd, cpu_dst);
3554 dc->cc_op = CC_OP_TADDTV;
3555 break;
3556 case 0x23: /* tsubcctv */
3557 gen_helper_tsubcctv(cpu_dst, cpu_env,
3558 cpu_src1, cpu_src2);
3559 gen_store_gpr(dc, rd, cpu_dst);
3560 dc->cc_op = CC_OP_TSUBTV;
3561 break;
3562 case 0x24: /* mulscc */
3563 update_psr(dc);
3564 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3565 gen_store_gpr(dc, rd, cpu_dst);
3566 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3567 dc->cc_op = CC_OP_ADD;
3568 break;
3569 #ifndef TARGET_SPARC64
3570 case 0x25: /* sll */
3571 if (IS_IMM) { /* immediate */
3572 simm = GET_FIELDs(insn, 20, 31);
3573 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3574 } else { /* register */
3575 cpu_tmp0 = get_temp_tl(dc);
3576 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3577 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3578 }
3579 gen_store_gpr(dc, rd, cpu_dst);
3580 break;
3581 case 0x26: /* srl */
3582 if (IS_IMM) { /* immediate */
3583 simm = GET_FIELDs(insn, 20, 31);
3584 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3585 } else { /* register */
3586 cpu_tmp0 = get_temp_tl(dc);
3587 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3588 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3589 }
3590 gen_store_gpr(dc, rd, cpu_dst);
3591 break;
3592 case 0x27: /* sra */
3593 if (IS_IMM) { /* immediate */
3594 simm = GET_FIELDs(insn, 20, 31);
3595 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3596 } else { /* register */
3597 cpu_tmp0 = get_temp_tl(dc);
3598 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3599 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3600 }
3601 gen_store_gpr(dc, rd, cpu_dst);
3602 break;
3603 #endif
3604 case 0x30:
3605 {
3606 cpu_tmp0 = get_temp_tl(dc);
3607 switch(rd) {
3608 case 0: /* wry */
3609 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3610 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3611 break;
3612 #ifndef TARGET_SPARC64
3613 case 0x01 ... 0x0f: /* undefined in the
3614 SPARCv8 manual, nop
3615 on the microSPARC
3616 II */
3617 case 0x10 ... 0x1f: /* implementation-dependent
3618 in the SPARCv8
3619 manual, nop on the
3620 microSPARC II */
3621 if ((rd == 0x13) && (dc->def->features &
3622 CPU_FEATURE_POWERDOWN)) {
3623 /* LEON3 power-down */
3624 save_state(dc);
3625 gen_helper_power_down(cpu_env);
3626 }
3627 break;
3628 #else
3629 case 0x2: /* V9 wrccr */
3630 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3631 gen_helper_wrccr(cpu_env, cpu_tmp0);
3632 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3633 dc->cc_op = CC_OP_FLAGS;
3634 break;
3635 case 0x3: /* V9 wrasi */
3636 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3637 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3638 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3639 break;
3640 case 0x6: /* V9 wrfprs */
3641 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3642 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3643 save_state(dc);
3644 gen_op_next_insn();
3645 tcg_gen_exit_tb(0);
3646 dc->is_br = 1;
3647 break;
3648 case 0xf: /* V9 sir, nop if user */
3649 #if !defined(CONFIG_USER_ONLY)
3650 if (supervisor(dc)) {
3651 ; // XXX
3652 }
3653 #endif
3654 break;
3655 case 0x13: /* Graphics Status */
3656 if (gen_trap_ifnofpu(dc)) {
3657 goto jmp_insn;
3658 }
3659 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3660 break;
3661 case 0x14: /* Softint set */
3662 if (!supervisor(dc))
3663 goto illegal_insn;
3664 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3665 gen_helper_set_softint(cpu_env, cpu_tmp0);
3666 break;
3667 case 0x15: /* Softint clear */
3668 if (!supervisor(dc))
3669 goto illegal_insn;
3670 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3671 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3672 break;
3673 case 0x16: /* Softint write */
3674 if (!supervisor(dc))
3675 goto illegal_insn;
3676 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3677 gen_helper_write_softint(cpu_env, cpu_tmp0);
3678 break;
3679 case 0x17: /* Tick compare */
3680 #if !defined(CONFIG_USER_ONLY)
3681 if (!supervisor(dc))
3682 goto illegal_insn;
3683 #endif
3684 {
3685 TCGv_ptr r_tickptr;
3686
3687 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3688 cpu_src2);
3689 r_tickptr = tcg_temp_new_ptr();
3690 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3691 offsetof(CPUSPARCState, tick));
3692 gen_helper_tick_set_limit(r_tickptr,
3693 cpu_tick_cmpr);
3694 tcg_temp_free_ptr(r_tickptr);
3695 }
3696 break;
3697 case 0x18: /* System tick */
3698 #if !defined(CONFIG_USER_ONLY)
3699 if (!supervisor(dc))
3700 goto illegal_insn;
3701 #endif
3702 {
3703 TCGv_ptr r_tickptr;
3704
3705 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3706 cpu_src2);
3707 r_tickptr = tcg_temp_new_ptr();
3708 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3709 offsetof(CPUSPARCState, stick));
3710 gen_helper_tick_set_count(r_tickptr,
3711 cpu_tmp0);
3712 tcg_temp_free_ptr(r_tickptr);
3713 }
3714 break;
3715 case 0x19: /* System tick compare */
3716 #if !defined(CONFIG_USER_ONLY)
3717 if (!supervisor(dc))
3718 goto illegal_insn;
3719 #endif
3720 {
3721 TCGv_ptr r_tickptr;
3722
3723 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3724 cpu_src2);
3725 r_tickptr = tcg_temp_new_ptr();
3726 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3727 offsetof(CPUSPARCState, stick));
3728 gen_helper_tick_set_limit(r_tickptr,
3729 cpu_stick_cmpr);
3730 tcg_temp_free_ptr(r_tickptr);
3731 }
3732 break;
3733
3734 case 0x10: /* Performance Control */
3735 case 0x11: /* Performance Instrumentation
3736 Counter */
3737 case 0x12: /* Dispatch Control */
3738 #endif
3739 default:
3740 goto illegal_insn;
3741 }
3742 }
3743 break;
3744 #if !defined(CONFIG_USER_ONLY)
3745 case 0x31: /* wrpsr, V9 saved, restored */
3746 {
3747 if (!supervisor(dc))
3748 goto priv_insn;
3749 #ifdef TARGET_SPARC64
3750 switch (rd) {
3751 case 0:
3752 gen_helper_saved(cpu_env);
3753 break;
3754 case 1:
3755 gen_helper_restored(cpu_env);
3756 break;
3757 case 2: /* UA2005 allclean */
3758 case 3: /* UA2005 otherw */
3759 case 4: /* UA2005 normalw */
3760 case 5: /* UA2005 invalw */
3761 // XXX
3762 default:
3763 goto illegal_insn;
3764 }
3765 #else
3766 cpu_tmp0 = get_temp_tl(dc);
3767 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3768 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3769 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3770 dc->cc_op = CC_OP_FLAGS;
3771 save_state(dc);
3772 gen_op_next_insn();
3773 tcg_gen_exit_tb(0);
3774 dc->is_br = 1;
3775 #endif
3776 }
3777 break;
3778 case 0x32: /* wrwim, V9 wrpr */
3779 {
3780 if (!supervisor(dc))
3781 goto priv_insn;
3782 cpu_tmp0 = get_temp_tl(dc);
3783 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3784 #ifdef TARGET_SPARC64
3785 switch (rd) {
3786 case 0: // tpc
3787 {
3788 TCGv_ptr r_tsptr;
3789
3790 r_tsptr = tcg_temp_new_ptr();
3791 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3792 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3793 offsetof(trap_state, tpc));
3794 tcg_temp_free_ptr(r_tsptr);
3795 }
3796 break;
3797 case 1: // tnpc
3798 {
3799 TCGv_ptr r_tsptr;
3800
3801 r_tsptr = tcg_temp_new_ptr();
3802 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3803 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3804 offsetof(trap_state, tnpc));
3805 tcg_temp_free_ptr(r_tsptr);
3806 }
3807 break;
3808 case 2: // tstate
3809 {
3810 TCGv_ptr r_tsptr;
3811
3812 r_tsptr = tcg_temp_new_ptr();
3813 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3814 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3815 offsetof(trap_state,
3816 tstate));
3817 tcg_temp_free_ptr(r_tsptr);
3818 }
3819 break;
3820 case 3: // tt
3821 {
3822 TCGv_ptr r_tsptr;
3823
3824 r_tsptr = tcg_temp_new_ptr();
3825 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3826 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3827 offsetof(trap_state, tt));
3828 tcg_temp_free_ptr(r_tsptr);
3829 }
3830 break;
3831 case 4: // tick
3832 {
3833 TCGv_ptr r_tickptr;
3834
3835 r_tickptr = tcg_temp_new_ptr();
3836 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3837 offsetof(CPUSPARCState, tick));
3838 gen_helper_tick_set_count(r_tickptr,
3839 cpu_tmp0);
3840 tcg_temp_free_ptr(r_tickptr);
3841 }
3842 break;
3843 case 5: // tba
3844 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3845 break;
3846 case 6: // pstate
3847 save_state(dc);
3848 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3849 dc->npc = DYNAMIC_PC;
3850 break;
3851 case 7: // tl
3852 save_state(dc);
3853 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3854 offsetof(CPUSPARCState, tl));
3855 dc->npc = DYNAMIC_PC;
3856 break;
3857 case 8: // pil
3858 gen_helper_wrpil(cpu_env, cpu_tmp0);
3859 break;
3860 case 9: // cwp
3861 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3862 break;
3863 case 10: // cansave
3864 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3865 offsetof(CPUSPARCState,
3866 cansave));
3867 break;
3868 case 11: // canrestore
3869 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3870 offsetof(CPUSPARCState,
3871 canrestore));
3872 break;
3873 case 12: // cleanwin
3874 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3875 offsetof(CPUSPARCState,
3876 cleanwin));
3877 break;
3878 case 13: // otherwin
3879 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3880 offsetof(CPUSPARCState,
3881 otherwin));
3882 break;
3883 case 14: // wstate
3884 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3885 offsetof(CPUSPARCState,
3886 wstate));
3887 break;
3888 case 16: // UA2005 gl
3889 CHECK_IU_FEATURE(dc, GL);
3890 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3891 offsetof(CPUSPARCState, gl));
3892 break;
3893 case 26: // UA2005 strand status
3894 CHECK_IU_FEATURE(dc, HYPV);
3895 if (!hypervisor(dc))
3896 goto priv_insn;
3897 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3898 break;
3899 default:
3900 goto illegal_insn;
3901 }
3902 #else
3903 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3904 if (dc->def->nwindows != 32) {
3905 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3906 (1 << dc->def->nwindows) - 1);
3907 }
3908 #endif
3909 }
3910 break;
3911 case 0x33: /* wrtbr, UA2005 wrhpr */
3912 {
3913 #ifndef TARGET_SPARC64
3914 if (!supervisor(dc))
3915 goto priv_insn;
3916 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3917 #else
3918 CHECK_IU_FEATURE(dc, HYPV);
3919 if (!hypervisor(dc))
3920 goto priv_insn;
3921 cpu_tmp0 = get_temp_tl(dc);
3922 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3923 switch (rd) {
3924 case 0: // hpstate
3925 // XXX gen_op_wrhpstate();
3926 save_state(dc);
3927 gen_op_next_insn();
3928 tcg_gen_exit_tb(0);
3929 dc->is_br = 1;
3930 break;
3931 case 1: // htstate
3932 // XXX gen_op_wrhtstate();
3933 break;
3934 case 3: // hintp
3935 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3936 break;
3937 case 5: // htba
3938 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3939 break;
3940 case 31: // hstick_cmpr
3941 {
3942 TCGv_ptr r_tickptr;
3943
3944 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3945 r_tickptr = tcg_temp_new_ptr();
3946 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3947 offsetof(CPUSPARCState, hstick));
3948 gen_helper_tick_set_limit(r_tickptr,
3949 cpu_hstick_cmpr);
3950 tcg_temp_free_ptr(r_tickptr);
3951 }
3952 break;
3953 case 6: // hver readonly
3954 default:
3955 goto illegal_insn;
3956 }
3957 #endif
3958 }
3959 break;
3960 #endif
3961 #ifdef TARGET_SPARC64
3962 case 0x2c: /* V9 movcc */
3963 {
3964 int cc = GET_FIELD_SP(insn, 11, 12);
3965 int cond = GET_FIELD_SP(insn, 14, 17);
3966 DisasCompare cmp;
3967 TCGv dst;
3968
3969 if (insn & (1 << 18)) {
3970 if (cc == 0) {
3971 gen_compare(&cmp, 0, cond, dc);
3972 } else if (cc == 2) {
3973 gen_compare(&cmp, 1, cond, dc);
3974 } else {
3975 goto illegal_insn;
3976 }
3977 } else {
3978 gen_fcompare(&cmp, cc, cond);
3979 }
3980
3981 /* The get_src2 above loaded the normal 13-bit
3982 immediate field, not the 11-bit field we have
3983 in movcc. But it did handle the reg case. */
3984 if (IS_IMM) {
3985 simm = GET_FIELD_SPs(insn, 0, 10);
3986 tcg_gen_movi_tl(cpu_src2, simm);
3987 }
3988
3989 dst = gen_load_gpr(dc, rd);
3990 tcg_gen_movcond_tl(cmp.cond, dst,
3991 cmp.c1, cmp.c2,
3992 cpu_src2, dst);
3993 free_compare(&cmp);
3994 gen_store_gpr(dc, rd, dst);
3995 break;
3996 }
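            /* Illustrative example: "movne %icc, 5, %o0" takes the IS_IMM
               path above, re-extracts the 11-bit immediate (5), and the
               movcond writes 5 into %o0 only when icc.Z is clear;
               otherwise %o0 keeps its old value. */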
3997 case 0x2d: /* V9 sdivx */
3998 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3999 gen_store_gpr(dc, rd, cpu_dst);
4000 break;
4001 case 0x2e: /* V9 popc */
4002 gen_helper_popc(cpu_dst, cpu_src2);
4003 gen_store_gpr(dc, rd, cpu_dst);
4004 break;
4005 case 0x2f: /* V9 movr */
4006 {
4007 int cond = GET_FIELD_SP(insn, 10, 12);
4008 DisasCompare cmp;
4009 TCGv dst;
4010
4011 gen_compare_reg(&cmp, cond, cpu_src1);
4012
4013 /* The get_src2 above loaded the normal 13-bit
4014 immediate field, not the 10-bit field we have
4015 in movr. But it did handle the reg case. */
4016 if (IS_IMM) {
4017 simm = GET_FIELD_SPs(insn, 0, 9);
4018 tcg_gen_movi_tl(cpu_src2, simm);
4019 }
4020
4021 dst = gen_load_gpr(dc, rd);
4022 tcg_gen_movcond_tl(cmp.cond, dst,
4023 cmp.c1, cmp.c2,
4024 cpu_src2, dst);
4025 free_compare(&cmp);
4026 gen_store_gpr(dc, rd, dst);
4027 break;
4028 }
4029 #endif
4030 default:
4031 goto illegal_insn;
4032 }
4033 }
4034 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4035 #ifdef TARGET_SPARC64
4036 int opf = GET_FIELD_SP(insn, 5, 13);
4037 rs1 = GET_FIELD(insn, 13, 17);
4038 rs2 = GET_FIELD(insn, 27, 31);
4039 if (gen_trap_ifnofpu(dc)) {
4040 goto jmp_insn;
4041 }
4042
4043 switch (opf) {
4044 case 0x000: /* VIS I edge8cc */
4045 CHECK_FPU_FEATURE(dc, VIS1);
4046 cpu_src1 = gen_load_gpr(dc, rs1);
4047 cpu_src2 = gen_load_gpr(dc, rs2);
4048 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4049 gen_store_gpr(dc, rd, cpu_dst);
4050 break;
4051 case 0x001: /* VIS II edge8n */
4052 CHECK_FPU_FEATURE(dc, VIS2);
4053 cpu_src1 = gen_load_gpr(dc, rs1);
4054 cpu_src2 = gen_load_gpr(dc, rs2);
4055 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4056 gen_store_gpr(dc, rd, cpu_dst);
4057 break;
4058 case 0x002: /* VIS I edge8lcc */
4059 CHECK_FPU_FEATURE(dc, VIS1);
4060 cpu_src1 = gen_load_gpr(dc, rs1);
4061 cpu_src2 = gen_load_gpr(dc, rs2);
4062 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4063 gen_store_gpr(dc, rd, cpu_dst);
4064 break;
4065 case 0x003: /* VIS II edge8ln */
4066 CHECK_FPU_FEATURE(dc, VIS2);
4067 cpu_src1 = gen_load_gpr(dc, rs1);
4068 cpu_src2 = gen_load_gpr(dc, rs2);
4069 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4070 gen_store_gpr(dc, rd, cpu_dst);
4071 break;
4072 case 0x004: /* VIS I edge16cc */
4073 CHECK_FPU_FEATURE(dc, VIS1);
4074 cpu_src1 = gen_load_gpr(dc, rs1);
4075 cpu_src2 = gen_load_gpr(dc, rs2);
4076 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4077 gen_store_gpr(dc, rd, cpu_dst);
4078 break;
4079 case 0x005: /* VIS II edge16n */
4080 CHECK_FPU_FEATURE(dc, VIS2);
4081 cpu_src1 = gen_load_gpr(dc, rs1);
4082 cpu_src2 = gen_load_gpr(dc, rs2);
4083 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4084 gen_store_gpr(dc, rd, cpu_dst);
4085 break;
4086 case 0x006: /* VIS I edge16lcc */
4087 CHECK_FPU_FEATURE(dc, VIS1);
4088 cpu_src1 = gen_load_gpr(dc, rs1);
4089 cpu_src2 = gen_load_gpr(dc, rs2);
4090 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4091 gen_store_gpr(dc, rd, cpu_dst);
4092 break;
4093 case 0x007: /* VIS II edge16ln */
4094 CHECK_FPU_FEATURE(dc, VIS2);
4095 cpu_src1 = gen_load_gpr(dc, rs1);
4096 cpu_src2 = gen_load_gpr(dc, rs2);
4097 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4098 gen_store_gpr(dc, rd, cpu_dst);
4099 break;
4100 case 0x008: /* VIS I edge32cc */
4101 CHECK_FPU_FEATURE(dc, VIS1);
4102 cpu_src1 = gen_load_gpr(dc, rs1);
4103 cpu_src2 = gen_load_gpr(dc, rs2);
4104 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4105 gen_store_gpr(dc, rd, cpu_dst);
4106 break;
4107 case 0x009: /* VIS II edge32n */
4108 CHECK_FPU_FEATURE(dc, VIS2);
4109 cpu_src1 = gen_load_gpr(dc, rs1);
4110 cpu_src2 = gen_load_gpr(dc, rs2);
4111 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4112 gen_store_gpr(dc, rd, cpu_dst);
4113 break;
4114 case 0x00a: /* VIS I edge32lcc */
4115 CHECK_FPU_FEATURE(dc, VIS1);
4116 cpu_src1 = gen_load_gpr(dc, rs1);
4117 cpu_src2 = gen_load_gpr(dc, rs2);
4118 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4119 gen_store_gpr(dc, rd, cpu_dst);
4120 break;
4121 case 0x00b: /* VIS II edge32ln */
4122 CHECK_FPU_FEATURE(dc, VIS2);
4123 cpu_src1 = gen_load_gpr(dc, rs1);
4124 cpu_src2 = gen_load_gpr(dc, rs2);
4125 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4126 gen_store_gpr(dc, rd, cpu_dst);
4127 break;
4128 case 0x010: /* VIS I array8 */
4129 CHECK_FPU_FEATURE(dc, VIS1);
4130 cpu_src1 = gen_load_gpr(dc, rs1);
4131 cpu_src2 = gen_load_gpr(dc, rs2);
4132 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4133 gen_store_gpr(dc, rd, cpu_dst);
4134 break;
4135 case 0x012: /* VIS I array16 */
4136 CHECK_FPU_FEATURE(dc, VIS1);
4137 cpu_src1 = gen_load_gpr(dc, rs1);
4138 cpu_src2 = gen_load_gpr(dc, rs2);
4139 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4140 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4141 gen_store_gpr(dc, rd, cpu_dst);
4142 break;
4143 case 0x014: /* VIS I array32 */
4144 CHECK_FPU_FEATURE(dc, VIS1);
4145 cpu_src1 = gen_load_gpr(dc, rs1);
4146 cpu_src2 = gen_load_gpr(dc, rs2);
4147 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4148 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4149 gen_store_gpr(dc, rd, cpu_dst);
4150 break;
4151 case 0x018: /* VIS I alignaddr */
4152 CHECK_FPU_FEATURE(dc, VIS1);
4153 cpu_src1 = gen_load_gpr(dc, rs1);
4154 cpu_src2 = gen_load_gpr(dc, rs2);
4155 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4156 gen_store_gpr(dc, rd, cpu_dst);
4157 break;
4158 case 0x01a: /* VIS I alignaddrl */
4159 CHECK_FPU_FEATURE(dc, VIS1);
4160 cpu_src1 = gen_load_gpr(dc, rs1);
4161 cpu_src2 = gen_load_gpr(dc, rs2);
4162 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4163 gen_store_gpr(dc, rd, cpu_dst);
4164 break;
4165 case 0x019: /* VIS II bmask */
4166 CHECK_FPU_FEATURE(dc, VIS2);
4167 cpu_src1 = gen_load_gpr(dc, rs1);
4168 cpu_src2 = gen_load_gpr(dc, rs2);
4169 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4170 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4171 gen_store_gpr(dc, rd, cpu_dst);
4172 break;
4173 case 0x020: /* VIS I fcmple16 */
4174 CHECK_FPU_FEATURE(dc, VIS1);
4175 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4176 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4177 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4178 gen_store_gpr(dc, rd, cpu_dst);
4179 break;
4180 case 0x022: /* VIS I fcmpne16 */
4181 CHECK_FPU_FEATURE(dc, VIS1);
4182 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4183 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4184 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4185 gen_store_gpr(dc, rd, cpu_dst);
4186 break;
4187 case 0x024: /* VIS I fcmple32 */
4188 CHECK_FPU_FEATURE(dc, VIS1);
4189 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4190 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4191 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4192 gen_store_gpr(dc, rd, cpu_dst);
4193 break;
4194 case 0x026: /* VIS I fcmpne32 */
4195 CHECK_FPU_FEATURE(dc, VIS1);
4196 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4197 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4198 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4199 gen_store_gpr(dc, rd, cpu_dst);
4200 break;
4201 case 0x028: /* VIS I fcmpgt16 */
4202 CHECK_FPU_FEATURE(dc, VIS1);
4203 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4204 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4205 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4206 gen_store_gpr(dc, rd, cpu_dst);
4207 break;
4208 case 0x02a: /* VIS I fcmpeq16 */
4209 CHECK_FPU_FEATURE(dc, VIS1);
4210 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4211 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4212 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4213 gen_store_gpr(dc, rd, cpu_dst);
4214 break;
4215 case 0x02c: /* VIS I fcmpgt32 */
4216 CHECK_FPU_FEATURE(dc, VIS1);
4217 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4218 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4219 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4220 gen_store_gpr(dc, rd, cpu_dst);
4221 break;
4222 case 0x02e: /* VIS I fcmpeq32 */
4223 CHECK_FPU_FEATURE(dc, VIS1);
4224 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4225 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4226 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4227 gen_store_gpr(dc, rd, cpu_dst);
4228 break;
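/* The gen_ne_fop_* wrappers only load the FP sources, invoke the
   generator and store the result; "ne" ops are presumed not to raise
   IEEE exceptions, so no FSR bookkeeping is emitted around them. */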
4229 case 0x031: /* VIS I fmul8x16 */
4230 CHECK_FPU_FEATURE(dc, VIS1);
4231 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4232 break;
4233 case 0x033: /* VIS I fmul8x16au */
4234 CHECK_FPU_FEATURE(dc, VIS1);
4235 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4236 break;
4237 case 0x035: /* VIS I fmul8x16al */
4238 CHECK_FPU_FEATURE(dc, VIS1);
4239 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4240 break;
4241 case 0x036: /* VIS I fmul8sux16 */
4242 CHECK_FPU_FEATURE(dc, VIS1);
4243 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4244 break;
4245 case 0x037: /* VIS I fmul8ulx16 */
4246 CHECK_FPU_FEATURE(dc, VIS1);
4247 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4248 break;
4249 case 0x038: /* VIS I fmuld8sux16 */
4250 CHECK_FPU_FEATURE(dc, VIS1);
4251 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4252 break;
4253 case 0x039: /* VIS I fmuld8ulx16 */
4254 CHECK_FPU_FEATURE(dc, VIS1);
4255 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4256 break;
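/* The fpack* helpers take %gsr explicitly: packing shifts each element
   by the GSR.scale field before clipping it into the destination. */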
4257 case 0x03a: /* VIS I fpack32 */
4258 CHECK_FPU_FEATURE(dc, VIS1);
4259 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4260 break;
4261 case 0x03b: /* VIS I fpack16 */
4262 CHECK_FPU_FEATURE(dc, VIS1);
4263 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4264 cpu_dst_32 = gen_dest_fpr_F(dc);
4265 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4266 gen_store_fpr_F(dc, rd, cpu_dst_32);
4267 break;
4268 case 0x03d: /* VIS I fpackfix */
4269 CHECK_FPU_FEATURE(dc, VIS1);
4270 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4271 cpu_dst_32 = gen_dest_fpr_F(dc);
4272 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4273 gen_store_fpr_F(dc, rd, cpu_dst_32);
4274 break;
4275 case 0x03e: /* VIS I pdist */
4276 CHECK_FPU_FEATURE(dc, VIS1);
4277 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4278 break;
4279 case 0x048: /* VIS I faligndata */
4280 CHECK_FPU_FEATURE(dc, VIS1);
4281 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4282 break;
4283 case 0x04b: /* VIS I fpmerge */
4284 CHECK_FPU_FEATURE(dc, VIS1);
4285 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4286 break;
4287 case 0x04c: /* VIS II bshuffle */
4288 CHECK_FPU_FEATURE(dc, VIS2);
4289 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4290 break;
4291 case 0x04d: /* VIS I fexpand */
4292 CHECK_FPU_FEATURE(dc, VIS1);
4293 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4294 break;
4295 case 0x050: /* VIS I fpadd16 */
4296 CHECK_FPU_FEATURE(dc, VIS1);
4297 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4298 break;
4299 case 0x051: /* VIS I fpadd16s */
4300 CHECK_FPU_FEATURE(dc, VIS1);
4301 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4302 break;
4303 case 0x052: /* VIS I fpadd32 */
4304 CHECK_FPU_FEATURE(dc, VIS1);
4305 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4306 break;
4307 case 0x053: /* VIS I fpadd32s */
4308 CHECK_FPU_FEATURE(dc, VIS1);
4309 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4310 break;
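/* A partitioned add/sub of a single 32-bit element is an ordinary
   modular 32-bit add/sub, so fpadd32s/fpsub32s map directly onto TCG
   ops; the 16-bit-lane variants still need helpers. */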
4311 case 0x054: /* VIS I fpsub16 */
4312 CHECK_FPU_FEATURE(dc, VIS1);
4313 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4314 break;
4315 case 0x055: /* VIS I fpsub16s */
4316 CHECK_FPU_FEATURE(dc, VIS1);
4317 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4318 break;
4319 case 0x056: /* VIS I fpsub32 */
4320 CHECK_FPU_FEATURE(dc, VIS1);
4321 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4322 break;
4323 case 0x057: /* VIS I fpsub32s */
4324 CHECK_FPU_FEATURE(dc, VIS1);
4325 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4326 break;
4327 case 0x060: /* VIS I fzero */
4328 CHECK_FPU_FEATURE(dc, VIS1);
4329 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4330 tcg_gen_movi_i64(cpu_dst_64, 0);
4331 gen_store_fpr_D(dc, rd, cpu_dst_64);
4332 break;
4333 case 0x061: /* VIS I fzeros */
4334 CHECK_FPU_FEATURE(dc, VIS1);
4335 cpu_dst_32 = gen_dest_fpr_F(dc);
4336 tcg_gen_movi_i32(cpu_dst_32, 0);
4337 gen_store_fpr_F(dc, rd, cpu_dst_32);
4338 break;
4339 case 0x062: /* VIS I fnor */
4340 CHECK_FPU_FEATURE(dc, VIS1);
4341 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4342 break;
4343 case 0x063: /* VIS I fnors */
4344 CHECK_FPU_FEATURE(dc, VIS1);
4345 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4346 break;
4347 case 0x064: /* VIS I fandnot2 */
4348 CHECK_FPU_FEATURE(dc, VIS1);
4349 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4350 break;
4351 case 0x065: /* VIS I fandnot2s */
4352 CHECK_FPU_FEATURE(dc, VIS1);
4353 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4354 break;
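/* The *not1* and *ornot1* variants below reuse the same andc/orc
   generators with the two source operands swapped. */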
4355 case 0x066: /* VIS I fnot2 */
4356 CHECK_FPU_FEATURE(dc, VIS1);
4357 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4358 break;
4359 case 0x067: /* VIS I fnot2s */
4360 CHECK_FPU_FEATURE(dc, VIS1);
4361 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4362 break;
4363 case 0x068: /* VIS I fandnot1 */
4364 CHECK_FPU_FEATURE(dc, VIS1);
4365 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4366 break;
4367 case 0x069: /* VIS I fandnot1s */
4368 CHECK_FPU_FEATURE(dc, VIS1);
4369 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4370 break;
4371 case 0x06a: /* VIS I fnot1 */
4372 CHECK_FPU_FEATURE(dc, VIS1);
4373 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4374 break;
4375 case 0x06b: /* VIS I fnot1s */
4376 CHECK_FPU_FEATURE(dc, VIS1);
4377 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4378 break;
4379 case 0x06c: /* VIS I fxor */
4380 CHECK_FPU_FEATURE(dc, VIS1);
4381 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4382 break;
4383 case 0x06d: /* VIS I fxors */
4384 CHECK_FPU_FEATURE(dc, VIS1);
4385 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4386 break;
4387 case 0x06e: /* VIS I fnand */
4388 CHECK_FPU_FEATURE(dc, VIS1);
4389 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4390 break;
4391 case 0x06f: /* VIS I fnands */
4392 CHECK_FPU_FEATURE(dc, VIS1);
4393 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4394 break;
4395 case 0x070: /* VIS I fand */
4396 CHECK_FPU_FEATURE(dc, VIS1);
4397 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4398 break;
4399 case 0x071: /* VIS I fands */
4400 CHECK_FPU_FEATURE(dc, VIS1);
4401 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4402 break;
4403 case 0x072: /* VIS I fxnor */
4404 CHECK_FPU_FEATURE(dc, VIS1);
4405 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4406 break;
4407 case 0x073: /* VIS I fxnors */
4408 CHECK_FPU_FEATURE(dc, VIS1);
4409 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4410 break;
4411 case 0x074: /* VIS I fsrc1 */
4412 CHECK_FPU_FEATURE(dc, VIS1);
4413 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4414 gen_store_fpr_D(dc, rd, cpu_src1_64);
4415 break;
4416 case 0x075: /* VIS I fsrc1s */
4417 CHECK_FPU_FEATURE(dc, VIS1);
4418 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4419 gen_store_fpr_F(dc, rd, cpu_src1_32);
4420 break;
4421 case 0x076: /* VIS I fornot2 */
4422 CHECK_FPU_FEATURE(dc, VIS1);
4423 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4424 break;
4425 case 0x077: /* VIS I fornot2s */
4426 CHECK_FPU_FEATURE(dc, VIS1);
4427 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4428 break;
4429 case 0x078: /* VIS I fsrc2 */
4430 CHECK_FPU_FEATURE(dc, VIS1);
4431 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4432 gen_store_fpr_D(dc, rd, cpu_src1_64);
4433 break;
4434 case 0x079: /* VIS I fsrc2s */
4435 CHECK_FPU_FEATURE(dc, VIS1);
4436 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4437 gen_store_fpr_F(dc, rd, cpu_src1_32);
4438 break;
4439 case 0x07a: /* VIS I fornot1 */
4440 CHECK_FPU_FEATURE(dc, VIS1);
4441 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4442 break;
4443 case 0x07b: /* VIS I fornot1s */
4444 CHECK_FPU_FEATURE(dc, VIS1);
4445 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4446 break;
4447 case 0x07c: /* VIS I for */
4448 CHECK_FPU_FEATURE(dc, VIS1);
4449 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4450 break;
4451 case 0x07d: /* VIS I fors */
4452 CHECK_FPU_FEATURE(dc, VIS1);
4453 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4454 break;
4455 case 0x07e: /* VIS I fone */
4456 CHECK_FPU_FEATURE(dc, VIS1);
4457 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4458 tcg_gen_movi_i64(cpu_dst_64, -1);
4459 gen_store_fpr_D(dc, rd, cpu_dst_64);
4460 break;
4461 case 0x07f: /* VIS I fones */
4462 CHECK_FPU_FEATURE(dc, VIS1);
4463 cpu_dst_32 = gen_dest_fpr_F(dc);
4464 tcg_gen_movi_i32(cpu_dst_32, -1);
4465 gen_store_fpr_F(dc, rd, cpu_dst_32);
4466 break;
4467 case 0x080: /* VIS I shutdown */
4468 case 0x081: /* VIS II siam */
4469 /* XXX: SHUTDOWN and SIAM are not implemented */
4470 goto illegal_insn;
4471 default:
4472 goto illegal_insn;
4473 }
4474 #else
4475 goto ncp_insn;
4476 #endif
4477 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4478 #ifdef TARGET_SPARC64
4479 goto illegal_insn;
4480 #else
4481 goto ncp_insn;
4482 #endif
4483 #ifdef TARGET_SPARC64
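/* V9 return: restore the caller's register window, then jump to
   rs1 + rs2 (or rs1 + simm); check_align with mask 3 traps unless
   the target is 4-byte aligned. */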
4484 } else if (xop == 0x39) { /* V9 return */
4485 TCGv_i32 r_const;
4486
4487 save_state(dc);
4488 cpu_src1 = get_src1(dc, insn);
4489 cpu_tmp0 = get_temp_tl(dc);
4490 if (IS_IMM) { /* immediate */
4491 simm = GET_FIELDs(insn, 19, 31);
4492 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4493 } else { /* register */
4494 rs2 = GET_FIELD(insn, 27, 31);
4495 if (rs2) {
4496 cpu_src2 = gen_load_gpr(dc, rs2);
4497 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4498 } else {
4499 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4500 }
4501 }
4502 gen_helper_restore(cpu_env);
4503 gen_mov_pc_npc(dc);
4504 r_const = tcg_const_i32(3);
4505 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4506 tcg_temp_free_i32(r_const);
4507 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4508 dc->npc = DYNAMIC_PC;
4509 goto jmp_insn;
4510 #endif
4511 } else {
4512 cpu_src1 = get_src1(dc, insn);
4513 cpu_tmp0 = get_temp_tl(dc);
4514 if (IS_IMM) { /* immediate */
4515 simm = GET_FIELDs(insn, 19, 31);
4516 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4517 } else { /* register */
4518 rs2 = GET_FIELD(insn, 27, 31);
4519 if (rs2) {
4520 cpu_src2 = gen_load_gpr(dc, rs2);
4521 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4522 } else {
4523 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4524 }
4525 }
4526 switch (xop) {
4527 case 0x38: /* jmpl */
4528 {
4529 TCGv t;
4530 TCGv_i32 r_const;
4531
4532 t = gen_dest_gpr(dc, rd);
4533 tcg_gen_movi_tl(t, dc->pc);
4534 gen_store_gpr(dc, rd, t);
4535 gen_mov_pc_npc(dc);
4536 r_const = tcg_const_i32(3);
4537 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4538 tcg_temp_free_i32(r_const);
4539 gen_address_mask(dc, cpu_tmp0);
4540 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4541 dc->npc = DYNAMIC_PC;
4542 }
4543 goto jmp_insn;
4544 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4545 case 0x39: /* rett, V9 return */
4546 {
4547 TCGv_i32 r_const;
4548
4549 if (!supervisor(dc))
4550 goto priv_insn;
4551 gen_mov_pc_npc(dc);
4552 r_const = tcg_const_i32(3);
4553 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4554 tcg_temp_free_i32(r_const);
4555 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4556 dc->npc = DYNAMIC_PC;
4557 gen_helper_rett(cpu_env);
4558 }
4559 goto jmp_insn;
4560 #endif
4561 case 0x3b: /* flush */
4562 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4563 goto unimp_flush;
4564 /* nop */
4565 break;
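/* For save/restore the helper rotates the register window first, so
   the gen_store_gpr below writes the computed rs1 + rs2 (or rs1 + simm)
   value into rd of the *new* window, as the architecture requires. */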
4566 case 0x3c: /* save */
4567 save_state(dc);
4568 gen_helper_save(cpu_env);
4569 gen_store_gpr(dc, rd, cpu_tmp0);
4570 break;
4571 case 0x3d: /* restore */
4572 save_state(dc);
4573 gen_helper_restore(cpu_env);
4574 gen_store_gpr(dc, rd, cpu_tmp0);
4575 break;
4576 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4577 case 0x3e: /* V9 done/retry */
4578 {
4579 switch (rd) {
4580 case 0:
4581 if (!supervisor(dc))
4582 goto priv_insn;
4583 dc->npc = DYNAMIC_PC;
4584 dc->pc = DYNAMIC_PC;
4585 gen_helper_done(cpu_env);
4586 goto jmp_insn;
4587 case 1:
4588 if (!supervisor(dc))
4589 goto priv_insn;
4590 dc->npc = DYNAMIC_PC;
4591 dc->pc = DYNAMIC_PC;
4592 gen_helper_retry(cpu_env);
4593 goto jmp_insn;
4594 default:
4595 goto illegal_insn;
4596 }
4597 }
4598 break;
4599 #endif
4600 default:
4601 goto illegal_insn;
4602 }
4603 }
4604 break;
4605 }
4606 break;
4607 case 3: /* load/store instructions */
4608 {
4609 unsigned int xop = GET_FIELD(insn, 7, 12);
4610 /* ??? gen_address_mask prevents us from using a source
4611 register directly. Always generate a temporary. */
4612 TCGv cpu_addr = get_temp_tl(dc);
4613
4614 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4615 if (xop == 0x3c || xop == 0x3e) {
4616 /* V9 casa/casxa: no offset */
4617 } else if (IS_IMM) { /* immediate */
4618 simm = GET_FIELDs(insn, 19, 31);
4619 if (simm != 0) {
4620 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4621 }
4622 } else { /* register */
4623 rs2 = GET_FIELD(insn, 27, 31);
4624 if (rs2 != 0) {
4625 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
4626 }
4627 }
4628 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4629 (xop > 0x17 && xop <= 0x1d ) ||
4630 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4631 TCGv cpu_val = gen_dest_gpr(dc, rd);
4632
4633 switch (xop) {
4634 case 0x0: /* ld, V9 lduw, load unsigned word */
4635 gen_address_mask(dc, cpu_addr);
4636 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4637 break;
4638 case 0x1: /* ldub, load unsigned byte */
4639 gen_address_mask(dc, cpu_addr);
4640 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4641 break;
4642 case 0x2: /* lduh, load unsigned halfword */
4643 gen_address_mask(dc, cpu_addr);
4644 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4645 break;
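/* ldd needs an even rd: the word at addr goes to rd and the word at
   addr + 4 to rd + 1. Below, the low half of the big-endian 64-bit
   load (i.e. the second word) is stored to rd + 1 immediately, while
   the first word is left in cpu_val for the common gen_store_gpr at
   the end of the load path. */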
4646 case 0x3: /* ldd, load double word */
4647 if (rd & 1)
4648 goto illegal_insn;
4649 else {
4650 TCGv_i32 r_const;
4651 TCGv_i64 t64;
4652
4653 save_state(dc);
4654 r_const = tcg_const_i32(7);
4655 /* XXX remove alignment check */
4656 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4657 tcg_temp_free_i32(r_const);
4658 gen_address_mask(dc, cpu_addr);
4659 t64 = tcg_temp_new_i64();
4660 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4661 tcg_gen_trunc_i64_tl(cpu_val, t64);
4662 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4663 gen_store_gpr(dc, rd + 1, cpu_val);
4664 tcg_gen_shri_i64(t64, t64, 32);
4665 tcg_gen_trunc_i64_tl(cpu_val, t64);
4666 tcg_temp_free_i64(t64);
4667 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4668 }
4669 break;
4670 case 0x9: /* ldsb, load signed byte */
4671 gen_address_mask(dc, cpu_addr);
4672 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4673 break;
4674 case 0xa: /* ldsh, load signed halfword */
4675 gen_address_mask(dc, cpu_addr);
4676 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4677 break;
4678 case 0xd: /* ldstub -- XXX: should be atomic */
4679 {
4680 TCGv r_const;
4681
4682 gen_address_mask(dc, cpu_addr);
4683 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4684 r_const = tcg_const_tl(0xff);
4685 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4686 tcg_temp_free(r_const);
4687 }
4688 break;
4689 case 0x0f:
4690 /* swap, swap register with memory -- XXX: should be atomic */
4691 {
4692 TCGv t0 = get_temp_tl(dc);
4693 CHECK_IU_FEATURE(dc, SWAP);
4694 cpu_src1 = gen_load_gpr(dc, rd);
4695 gen_address_mask(dc, cpu_addr);
4696 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4697 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4698 tcg_gen_mov_tl(cpu_val, t0);
4699 }
4700 break;
4701 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
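/* On 32-bit SPARC the alternate-space accesses below are privileged
   and the immediate form is illegal; V9 also accepts the immediate
   form, which takes its ASI from the %asi register. */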
4702 case 0x10: /* lda, V9 lduwa, load word alternate */
4703 #ifndef TARGET_SPARC64
4704 if (IS_IMM)
4705 goto illegal_insn;
4706 if (!supervisor(dc))
4707 goto priv_insn;
4708 #endif
4709 save_state(dc);
4710 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4711 break;
4712 case 0x11: /* lduba, load unsigned byte alternate */
4713 #ifndef TARGET_SPARC64
4714 if (IS_IMM)
4715 goto illegal_insn;
4716 if (!supervisor(dc))
4717 goto priv_insn;
4718 #endif
4719 save_state(dc);
4720 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4721 break;
4722 case 0x12: /* lduha, load unsigned halfword alternate */
4723 #ifndef TARGET_SPARC64
4724 if (IS_IMM)
4725 goto illegal_insn;
4726 if (!supervisor(dc))
4727 goto priv_insn;
4728 #endif
4729 save_state(dc);
4730 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4731 break;
4732 case 0x13: /* ldda, load double word alternate */
4733 #ifndef TARGET_SPARC64
4734 if (IS_IMM)
4735 goto illegal_insn;
4736 if (!supervisor(dc))
4737 goto priv_insn;
4738 #endif
4739 if (rd & 1)
4740 goto illegal_insn;
4741 save_state(dc);
4742 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4743 goto skip_move;
4744 case 0x19: /* ldsba, load signed byte alternate */
4745 #ifndef TARGET_SPARC64
4746 if (IS_IMM)
4747 goto illegal_insn;
4748 if (!supervisor(dc))
4749 goto priv_insn;
4750 #endif
4751 save_state(dc);
4752 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4753 break;
4754 case 0x1a: /* ldsha, load signed halfword alternate */
4755 #ifndef TARGET_SPARC64
4756 if (IS_IMM)
4757 goto illegal_insn;
4758 if (!supervisor(dc))
4759 goto priv_insn;
4760 #endif
4761 save_state(dc);
4762 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4763 break;
4764 case 0x1d: /* ldstuba -- XXX: should be atomic */
4765 #ifndef TARGET_SPARC64
4766 if (IS_IMM)
4767 goto illegal_insn;
4768 if (!supervisor(dc))
4769 goto priv_insn;
4770 #endif
4771 save_state(dc);
4772 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4773 break;
4774 case 0x1f: /* swapa, swap reg with alt. memory -- XXX: should be
4775 atomic */
4776 CHECK_IU_FEATURE(dc, SWAP);
4777 #ifndef TARGET_SPARC64
4778 if (IS_IMM)
4779 goto illegal_insn;
4780 if (!supervisor(dc))
4781 goto priv_insn;
4782 #endif
4783 save_state(dc);
4784 cpu_src1 = gen_load_gpr(dc, rd);
4785 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4786 break;
4787
4788 #ifndef TARGET_SPARC64
4789 case 0x30: /* ldc */
4790 case 0x31: /* ldcsr */
4791 case 0x33: /* lddc */
4792 goto ncp_insn;
4793 #endif
4794 #endif
4795 #ifdef TARGET_SPARC64
4796 case 0x08: /* V9 ldsw */
4797 gen_address_mask(dc, cpu_addr);
4798 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4799 break;
4800 case 0x0b: /* V9 ldx */
4801 gen_address_mask(dc, cpu_addr);
4802 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4803 break;
4804 case 0x18: /* V9 ldswa */
4805 save_state(dc);
4806 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4807 break;
4808 case 0x1b: /* V9 ldxa */
4809 save_state(dc);
4810 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4811 break;
4812 case 0x2d: /* V9 prefetch, no effect */
4813 goto skip_move;
4814 case 0x30: /* V9 ldfa */
4815 if (gen_trap_ifnofpu(dc)) {
4816 goto jmp_insn;
4817 }
4818 save_state(dc);
4819 gen_ldf_asi(cpu_addr, insn, 4, rd);
4820 gen_update_fprs_dirty(rd);
4821 goto skip_move;
4822 case 0x33: /* V9 lddfa */
4823 if (gen_trap_ifnofpu(dc)) {
4824 goto jmp_insn;
4825 }
4826 save_state(dc);
4827 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4828 gen_update_fprs_dirty(DFPREG(rd));
4829 goto skip_move;
4830 case 0x3d: /* V9 prefetcha, no effect */
4831 goto skip_move;
4832 case 0x32: /* V9 ldqfa */
4833 CHECK_FPU_FEATURE(dc, FLOAT128);
4834 if (gen_trap_ifnofpu(dc)) {
4835 goto jmp_insn;
4836 }
4837 save_state(dc);
4838 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4839 gen_update_fprs_dirty(QFPREG(rd));
4840 goto skip_move;
4841 #endif
4842 default:
4843 goto illegal_insn;
4844 }
4845 gen_store_gpr(dc, rd, cpu_val);
4846 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4847 skip_move: ;
4848 #endif
4849 } else if (xop >= 0x20 && xop < 0x24) {
4850 TCGv t0;
4851
4852 if (gen_trap_ifnofpu(dc)) {
4853 goto jmp_insn;
4854 }
4855 save_state(dc);
4856 switch (xop) {
4857 case 0x20: /* ldf, load fpreg */
4858 gen_address_mask(dc, cpu_addr);
4859 t0 = get_temp_tl(dc);
4860 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4861 cpu_dst_32 = gen_dest_fpr_F(dc);
4862 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4863 gen_store_fpr_F(dc, rd, cpu_dst_32);
4864 break;
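/* On V9 the rd field selects the FSR width: rd == 1 is the 64-bit
   ldxfsr, rd == 0 the 32-bit ldfsr. */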
4865 case 0x21: /* ldfsr, V9 ldxfsr */
4866 #ifdef TARGET_SPARC64
4867 gen_address_mask(dc, cpu_addr);
4868 if (rd == 1) {
4869 TCGv_i64 t64 = tcg_temp_new_i64();
4870 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4871 gen_helper_ldxfsr(cpu_env, t64);
4872 tcg_temp_free_i64(t64);
4873 break;
4874 }
4875 #endif
4876 cpu_dst_32 = get_temp_i32(dc);
4877 t0 = get_temp_tl(dc);
4878 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4879 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4880 gen_helper_ldfsr(cpu_env, cpu_dst_32);
4881 break;
4882 case 0x22: /* ldqf, load quad fpreg */
4883 {
4884 TCGv_i32 r_const;
4885
4886 CHECK_FPU_FEATURE(dc, FLOAT128);
4887 r_const = tcg_const_i32(dc->mem_idx);
4888 gen_address_mask(dc, cpu_addr);
4889 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4890 tcg_temp_free_i32(r_const);
4891 gen_op_store_QT0_fpr(QFPREG(rd));
4892 gen_update_fprs_dirty(QFPREG(rd));
4893 }
4894 break;
4895 case 0x23: /* lddf, load double fpreg */
4896 gen_address_mask(dc, cpu_addr);
4897 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4898 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4899 gen_store_fpr_D(dc, rd, cpu_dst_64);
4900 break;
4901 default:
4902 goto illegal_insn;
4903 }
4904 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4905 xop == 0xe || xop == 0x1e) {
4906 TCGv cpu_val = gen_load_gpr(dc, rd);
4907
4908 switch (xop) {
4909 case 0x4: /* st, store word */
4910 gen_address_mask(dc, cpu_addr);
4911 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4912 break;
4913 case 0x5: /* stb, store byte */
4914 gen_address_mask(dc, cpu_addr);
4915 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4916 break;
4917 case 0x6: /* sth, store halfword */
4918 gen_address_mask(dc, cpu_addr);
4919 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4920 break;
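/* std stores an even/odd pair with one 64-bit access: the concat below
   puts rd + 1 in the low half and rd in the high half, so on this
   big-endian target rd lands at addr and rd + 1 at addr + 4. */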
4921 case 0x7: /* std, store double word */
4922 if (rd & 1)
4923 goto illegal_insn;
4924 else {
4925 TCGv_i32 r_const;
4926 TCGv_i64 t64;
4927 TCGv lo;
4928
4929 save_state(dc);
4930 gen_address_mask(dc, cpu_addr);
4931 r_const = tcg_const_i32(7);
4932 /* XXX remove alignment check */
4933 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4934 tcg_temp_free_i32(r_const);
4935 lo = gen_load_gpr(dc, rd + 1);
4936
4937 t64 = tcg_temp_new_i64();
4938 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
4939 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
4940 tcg_temp_free_i64(t64);
4941 }
4942 break;
4943 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
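/* Alternate-space stores can have side effects beyond memory (MMU and
   cache control registers, for instance), so npc is forced to
   DYNAMIC_PC, ending the TB after the store. */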
4944 case 0x14: /* sta, V9 stwa, store word alternate */
4945 #ifndef TARGET_SPARC64
4946 if (IS_IMM)
4947 goto illegal_insn;
4948 if (!supervisor(dc))
4949 goto priv_insn;
4950 #endif
4951 save_state(dc);
4952 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4953 dc->npc = DYNAMIC_PC;
4954 break;
4955 case 0x15: /* stba, store byte alternate */
4956 #ifndef TARGET_SPARC64
4957 if (IS_IMM)
4958 goto illegal_insn;
4959 if (!supervisor(dc))
4960 goto priv_insn;
4961 #endif
4962 save_state(dc);
4963 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4964 dc->npc = DYNAMIC_PC;
4965 break;
4966 case 0x16: /* stha, store halfword alternate */
4967 #ifndef TARGET_SPARC64
4968 if (IS_IMM)
4969 goto illegal_insn;
4970 if (!supervisor(dc))
4971 goto priv_insn;
4972 #endif
4973 save_state(dc);
4974 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4975 dc->npc = DYNAMIC_PC;
4976 break;
4977 case 0x17: /* stda, store double word alternate */
4978 #ifndef TARGET_SPARC64
4979 if (IS_IMM)
4980 goto illegal_insn;
4981 if (!supervisor(dc))
4982 goto priv_insn;
4983 #endif
4984 if (rd & 1)
4985 goto illegal_insn;
4986 else {
4987 save_state(dc);
4988 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4989 }
4990 break;
4991 #endif
4992 #ifdef TARGET_SPARC64
4993 case 0x0e: /* V9 stx */
4994 gen_address_mask(dc, cpu_addr);
4995 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4996 break;
4997 case 0x1e: /* V9 stxa */
4998 save_state(dc);
4999 gen_st_asi(cpu_val, cpu_addr, insn, 8);
5000 dc->npc = DYNAMIC_PC;
5001 break;
5002 #endif
5003 default:
5004 goto illegal_insn;
5005 }
5006 } else if (xop > 0x23 && xop < 0x28) {
5007 if (gen_trap_ifnofpu(dc)) {
5008 goto jmp_insn;
5009 }
5010 save_state(dc);
5011 switch (xop) {
5012 case 0x24: /* stf, store fpreg */
5013 {
5014 TCGv t = get_temp_tl(dc);
5015 gen_address_mask(dc, cpu_addr);
5016 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5017 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5018 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5019 }
5020 break;
5021 case 0x25: /* stfsr, V9 stxfsr */
5022 {
5023 TCGv t = get_temp_tl(dc);
5024
5025 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
5026 #ifdef TARGET_SPARC64
5027 gen_address_mask(dc, cpu_addr);
5028 if (rd == 1) {
5029 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
5030 break;
5031 }
5032 #endif
5033 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5034 }
5035 break;
5036 case 0x26:
5037 #ifdef TARGET_SPARC64
5038 /* V9 stqf, store quad fpreg */
5039 {
5040 TCGv_i32 r_const;
5041
5042 CHECK_FPU_FEATURE(dc, FLOAT128);
5043 gen_op_load_fpr_QT0(QFPREG(rd));
5044 r_const = tcg_const_i32(dc->mem_idx);
5045 gen_address_mask(dc, cpu_addr);
5046 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5047 tcg_temp_free_i32(r_const);
5048 }
5049 break;
5050 #else /* !TARGET_SPARC64 */
5051 /* stdfq, store floating point queue */
5052 #if defined(CONFIG_USER_ONLY)
5053 goto illegal_insn;
5054 #else
5055 if (!supervisor(dc))
5056 goto priv_insn;
5057 if (gen_trap_ifnofpu(dc)) {
5058 goto jmp_insn;
5059 }
5060 goto nfq_insn;
5061 #endif
5062 #endif
5063 case 0x27: /* stdf, store double fpreg */
5064 gen_address_mask(dc, cpu_addr);
5065 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5066 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5067 break;
5068 default:
5069 goto illegal_insn;
5070 }
5071 } else if (xop > 0x33 && xop < 0x3f) {
5072 save_state(dc);
5073 switch (xop) {
5074 #ifdef TARGET_SPARC64
5075 case 0x34: /* V9 stfa */
5076 if (gen_trap_ifnofpu(dc)) {
5077 goto jmp_insn;
5078 }
5079 gen_stf_asi(cpu_addr, insn, 4, rd);
5080 break;
5081 case 0x36: /* V9 stqfa */
5082 {
5083 TCGv_i32 r_const;
5084
5085 CHECK_FPU_FEATURE(dc, FLOAT128);
5086 if (gen_trap_ifnofpu(dc)) {
5087 goto jmp_insn;
5088 }
5089 r_const = tcg_const_i32(7);
5090 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5091 tcg_temp_free_i32(r_const);
5092 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5093 }
5094 break;
5095 case 0x37: /* V9 stdfa */
5096 if (gen_trap_ifnofpu(dc)) {
5097 goto jmp_insn;
5098 }
5099 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5100 break;
5101 case 0x3e: /* V9 casxa */
5102 rs2 = GET_FIELD(insn, 27, 31);
5103 cpu_src2 = gen_load_gpr(dc, rs2);
5104 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5105 break;
5106 #else
5107 case 0x34: /* stc */
5108 case 0x35: /* stcsr */
5109 case 0x36: /* stdcq */
5110 case 0x37: /* stdc */
5111 goto ncp_insn;
5112 #endif
5113 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
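/* casa compares the word at [addr] with rs2: rd always receives the
   old memory word, and rd's previous value is written to memory only
   when the comparison matches. */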
5114 case 0x3c: /* V9 or LEON3 casa */
5115 #ifndef TARGET_SPARC64
5116 CHECK_IU_FEATURE(dc, CASA);
5117 if (IS_IMM) {
5118 goto illegal_insn;
5119 }
5120 /* LEON3 allows CASA from user space with ASI 0xa */
5121 if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
5122 goto priv_insn;
5123 }
5124 #endif
5125 rs2 = GET_FIELD(insn, 27, 31);
5126 cpu_src2 = gen_load_gpr(dc, rs2);
5127 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5128 break;
5129 #endif
5130 default:
5131 goto illegal_insn;
5132 }
5133 } else {
5134 goto illegal_insn;
5135 }
5136 }
5137 break;
5138 }
5139 /* default case for non-jump instructions: advance the pc/npc pair */
5140 if (dc->npc == DYNAMIC_PC) {
5141 dc->pc = DYNAMIC_PC;
5142 gen_op_next_insn();
5143 } else if (dc->npc == JUMP_PC) {
5144 /* we can do a static jump */
5145 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5146 dc->is_br = 1;
5147 } else {
5148 dc->pc = dc->npc;
5149 dc->npc = dc->npc + 4;
5150 }
5151 jmp_insn:
5152 goto egress;
5153 illegal_insn:
5154 {
5155 TCGv_i32 r_const;
5156
5157 save_state(dc);
5158 r_const = tcg_const_i32(TT_ILL_INSN);
5159 gen_helper_raise_exception(cpu_env, r_const);
5160 tcg_temp_free_i32(r_const);
5161 dc->is_br = 1;
5162 }
5163 goto egress;
5164 unimp_flush:
5165 {
5166 TCGv_i32 r_const;
5167
5168 save_state(dc);
5169 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5170 gen_helper_raise_exception(cpu_env, r_const);
5171 tcg_temp_free_i32(r_const);
5172 dc->is_br = 1;
5173 }
5174 goto egress;
5175 #if !defined(CONFIG_USER_ONLY)
5176 priv_insn:
5177 {
5178 TCGv_i32 r_const;
5179
5180 save_state(dc);
5181 r_const = tcg_const_i32(TT_PRIV_INSN);
5182 gen_helper_raise_exception(cpu_env, r_const);
5183 tcg_temp_free_i32(r_const);
5184 dc->is_br = 1;
5185 }
5186 goto egress;
5187 #endif
5188 nfpu_insn:
5189 save_state(dc);
5190 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5191 dc->is_br = 1;
5192 goto egress;
5193 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5194 nfq_insn:
5195 save_state(dc);
5196 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5197 dc->is_br = 1;
5198 goto egress;
5199 #endif
5200 #ifndef TARGET_SPARC64
5201 ncp_insn:
5202 {
5203 TCGv_i32 r_const;
5204
5205 save_state(dc);
5206 r_const = tcg_const_i32(TT_NCP_INSN);
5207 gen_helper_raise_exception(cpu_env, r_const);
5208 tcg_temp_free_i32(r_const);
5209 dc->is_br = 1;
5210 }
5211 goto egress;
5212 #endif
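/* Free the per-instruction temporaries that get_temp_i32()/get_temp_tl()
   recorded in the DisasContext. */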
5213 egress:
5214 if (dc->n_t32 != 0) {
5215 int i;
5216 for (i = dc->n_t32 - 1; i >= 0; --i) {
5217 tcg_temp_free_i32(dc->t32[i]);
5218 }
5219 dc->n_t32 = 0;
5220 }
5221 if (dc->n_ttl != 0) {
5222 int i;
5223 for (i = dc->n_ttl - 1; i >= 0; --i) {
5224 tcg_temp_free(dc->ttl[i]);
5225 }
5226 dc->n_ttl = 0;
5227 }
5228 }
5229
5230 void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
5231 {
5232 SPARCCPU *cpu = sparc_env_get_cpu(env);
5233 CPUState *cs = CPU(cpu);
5234 target_ulong pc_start, last_pc;
5235 DisasContext dc1, *dc = &dc1;
5236 int num_insns;
5237 int max_insns;
5238 unsigned int insn;
5239
5240 memset(dc, 0, sizeof(DisasContext));
5241 dc->tb = tb;
5242 pc_start = tb->pc;
5243 dc->pc = pc_start;
5244 last_pc = dc->pc;
5245 dc->npc = (target_ulong) tb->cs_base;
5246 dc->cc_op = CC_OP_DYNAMIC;
5247 dc->mem_idx = cpu_mmu_index(env, false);
5248 dc->def = env->def;
5249 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5250 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5251 dc->singlestep = (cs->singlestep_enabled || singlestep);
5252
5253 num_insns = 0;
5254 max_insns = tb->cflags & CF_COUNT_MASK;
5255 if (max_insns == 0) {
5256 max_insns = CF_COUNT_MASK;
5257 }
5258 if (max_insns > TCG_MAX_INSNS) {
5259 max_insns = TCG_MAX_INSNS;
5260 }
5261
5262 gen_tb_start(tb);
5263 do {
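/* Each insn_start records the (pc, npc) pair that
   restore_state_to_opc() consumes; a pending conditional branch is
   flagged by or'ing JUMP_PC into the taken target, the not-taken
   path being fixed at dc->pc + 4 (see the assert). */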
5264 if (dc->npc & JUMP_PC) {
5265 assert(dc->jump_pc[1] == dc->pc + 4);
5266 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5267 } else {
5268 tcg_gen_insn_start(dc->pc, dc->npc);
5269 }
5270 num_insns++;
5271 last_pc = dc->pc;
5272
5273 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5274 if (dc->pc != pc_start) {
5275 save_state(dc);
5276 }
5277 gen_helper_debug(cpu_env);
5278 tcg_gen_exit_tb(0);
5279 dc->is_br = 1;
5280 goto exit_gen_loop;
5281 }
5282
5283 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5284 gen_io_start();
5285 }
5286
5287 insn = cpu_ldl_code(env, dc->pc);
5288
5289 disas_sparc_insn(dc, insn);
5290
5291 if (dc->is_br)
5292 break;
5293 /* if the next PC is different, we abort now */
5294 if (dc->pc != (last_pc + 4))
5295 break;
5296 /* if we reach a page boundary, we stop generation so that the
5297 PC of a TT_TFAULT exception is always in the right page */
5298 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5299 break;
5300 /* in single-step mode, we translate only one instruction and then
5301 generate an exception */
5302 if (dc->singlestep) {
5303 break;
5304 }
5305 } while (!tcg_op_buf_full() &&
5306 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5307 num_insns < max_insns);
5308
5309 exit_gen_loop:
5310 if (tb->cflags & CF_LAST_IO) {
5311 gen_io_end();
5312 }
5313 if (!dc->is_br) {
5314 if (dc->pc != DYNAMIC_PC &&
5315 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5316 /* static PC and NPC: we can use direct chaining */
5317 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5318 } else {
5319 if (dc->pc != DYNAMIC_PC) {
5320 tcg_gen_movi_tl(cpu_pc, dc->pc);
5321 }
5322 save_npc(dc);
5323 tcg_gen_exit_tb(0);
5324 }
5325 }
5326 gen_tb_end(tb, num_insns);
5327
5328 tb->size = last_pc + 4 - pc_start;
5329 tb->icount = num_insns;
5330
5331 #ifdef DEBUG_DISAS
5332 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5333 qemu_log("--------------\n");
5334 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5335 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5336 qemu_log("\n");
5337 }
5338 #endif
5339 }
5340
5341 void gen_intermediate_code_init(CPUSPARCState *env)
5342 {
5343 static int inited;
5344 static const char gregnames[32][4] = {
5345 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5346 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5347 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5348 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5349 };
5350 static const char fregnames[32][4] = {
5351 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5352 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5353 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5354 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5355 };
5356
5357 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5358 #ifdef TARGET_SPARC64
5359 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5360 { &cpu_asi, offsetof(CPUSPARCState, asi), "asi" },
5361 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5362 { &cpu_softint, offsetof(CPUSPARCState, softint), "softint" },
5363 #else
5364 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5365 #endif
5366 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5367 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5368 };
5369
5370 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5371 #ifdef TARGET_SPARC64
5372 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5373 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5374 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5375 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5376 "hstick_cmpr" },
5377 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5378 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5379 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5380 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5381 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5382 #endif
5383 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5384 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5385 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5386 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5387 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5388 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5389 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5390 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5391 #ifndef CONFIG_USER_ONLY
5392 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5393 #endif
5394 };
5395
5396 unsigned int i;
5397
5398 /* init various static tables */
5399 if (inited) {
5400 return;
5401 }
5402 inited = 1;
5403
5404 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5405
5406 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5407 offsetof(CPUSPARCState, regwptr),
5408 "regwptr");
5409
5410 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5411 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5412 }
5413
5414 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5415 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5416 }
5417
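/* %g0 always reads as zero and is never written, so no TCG global is
   allocated for it; the gpr load/store helpers special-case reg 0. */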
5418 TCGV_UNUSED(cpu_regs[0]);
5419 for (i = 1; i < 8; ++i) {
5420 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5421 offsetof(CPUSPARCState, gregs[i]),
5422 gregnames[i]);
5423 }
5424
5425 for (i = 8; i < 32; ++i) {
5426 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5427 (i - 8) * sizeof(target_ulong),
5428 gregnames[i]);
5429 }
5430
5431 for (i = 0; i < TARGET_DPREGS; i++) {
5432 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5433 offsetof(CPUSPARCState, fpr[i]),
5434 fregnames[i]);
5435 }
5436 }
5437
5438 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5439 target_ulong *data)
5440 {
5441 target_ulong pc = data[0];
5442 target_ulong npc = data[1];
5443
5444 env->pc = pc;
5445 if (npc == DYNAMIC_PC) {
5446 /* dynamic NPC: already stored */
5447 } else if (npc & JUMP_PC) {
5448 /* jump PC: use 'cond' and the jump targets of the translation */
5449 if (env->cond) {
5450 env->npc = npc & ~3;
5451 } else {
5452 env->npc = pc + 4;
5453 }
5454 } else {
5455 env->npc = npc;
5456 }
5457 }