target-sparc: Use movcond in gen_generic_branch
[mirror_qemu.git] / target-sparc / translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas.h"
29 #include "helper.h"
30 #include "tcg-op.h"
31
32 #define GEN_HELPER 1
33 #include "helper.h"
34
35 #define DEBUG_DISAS
36
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
40
41 /* global register indexes */
42 static TCGv_ptr cpu_env, cpu_regwptr;
43 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
44 static TCGv_i32 cpu_cc_op;
45 static TCGv_i32 cpu_psr;
46 static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
47 static TCGv cpu_y;
48 #ifndef CONFIG_USER_ONLY
49 static TCGv cpu_tbr;
50 #endif
51 static TCGv cpu_cond, cpu_dst, cpu_addr, cpu_val;
52 #ifdef TARGET_SPARC64
53 static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
54 static TCGv cpu_gsr;
55 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
56 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
57 static TCGv_i32 cpu_softint;
58 #else
59 static TCGv cpu_wim;
60 #endif
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0;
63 static TCGv_i32 cpu_tmp32;
64 static TCGv_i64 cpu_tmp64;
65 /* Floating point registers */
66 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67
68 static target_ulong gen_opc_npc[OPC_BUF_SIZE];
69 static target_ulong gen_opc_jump_pc[2];
70
71 #include "gen-icount.h"
72
73 typedef struct DisasContext {
74 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
75 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
76 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
77 int is_br;
78 int mem_idx;
79 int fpu_enabled;
80 int address_mask_32bit;
81 int singlestep;
82 uint32_t cc_op; /* current CC operation */
83 struct TranslationBlock *tb;
84 sparc_def_t *def;
85 TCGv_i32 t32[3];
86 int n_t32;
87 } DisasContext;
88
89 typedef struct {
90 TCGCond cond;
91 bool is_bool;
92 bool g1, g2;
93 TCGv c1, c2;
94 } DisasCompare;
95
96 // This macro uses non-native bit order
97 #define GET_FIELD(X, FROM, TO) \
98 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
99
100 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
101 #define GET_FIELD_SP(X, FROM, TO) \
102 GET_FIELD(X, 31 - (TO), 31 - (FROM))
103
104 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
105 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
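
/* Worked example, for illustration: GET_FIELD numbers bits from the
   MSB (bit 0 is 2^31), so GET_FIELD(insn, 3, 6) evaluates to
   (insn >> 25) & 0xf -- instruction bits 28..25, the Bicc "cond"
   field.  GET_FIELD_SP uses the manuals' LSB-first numbering instead:
   GET_FIELD_SP(insn, 25, 27) likewise extracts bits 27..25.  */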
106
107 #ifdef TARGET_SPARC64
108 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
109 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
110 #else
111 #define DFPREG(r) (r & 0x1e)
112 #define QFPREG(r) (r & 0x1c)
113 #endif
114
115 #define UA2005_HTRAP_MASK 0xff
116 #define V8_TRAP_MASK 0x7f
117
118 static int sign_extend(int x, int len)
119 {
120 len = 32 - len;
121 return (x << len) >> len;
122 }
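
/* For illustration: GET_FIELDs(insn, 19, 31) recovers a simm13
   immediate, since the field is 13 bits wide; e.g.
   sign_extend(0x1000, 13) == -4096.  This relies on arithmetic
   right shift of a signed int, which the compilers QEMU supports
   implement.  */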
123
124 #define IS_IMM (insn & (1<<13))
125
126 static inline void gen_update_fprs_dirty(int rd)
127 {
128 #if defined(TARGET_SPARC64)
129 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
130 #endif
131 }
132
133 /* floating point registers moves */
134 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
135 {
136 #if TCG_TARGET_REG_BITS == 32
137 if (src & 1) {
138 return TCGV_LOW(cpu_fpr[src / 2]);
139 } else {
140 return TCGV_HIGH(cpu_fpr[src / 2]);
141 }
142 #else
143 if (src & 1) {
144 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
145 } else {
146 TCGv_i32 ret = tcg_temp_local_new_i32();
147 TCGv_i64 t = tcg_temp_new_i64();
148
149 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
150 tcg_gen_trunc_i64_i32(ret, t);
151 tcg_temp_free_i64(t);
152
153 dc->t32[dc->n_t32++] = ret;
154 assert(dc->n_t32 <= ARRAY_SIZE(dc->t32));
155
156 return ret;
157 }
158 #endif
159 }
160
161 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
162 {
163 #if TCG_TARGET_REG_BITS == 32
164 if (dst & 1) {
165 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
166 } else {
167 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
168 }
169 #else
170 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
171 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
172 (dst & 1 ? 0 : 32), 32);
173 #endif
174 gen_update_fprs_dirty(dst);
175 }
176
177 static TCGv_i32 gen_dest_fpr_F(void)
178 {
179 return cpu_tmp32;
180 }
181
182 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
183 {
184 src = DFPREG(src);
185 return cpu_fpr[src / 2];
186 }
187
188 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
189 {
190 dst = DFPREG(dst);
191 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
192 gen_update_fprs_dirty(dst);
193 }
194
195 static TCGv_i64 gen_dest_fpr_D(void)
196 {
197 return cpu_tmp64;
198 }
199
200 static void gen_op_load_fpr_QT0(unsigned int src)
201 {
202 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
203 offsetof(CPU_QuadU, ll.upper));
204 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
205 offsetof(CPU_QuadU, ll.lower));
206 }
207
208 static void gen_op_load_fpr_QT1(unsigned int src)
209 {
210 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
211 offsetof(CPU_QuadU, ll.upper));
212 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
213 offsetof(CPU_QuadU, ll.lower));
214 }
215
216 static void gen_op_store_QT0_fpr(unsigned int dst)
217 {
218 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
219 offsetof(CPU_QuadU, ll.upper));
220 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
221 offsetof(CPU_QuadU, ll.lower));
222 }
223
224 #ifdef TARGET_SPARC64
225 static void gen_move_Q(unsigned int rd, unsigned int rs)
226 {
227 rd = QFPREG(rd);
228 rs = QFPREG(rs);
229
230 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
231 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
232 gen_update_fprs_dirty(rd);
233 }
234 #endif
235
236 /* moves */
237 #ifdef CONFIG_USER_ONLY
238 #define supervisor(dc) 0
239 #ifdef TARGET_SPARC64
240 #define hypervisor(dc) 0
241 #endif
242 #else
243 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
244 #ifdef TARGET_SPARC64
245 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
246 #else
247 #endif
248 #endif
249
250 #ifdef TARGET_SPARC64
251 #ifndef TARGET_ABI32
252 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
253 #else
254 #define AM_CHECK(dc) (1)
255 #endif
256 #endif
257
258 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
259 {
260 #ifdef TARGET_SPARC64
261 if (AM_CHECK(dc))
262 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
263 #endif
264 }
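
/* With address masking enabled (PSTATE.AM, or always under
   TARGET_ABI32), 64-bit addresses wrap at 4GB; e.g. an address of
   0x100001000 is masked down to 0x00001000 before the access.  */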
265
266 static inline void gen_movl_reg_TN(int reg, TCGv tn)
267 {
268 if (reg == 0)
269 tcg_gen_movi_tl(tn, 0);
270 else if (reg < 8)
271 tcg_gen_mov_tl(tn, cpu_gregs[reg]);
272 else {
273 tcg_gen_ld_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
274 }
275 }
276
277 static inline void gen_movl_TN_reg(int reg, TCGv tn)
278 {
279 if (reg == 0)
280 return;
281 else if (reg < 8)
282 tcg_gen_mov_tl(cpu_gregs[reg], tn);
283 else {
284 tcg_gen_st_tl(tn, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
285 }
286 }
287
288 static inline void gen_goto_tb(DisasContext *s, int tb_num,
289 target_ulong pc, target_ulong npc)
290 {
291 TranslationBlock *tb;
292
293 tb = s->tb;
294 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
295 (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
296 !s->singlestep) {
297 /* jump to same page: we can use a direct jump */
298 tcg_gen_goto_tb(tb_num);
299 tcg_gen_movi_tl(cpu_pc, pc);
300 tcg_gen_movi_tl(cpu_npc, npc);
301 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
302 } else {
303 /* jump to another page: currently not optimized */
304 tcg_gen_movi_tl(cpu_pc, pc);
305 tcg_gen_movi_tl(cpu_npc, npc);
306 tcg_gen_exit_tb(0);
307 }
308 }
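
/* A sketch of the chaining contract assumed above: tcg_gen_goto_tb
   emits a patchable direct-jump slot, and the value returned through
   exit_tb tells the execution loop which slot of which TB to patch
   once the successor is known:

       exit value = (tcg_target_long)tb + tb_num;   // tb_num is 0 or 1

   Direct jumps are only safe while pc and npc stay on the same guest
   page as the TB, hence the page-mask checks above.  */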
309
310 // XXX suboptimal
311 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
312 {
313 tcg_gen_extu_i32_tl(reg, src);
314 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
315 tcg_gen_andi_tl(reg, reg, 0x1);
316 }
317
318 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
319 {
320 tcg_gen_extu_i32_tl(reg, src);
321 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
322 tcg_gen_andi_tl(reg, reg, 0x1);
323 }
324
325 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
326 {
327 tcg_gen_extu_i32_tl(reg, src);
328 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
329 tcg_gen_andi_tl(reg, reg, 0x1);
330 }
331
332 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
333 {
334 tcg_gen_extu_i32_tl(reg, src);
335 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
336 tcg_gen_andi_tl(reg, reg, 0x1);
337 }
338
339 static inline void gen_add_tv(TCGv dst, TCGv src1, TCGv src2)
340 {
341 TCGv r_temp;
342 TCGv_i32 r_const;
343 int l1;
344
345 l1 = gen_new_label();
346
347 r_temp = tcg_temp_new();
348 tcg_gen_xor_tl(r_temp, src1, src2);
349 tcg_gen_not_tl(r_temp, r_temp);
350 tcg_gen_xor_tl(cpu_tmp0, src1, dst);
351 tcg_gen_and_tl(r_temp, r_temp, cpu_tmp0);
352 tcg_gen_andi_tl(r_temp, r_temp, (1ULL << 31));
353 tcg_gen_brcondi_tl(TCG_COND_EQ, r_temp, 0, l1);
354 r_const = tcg_const_i32(TT_TOVF);
355 gen_helper_raise_exception(cpu_env, r_const);
356 tcg_temp_free_i32(r_const);
357 gen_set_label(l1);
358 tcg_temp_free(r_temp);
359 }
360
361 static inline void gen_tag_tv(TCGv src1, TCGv src2)
362 {
363 int l1;
364 TCGv_i32 r_const;
365
366 l1 = gen_new_label();
367 tcg_gen_or_tl(cpu_tmp0, src1, src2);
368 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x3);
369 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
370 r_const = tcg_const_i32(TT_TOVF);
371 gen_helper_raise_exception(cpu_env, r_const);
372 tcg_temp_free_i32(r_const);
373 gen_set_label(l1);
374 }
375
376 static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
377 {
378 tcg_gen_mov_tl(cpu_cc_src, src1);
379 tcg_gen_movi_tl(cpu_cc_src2, src2);
380 tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
381 tcg_gen_mov_tl(dst, cpu_cc_dst);
382 }
383
384 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
385 {
386 tcg_gen_mov_tl(cpu_cc_src, src1);
387 tcg_gen_mov_tl(cpu_cc_src2, src2);
388 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
389 tcg_gen_mov_tl(dst, cpu_cc_dst);
390 }
391
392 static TCGv_i32 gen_add32_carry32(void)
393 {
394 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
395
396 /* Carry is computed from a previous add: (dst < src) */
397 #if TARGET_LONG_BITS == 64
398 cc_src1_32 = tcg_temp_new_i32();
399 cc_src2_32 = tcg_temp_new_i32();
400 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
401 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
402 #else
403 cc_src1_32 = cpu_cc_dst;
404 cc_src2_32 = cpu_cc_src;
405 #endif
406
407 carry_32 = tcg_temp_new_i32();
408 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
409
410 #if TARGET_LONG_BITS == 64
411 tcg_temp_free_i32(cc_src1_32);
412 tcg_temp_free_i32(cc_src2_32);
413 #endif
414
415 return carry_32;
416 }
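
/* The trick above in one line of host C (names illustrative): after
   r = a + b computed modulo 2^32, the carry out is exactly the
   unsigned compare (r < a), which the TCG_COND_LTU setcond on
   (cc_dst, cc_src) reconstructs without touching host flags.  */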
417
418 static TCGv_i32 gen_sub32_carry32(void)
419 {
420 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
421
422 /* Carry is computed from a previous borrow: (src1 < src2) */
423 #if TARGET_LONG_BITS == 64
424 cc_src1_32 = tcg_temp_new_i32();
425 cc_src2_32 = tcg_temp_new_i32();
426 tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
427 tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
428 #else
429 cc_src1_32 = cpu_cc_src;
430 cc_src2_32 = cpu_cc_src2;
431 #endif
432
433 carry_32 = tcg_temp_new_i32();
434 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
435
436 #if TARGET_LONG_BITS == 64
437 tcg_temp_free_i32(cc_src1_32);
438 tcg_temp_free_i32(cc_src2_32);
439 #endif
440
441 return carry_32;
442 }
443
444 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
445 TCGv src2, int update_cc)
446 {
447 TCGv_i32 carry_32;
448 TCGv carry;
449
450 switch (dc->cc_op) {
451 case CC_OP_DIV:
452 case CC_OP_LOGIC:
453 /* Carry is known to be zero. Fall back to plain ADD. */
454 if (update_cc) {
455 gen_op_add_cc(dst, src1, src2);
456 } else {
457 tcg_gen_add_tl(dst, src1, src2);
458 }
459 return;
460
461 case CC_OP_ADD:
462 case CC_OP_TADD:
463 case CC_OP_TADDTV:
464 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
465 {
466 /* For 32-bit hosts, we can re-use the host's hardware carry
467 generation by using an ADD2 opcode. We discard the low
468 part of the output. Ideally we'd combine this operation
469 with the add that generated the carry in the first place. */
470 TCGv dst_low = tcg_temp_new();
471 tcg_gen_op6_i32(INDEX_op_add2_i32, dst_low, dst,
472 cpu_cc_src, src1, cpu_cc_src2, src2);
473 tcg_temp_free(dst_low);
474 goto add_done;
475 }
476 #endif
477 carry_32 = gen_add32_carry32();
478 break;
479
480 case CC_OP_SUB:
481 case CC_OP_TSUB:
482 case CC_OP_TSUBTV:
483 carry_32 = gen_sub32_carry32();
484 break;
485
486 default:
487 /* We need external help to produce the carry. */
488 carry_32 = tcg_temp_new_i32();
489 gen_helper_compute_C_icc(carry_32, cpu_env);
490 break;
491 }
492
493 #if TARGET_LONG_BITS == 64
494 carry = tcg_temp_new();
495 tcg_gen_extu_i32_i64(carry, carry_32);
496 #else
497 carry = carry_32;
498 #endif
499
500 tcg_gen_add_tl(dst, src1, src2);
501 tcg_gen_add_tl(dst, dst, carry);
502
503 tcg_temp_free_i32(carry_32);
504 #if TARGET_LONG_BITS == 64
505 tcg_temp_free(carry);
506 #endif
507
508 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
509 add_done:
510 #endif
511 if (update_cc) {
512 tcg_gen_mov_tl(cpu_cc_src, src1);
513 tcg_gen_mov_tl(cpu_cc_src2, src2);
514 tcg_gen_mov_tl(cpu_cc_dst, dst);
515 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
516 dc->cc_op = CC_OP_ADDX;
517 }
518 }
519
520 static inline void gen_op_tadd_cc(TCGv dst, TCGv src1, TCGv src2)
521 {
522 tcg_gen_mov_tl(cpu_cc_src, src1);
523 tcg_gen_mov_tl(cpu_cc_src2, src2);
524 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
525 tcg_gen_mov_tl(dst, cpu_cc_dst);
526 }
527
528 static inline void gen_op_tadd_ccTV(TCGv dst, TCGv src1, TCGv src2)
529 {
530 tcg_gen_mov_tl(cpu_cc_src, src1);
531 tcg_gen_mov_tl(cpu_cc_src2, src2);
532 gen_tag_tv(cpu_cc_src, cpu_cc_src2);
533 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
534 gen_add_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
535 tcg_gen_mov_tl(dst, cpu_cc_dst);
536 }
537
538 static inline void gen_sub_tv(TCGv dst, TCGv src1, TCGv src2)
539 {
540 TCGv r_temp;
541 TCGv_i32 r_const;
542 int l1;
543
544 l1 = gen_new_label();
545
546 r_temp = tcg_temp_new();
547 tcg_gen_xor_tl(r_temp, src1, src2);
548 tcg_gen_xor_tl(cpu_tmp0, src1, dst);
549 tcg_gen_and_tl(r_temp, r_temp, cpu_tmp0);
550 tcg_gen_andi_tl(r_temp, r_temp, (1ULL << 31));
551 tcg_gen_brcondi_tl(TCG_COND_EQ, r_temp, 0, l1);
552 r_const = tcg_const_i32(TT_TOVF);
553 gen_helper_raise_exception(cpu_env, r_const);
554 tcg_temp_free_i32(r_const);
555 gen_set_label(l1);
556 tcg_temp_free(r_temp);
557 }
558
559 static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
560 {
561 tcg_gen_mov_tl(cpu_cc_src, src1);
562 tcg_gen_movi_tl(cpu_cc_src2, src2);
563 if (src2 == 0) {
564 tcg_gen_mov_tl(cpu_cc_dst, src1);
565 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
566 dc->cc_op = CC_OP_LOGIC;
567 } else {
568 tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
569 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
570 dc->cc_op = CC_OP_SUB;
571 }
572 tcg_gen_mov_tl(dst, cpu_cc_dst);
573 }
574
575 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
576 {
577 tcg_gen_mov_tl(cpu_cc_src, src1);
578 tcg_gen_mov_tl(cpu_cc_src2, src2);
579 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
580 tcg_gen_mov_tl(dst, cpu_cc_dst);
581 }
582
583 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
584 TCGv src2, int update_cc)
585 {
586 TCGv_i32 carry_32;
587 TCGv carry;
588
589 switch (dc->cc_op) {
590 case CC_OP_DIV:
591 case CC_OP_LOGIC:
592 /* Carry is known to be zero. Fall back to plain SUB. */
593 if (update_cc) {
594 gen_op_sub_cc(dst, src1, src2);
595 } else {
596 tcg_gen_sub_tl(dst, src1, src2);
597 }
598 return;
599
600 case CC_OP_ADD:
601 case CC_OP_TADD:
602 case CC_OP_TADDTV:
603 carry_32 = gen_add32_carry32();
604 break;
605
606 case CC_OP_SUB:
607 case CC_OP_TSUB:
608 case CC_OP_TSUBTV:
609 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
610 {
611 /* For 32-bit hosts, we can re-use the host's hardware carry
612 generation by using a SUB2 opcode. We discard the low
613 part of the output. Ideally we'd combine this operation
614 with the subtraction that generated the borrow in the first place. */
615 TCGv dst_low = tcg_temp_new();
616 tcg_gen_op6_i32(INDEX_op_sub2_i32, dst_low, dst,
617 cpu_cc_src, src1, cpu_cc_src2, src2);
618 tcg_temp_free(dst_low);
619 goto sub_done;
620 }
621 #endif
622 carry_32 = gen_sub32_carry32();
623 break;
624
625 default:
626 /* We need external help to produce the carry. */
627 carry_32 = tcg_temp_new_i32();
628 gen_helper_compute_C_icc(carry_32, cpu_env);
629 break;
630 }
631
632 #if TARGET_LONG_BITS == 64
633 carry = tcg_temp_new();
634 tcg_gen_extu_i32_i64(carry, carry_32);
635 #else
636 carry = carry_32;
637 #endif
638
639 tcg_gen_sub_tl(dst, src1, src2);
640 tcg_gen_sub_tl(dst, dst, carry);
641
642 tcg_temp_free_i32(carry_32);
643 #if TARGET_LONG_BITS == 64
644 tcg_temp_free(carry);
645 #endif
646
647 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
648 sub_done:
649 #endif
650 if (update_cc) {
651 tcg_gen_mov_tl(cpu_cc_src, src1);
652 tcg_gen_mov_tl(cpu_cc_src2, src2);
653 tcg_gen_mov_tl(cpu_cc_dst, dst);
654 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
655 dc->cc_op = CC_OP_SUBX;
656 }
657 }
658
659 static inline void gen_op_tsub_cc(TCGv dst, TCGv src1, TCGv src2)
660 {
661 tcg_gen_mov_tl(cpu_cc_src, src1);
662 tcg_gen_mov_tl(cpu_cc_src2, src2);
663 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
664 tcg_gen_mov_tl(dst, cpu_cc_dst);
665 }
666
667 static inline void gen_op_tsub_ccTV(TCGv dst, TCGv src1, TCGv src2)
668 {
669 tcg_gen_mov_tl(cpu_cc_src, src1);
670 tcg_gen_mov_tl(cpu_cc_src2, src2);
671 gen_tag_tv(cpu_cc_src, cpu_cc_src2);
672 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
673 gen_sub_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
674 tcg_gen_mov_tl(dst, cpu_cc_dst);
675 }
676
677 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
678 {
679 TCGv r_temp;
680 int l1;
681
682 l1 = gen_new_label();
683 r_temp = tcg_temp_new();
684
685 /* old op:
686 if (!(env->y & 1))
687 T1 = 0;
688 */
689 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
690 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
691 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
692 tcg_gen_brcondi_tl(TCG_COND_NE, r_temp, 0, l1);
693 tcg_gen_movi_tl(cpu_cc_src2, 0);
694 gen_set_label(l1);
695
696 // b2 = T0 & 1;
697 // env->y = (b2 << 31) | (env->y >> 1);
698 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
699 tcg_gen_shli_tl(r_temp, r_temp, 31);
700 tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
701 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
702 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
703 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
704
705 // b1 = N ^ V;
706 gen_mov_reg_N(cpu_tmp0, cpu_psr);
707 gen_mov_reg_V(r_temp, cpu_psr);
708 tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
709 tcg_temp_free(r_temp);
710
711 // T0 = (b1 << 31) | (T0 >> 1);
712 // src1 = T0;
713 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
714 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
715 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
716
717 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
718
719 tcg_gen_mov_tl(dst, cpu_cc_dst);
720 }
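
/* A plain C model of one MULScc step, matching the TCG above
   (names are illustrative, not part of this file):

       uint32_t mulscc_step(uint32_t rs1, uint32_t rs2,
                            uint32_t *y, unsigned n, unsigned v)
       {
           uint32_t addend = (*y & 1) ? rs2 : 0;
           *y = ((rs1 & 1) << 31) | (*y >> 1);
           rs1 = ((n ^ v) << 31) | (rs1 >> 1);
           return rs1 + addend;    // the add also sets the icc flags
       }
*/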
721
722 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
723 {
724 TCGv_i32 r_src1, r_src2;
725 TCGv_i64 r_temp, r_temp2;
726
727 r_src1 = tcg_temp_new_i32();
728 r_src2 = tcg_temp_new_i32();
729
730 tcg_gen_trunc_tl_i32(r_src1, src1);
731 tcg_gen_trunc_tl_i32(r_src2, src2);
732
733 r_temp = tcg_temp_new_i64();
734 r_temp2 = tcg_temp_new_i64();
735
736 if (sign_ext) {
737 tcg_gen_ext_i32_i64(r_temp, r_src2);
738 tcg_gen_ext_i32_i64(r_temp2, r_src1);
739 } else {
740 tcg_gen_extu_i32_i64(r_temp, r_src2);
741 tcg_gen_extu_i32_i64(r_temp2, r_src1);
742 }
743
744 tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
745
746 tcg_gen_shri_i64(r_temp, r_temp2, 32);
747 tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
748 tcg_temp_free_i64(r_temp);
749 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
750
751 tcg_gen_trunc_i64_tl(dst, r_temp2);
752
753 tcg_temp_free_i64(r_temp2);
754
755 tcg_temp_free_i32(r_src1);
756 tcg_temp_free_i32(r_src2);
757 }
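
/* Both callers below compute a full 32x32->64 product and latch the
   high word in %y; e.g. umul 0xffffffff * 0xffffffff yields
   %y = 0xfffffffe and rd = 0x00000001, while smul on the same bit
   patterns computes (-1) * (-1), so %y = 0 and rd = 1.  */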
758
759 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
760 {
761 /* zero-extend truncated operands before multiplication */
762 gen_op_multiply(dst, src1, src2, 0);
763 }
764
765 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
766 {
767 /* sign-extend truncated operands before multiplication */
768 gen_op_multiply(dst, src1, src2, 1);
769 }
770
771 #ifdef TARGET_SPARC64
772 static inline void gen_trap_ifdivzero_tl(TCGv divisor)
773 {
774 TCGv_i32 r_const;
775 int l1;
776
777 l1 = gen_new_label();
778 tcg_gen_brcondi_tl(TCG_COND_NE, divisor, 0, l1);
779 r_const = tcg_const_i32(TT_DIV_ZERO);
780 gen_helper_raise_exception(cpu_env, r_const);
781 tcg_temp_free_i32(r_const);
782 gen_set_label(l1);
783 }
784
785 static inline void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
786 {
787 int l1, l2;
788 TCGv r_temp1, r_temp2;
789
790 l1 = gen_new_label();
791 l2 = gen_new_label();
792 r_temp1 = tcg_temp_local_new();
793 r_temp2 = tcg_temp_local_new();
794 tcg_gen_mov_tl(r_temp1, src1);
795 tcg_gen_mov_tl(r_temp2, src2);
796 gen_trap_ifdivzero_tl(r_temp2);
797 tcg_gen_brcondi_tl(TCG_COND_NE, r_temp1, INT64_MIN, l1);
798 tcg_gen_brcondi_tl(TCG_COND_NE, r_temp2, -1, l1);
799 tcg_gen_movi_i64(dst, INT64_MIN);
800 tcg_gen_br(l2);
801 gen_set_label(l1);
802 tcg_gen_div_i64(dst, r_temp1, r_temp2);
803 gen_set_label(l2);
804 tcg_temp_free(r_temp1);
805 tcg_temp_free(r_temp2);
806 }
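
/* The special case above pins the one signed-division overflow,
   INT64_MIN / -1, to INT64_MIN; letting tcg_gen_div_i64 see those
   operands would be undefined behaviour (and traps on common hosts).
   Division by zero has already been raised as TT_DIV_ZERO.  */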
807 #endif
808
809 // 1
810 static inline void gen_op_eval_ba(TCGv dst)
811 {
812 tcg_gen_movi_tl(dst, 1);
813 }
814
815 // Z
816 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
817 {
818 gen_mov_reg_Z(dst, src);
819 }
820
821 // Z | (N ^ V)
822 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
823 {
824 gen_mov_reg_N(cpu_tmp0, src);
825 gen_mov_reg_V(dst, src);
826 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
827 gen_mov_reg_Z(cpu_tmp0, src);
828 tcg_gen_or_tl(dst, dst, cpu_tmp0);
829 }
830
831 // N ^ V
832 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
833 {
834 gen_mov_reg_V(cpu_tmp0, src);
835 gen_mov_reg_N(dst, src);
836 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
837 }
838
839 // C | Z
840 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
841 {
842 gen_mov_reg_Z(cpu_tmp0, src);
843 gen_mov_reg_C(dst, src);
844 tcg_gen_or_tl(dst, dst, cpu_tmp0);
845 }
846
847 // C
848 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
849 {
850 gen_mov_reg_C(dst, src);
851 }
852
853 // V
854 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
855 {
856 gen_mov_reg_V(dst, src);
857 }
858
859 // 0
860 static inline void gen_op_eval_bn(TCGv dst)
861 {
862 tcg_gen_movi_tl(dst, 0);
863 }
864
865 // N
866 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
867 {
868 gen_mov_reg_N(dst, src);
869 }
870
871 // !Z
872 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
873 {
874 gen_mov_reg_Z(dst, src);
875 tcg_gen_xori_tl(dst, dst, 0x1);
876 }
877
878 // !(Z | (N ^ V))
879 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
880 {
881 gen_mov_reg_N(cpu_tmp0, src);
882 gen_mov_reg_V(dst, src);
883 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
884 gen_mov_reg_Z(cpu_tmp0, src);
885 tcg_gen_or_tl(dst, dst, cpu_tmp0);
886 tcg_gen_xori_tl(dst, dst, 0x1);
887 }
888
889 // !(N ^ V)
890 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
891 {
892 gen_mov_reg_V(cpu_tmp0, src);
893 gen_mov_reg_N(dst, src);
894 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
895 tcg_gen_xori_tl(dst, dst, 0x1);
896 }
897
898 // !(C | Z)
899 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
900 {
901 gen_mov_reg_Z(cpu_tmp0, src);
902 gen_mov_reg_C(dst, src);
903 tcg_gen_or_tl(dst, dst, cpu_tmp0);
904 tcg_gen_xori_tl(dst, dst, 0x1);
905 }
906
907 // !C
908 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
909 {
910 gen_mov_reg_C(dst, src);
911 tcg_gen_xori_tl(dst, dst, 0x1);
912 }
913
914 // !N
915 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
916 {
917 gen_mov_reg_N(dst, src);
918 tcg_gen_xori_tl(dst, dst, 0x1);
919 }
920
921 // !V
922 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
923 {
924 gen_mov_reg_V(dst, src);
925 tcg_gen_xori_tl(dst, dst, 0x1);
926 }
927
928 /*
929 FPSR bit field FCC1 | FCC0:
930 0 =
931 1 <
932 2 >
933 3 unordered
934 */
935 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
936 unsigned int fcc_offset)
937 {
938 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
939 tcg_gen_andi_tl(reg, reg, 0x1);
940 }
941
942 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
943 unsigned int fcc_offset)
944 {
945 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
946 tcg_gen_andi_tl(reg, reg, 0x1);
947 }
948
949 // !0: FCC0 | FCC1
950 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
951 unsigned int fcc_offset)
952 {
953 gen_mov_reg_FCC0(dst, src, fcc_offset);
954 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
955 tcg_gen_or_tl(dst, dst, cpu_tmp0);
956 }
957
958 // 1 or 2: FCC0 ^ FCC1
959 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
960 unsigned int fcc_offset)
961 {
962 gen_mov_reg_FCC0(dst, src, fcc_offset);
963 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
964 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
965 }
966
967 // 1 or 3: FCC0
968 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
969 unsigned int fcc_offset)
970 {
971 gen_mov_reg_FCC0(dst, src, fcc_offset);
972 }
973
974 // 1: FCC0 & !FCC1
975 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
976 unsigned int fcc_offset)
977 {
978 gen_mov_reg_FCC0(dst, src, fcc_offset);
979 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
980 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
981 tcg_gen_and_tl(dst, dst, cpu_tmp0);
982 }
983
984 // 2 or 3: FCC1
985 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
986 unsigned int fcc_offset)
987 {
988 gen_mov_reg_FCC1(dst, src, fcc_offset);
989 }
990
991 // 2: !FCC0 & FCC1
992 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
993 unsigned int fcc_offset)
994 {
995 gen_mov_reg_FCC0(dst, src, fcc_offset);
996 tcg_gen_xori_tl(dst, dst, 0x1);
997 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
998 tcg_gen_and_tl(dst, dst, cpu_tmp0);
999 }
1000
1001 // 3: FCC0 & FCC1
1002 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
1003 unsigned int fcc_offset)
1004 {
1005 gen_mov_reg_FCC0(dst, src, fcc_offset);
1006 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1007 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1008 }
1009
1010 // 0: !(FCC0 | FCC1)
1011 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
1012 unsigned int fcc_offset)
1013 {
1014 gen_mov_reg_FCC0(dst, src, fcc_offset);
1015 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1016 tcg_gen_or_tl(dst, dst, cpu_tmp0);
1017 tcg_gen_xori_tl(dst, dst, 0x1);
1018 }
1019
1020 // 0 or 3: !(FCC0 ^ FCC1)
1021 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
1022 unsigned int fcc_offset)
1023 {
1024 gen_mov_reg_FCC0(dst, src, fcc_offset);
1025 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1026 tcg_gen_xor_tl(dst, dst, cpu_tmp0);
1027 tcg_gen_xori_tl(dst, dst, 0x1);
1028 }
1029
1030 // 0 or 2: !FCC0
1031 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
1032 unsigned int fcc_offset)
1033 {
1034 gen_mov_reg_FCC0(dst, src, fcc_offset);
1035 tcg_gen_xori_tl(dst, dst, 0x1);
1036 }
1037
1038 // !1: !(FCC0 & !FCC1)
1039 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
1040 unsigned int fcc_offset)
1041 {
1042 gen_mov_reg_FCC0(dst, src, fcc_offset);
1043 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1044 tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1);
1045 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1046 tcg_gen_xori_tl(dst, dst, 0x1);
1047 }
1048
1049 // 0 or 1: !FCC1
1050 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
1051 unsigned int fcc_offset)
1052 {
1053 gen_mov_reg_FCC1(dst, src, fcc_offset);
1054 tcg_gen_xori_tl(dst, dst, 0x1);
1055 }
1056
1057 // !2: !(!FCC0 & FCC1)
1058 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
1059 unsigned int fcc_offset)
1060 {
1061 gen_mov_reg_FCC0(dst, src, fcc_offset);
1062 tcg_gen_xori_tl(dst, dst, 0x1);
1063 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1064 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1065 tcg_gen_xori_tl(dst, dst, 0x1);
1066 }
1067
1068 // !3: !(FCC0 & FCC1)
1069 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
1070 unsigned int fcc_offset)
1071 {
1072 gen_mov_reg_FCC0(dst, src, fcc_offset);
1073 gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
1074 tcg_gen_and_tl(dst, dst, cpu_tmp0);
1075 tcg_gen_xori_tl(dst, dst, 0x1);
1076 }
1077
1078 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
1079 target_ulong pc2, TCGv r_cond)
1080 {
1081 int l1;
1082
1083 l1 = gen_new_label();
1084
1085 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
1086
1087 gen_goto_tb(dc, 0, pc1, pc1 + 4);
1088
1089 gen_set_label(l1);
1090 gen_goto_tb(dc, 1, pc2, pc2 + 4);
1091 }
1092
1093 static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
1094 target_ulong pc2, TCGv r_cond)
1095 {
1096 int l1;
1097
1098 l1 = gen_new_label();
1099
1100 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
1101
1102 gen_goto_tb(dc, 0, pc2, pc1);
1103
1104 gen_set_label(l1);
1105 gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
1106 }
1107
1108 static inline void gen_generic_branch(DisasContext *dc)
1109 {
1110 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1111 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1112 TCGv zero = tcg_const_tl(0);
1113
1114 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1115
1116 tcg_temp_free(npc0);
1117 tcg_temp_free(npc1);
1118 tcg_temp_free(zero);
1119 }
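
/* Branch-free selection of the next PC, i.e.

       npc = (cpu_cond != 0) ? jump_pc[0] : jump_pc[1];

   where jump_pc[0] is the branch target and jump_pc[1] the
   fall-through address recorded by do_branch and friends.  */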
1120
1121 /* call this function before using the condition register as it may
1122 have been set for a jump */
1123 static inline void flush_cond(DisasContext *dc)
1124 {
1125 if (dc->npc == JUMP_PC) {
1126 gen_generic_branch(dc);
1127 dc->npc = DYNAMIC_PC;
1128 }
1129 }
1130
1131 static inline void save_npc(DisasContext *dc)
1132 {
1133 if (dc->npc == JUMP_PC) {
1134 gen_generic_branch(dc);
1135 dc->npc = DYNAMIC_PC;
1136 } else if (dc->npc != DYNAMIC_PC) {
1137 tcg_gen_movi_tl(cpu_npc, dc->npc);
1138 }
1139 }
1140
1141 static inline void save_state(DisasContext *dc)
1142 {
1143 tcg_gen_movi_tl(cpu_pc, dc->pc);
1144 /* flush pending conditional evaluations before exposing cpu state */
1145 if (dc->cc_op != CC_OP_FLAGS) {
1146 dc->cc_op = CC_OP_FLAGS;
1147 gen_helper_compute_psr(cpu_env);
1148 }
1149 save_npc(dc);
1150 }
1151
1152 static inline void gen_mov_pc_npc(DisasContext *dc)
1153 {
1154 if (dc->npc == JUMP_PC) {
1155 gen_generic_branch(dc);
1156 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1157 dc->pc = DYNAMIC_PC;
1158 } else if (dc->npc == DYNAMIC_PC) {
1159 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1160 dc->pc = DYNAMIC_PC;
1161 } else {
1162 dc->pc = dc->npc;
1163 }
1164 }
1165
1166 static inline void gen_op_next_insn(void)
1167 {
1168 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1169 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1170 }
1171
1172 static void free_compare(DisasCompare *cmp)
1173 {
1174 if (!cmp->g1) {
1175 tcg_temp_free(cmp->c1);
1176 }
1177 if (!cmp->g2) {
1178 tcg_temp_free(cmp->c2);
1179 }
1180 }
1181
1182 static void gen_compare(DisasCompare *cmp, unsigned int cc, unsigned int cond,
1183 DisasContext *dc)
1184 {
1185 TCGv_i32 r_src;
1186 TCGv r_dst;
1187
1188 /* For now we still generate a straight boolean result. */
1189 cmp->cond = TCG_COND_NE;
1190 cmp->is_bool = true;
1191 cmp->g1 = cmp->g2 = false;
1192 cmp->c1 = r_dst = tcg_temp_new();
1193 cmp->c2 = tcg_const_tl(0);
1194
1195 #ifdef TARGET_SPARC64
1196 if (cc)
1197 r_src = cpu_xcc;
1198 else
1199 r_src = cpu_psr;
1200 #else
1201 r_src = cpu_psr;
1202 #endif
1203 switch (dc->cc_op) {
1204 case CC_OP_FLAGS:
1205 break;
1206 default:
1207 gen_helper_compute_psr(cpu_env);
1208 dc->cc_op = CC_OP_FLAGS;
1209 break;
1210 }
1211 switch (cond) {
1212 case 0x0:
1213 gen_op_eval_bn(r_dst);
1214 break;
1215 case 0x1:
1216 gen_op_eval_be(r_dst, r_src);
1217 break;
1218 case 0x2:
1219 gen_op_eval_ble(r_dst, r_src);
1220 break;
1221 case 0x3:
1222 gen_op_eval_bl(r_dst, r_src);
1223 break;
1224 case 0x4:
1225 gen_op_eval_bleu(r_dst, r_src);
1226 break;
1227 case 0x5:
1228 gen_op_eval_bcs(r_dst, r_src);
1229 break;
1230 case 0x6:
1231 gen_op_eval_bneg(r_dst, r_src);
1232 break;
1233 case 0x7:
1234 gen_op_eval_bvs(r_dst, r_src);
1235 break;
1236 case 0x8:
1237 gen_op_eval_ba(r_dst);
1238 break;
1239 case 0x9:
1240 gen_op_eval_bne(r_dst, r_src);
1241 break;
1242 case 0xa:
1243 gen_op_eval_bg(r_dst, r_src);
1244 break;
1245 case 0xb:
1246 gen_op_eval_bge(r_dst, r_src);
1247 break;
1248 case 0xc:
1249 gen_op_eval_bgu(r_dst, r_src);
1250 break;
1251 case 0xd:
1252 gen_op_eval_bcc(r_dst, r_src);
1253 break;
1254 case 0xe:
1255 gen_op_eval_bpos(r_dst, r_src);
1256 break;
1257 case 0xf:
1258 gen_op_eval_bvc(r_dst, r_src);
1259 break;
1260 }
1261 }
1262
1263 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1264 {
1265 unsigned int offset;
1266 TCGv r_dst;
1267
1268 /* For now we still generate a straight boolean result. */
1269 cmp->cond = TCG_COND_NE;
1270 cmp->is_bool = true;
1271 cmp->g1 = cmp->g2 = false;
1272 cmp->c1 = r_dst = tcg_temp_new();
1273 cmp->c2 = tcg_const_tl(0);
1274
1275 switch (cc) {
1276 default:
1277 case 0x0:
1278 offset = 0;
1279 break;
1280 case 0x1:
1281 offset = 32 - 10;
1282 break;
1283 case 0x2:
1284 offset = 34 - 10;
1285 break;
1286 case 0x3:
1287 offset = 36 - 10;
1288 break;
1289 }
1290
1291 switch (cond) {
1292 case 0x0:
1293 gen_op_eval_bn(r_dst);
1294 break;
1295 case 0x1:
1296 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1297 break;
1298 case 0x2:
1299 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1300 break;
1301 case 0x3:
1302 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1303 break;
1304 case 0x4:
1305 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1306 break;
1307 case 0x5:
1308 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1309 break;
1310 case 0x6:
1311 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1312 break;
1313 case 0x7:
1314 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1315 break;
1316 case 0x8:
1317 gen_op_eval_ba(r_dst);
1318 break;
1319 case 0x9:
1320 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1321 break;
1322 case 0xa:
1323 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1324 break;
1325 case 0xb:
1326 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1327 break;
1328 case 0xc:
1329 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1330 break;
1331 case 0xd:
1332 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1333 break;
1334 case 0xe:
1335 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1336 break;
1337 case 0xf:
1338 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1339 break;
1340 }
1341 }
1342
1343 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1344 DisasContext *dc)
1345 {
1346 DisasCompare cmp;
1347 gen_compare(&cmp, cc, cond, dc);
1348
1349 /* The interface is to return a boolean in r_dst. */
1350 if (cmp.is_bool) {
1351 tcg_gen_mov_tl(r_dst, cmp.c1);
1352 } else {
1353 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1354 }
1355
1356 free_compare(&cmp);
1357 }
1358
1359 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1360 {
1361 DisasCompare cmp;
1362 gen_fcompare(&cmp, cc, cond);
1363
1364 /* The interface is to return a boolean in r_dst. */
1365 if (cmp.is_bool) {
1366 tcg_gen_mov_tl(r_dst, cmp.c1);
1367 } else {
1368 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1369 }
1370
1371 free_compare(&cmp);
1372 }
1373
1374 #ifdef TARGET_SPARC64
1375 // Inverted logic
1376 static const int gen_tcg_cond_reg[8] = {
1377 -1,
1378 TCG_COND_NE,
1379 TCG_COND_GT,
1380 TCG_COND_GE,
1381 -1,
1382 TCG_COND_EQ,
1383 TCG_COND_LE,
1384 TCG_COND_LT,
1385 };
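
/* "Inverted" because the table stores the condition under which the
   branch is NOT taken; e.g. BRZ (cond 0x1) stores TCG_COND_NE and
   gen_compare_reg below flips it back to TCG_COND_EQ.  The -1 slots
   are the reserved rcond encodings 0 and 4.  */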
1386
1387 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1388 {
1389 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1390 cmp->is_bool = false;
1391 cmp->g1 = true;
1392 cmp->g2 = false;
1393 cmp->c1 = r_src;
1394 cmp->c2 = tcg_const_tl(0);
1395 }
1396
1397 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1398 {
1399 DisasCompare cmp;
1400 gen_compare_reg(&cmp, cond, r_src);
1401
1402 /* The interface is to return a boolean in r_dst. */
1403 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1404
1405 free_compare(&cmp);
1406 }
1407 #endif
1408
1409 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1410 {
1411 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1412 target_ulong target = dc->pc + offset;
1413
1414 #ifdef TARGET_SPARC64
1415 if (unlikely(AM_CHECK(dc))) {
1416 target &= 0xffffffffULL;
1417 }
1418 #endif
1419 if (cond == 0x0) {
1420 /* unconditional not taken */
1421 if (a) {
1422 dc->pc = dc->npc + 4;
1423 dc->npc = dc->pc + 4;
1424 } else {
1425 dc->pc = dc->npc;
1426 dc->npc = dc->pc + 4;
1427 }
1428 } else if (cond == 0x8) {
1429 /* unconditional taken */
1430 if (a) {
1431 dc->pc = target;
1432 dc->npc = dc->pc + 4;
1433 } else {
1434 dc->pc = dc->npc;
1435 dc->npc = target;
1436 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1437 }
1438 } else {
1439 flush_cond(dc);
1440 gen_cond(cpu_cond, cc, cond, dc);
1441 if (a) {
1442 gen_branch_a(dc, target, dc->npc, cpu_cond);
1443 dc->is_br = 1;
1444 } else {
1445 dc->pc = dc->npc;
1446 dc->jump_pc[0] = target;
1447 if (unlikely(dc->npc == DYNAMIC_PC)) {
1448 dc->jump_pc[1] = DYNAMIC_PC;
1449 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1450 } else {
1451 dc->jump_pc[1] = dc->npc + 4;
1452 dc->npc = JUMP_PC;
1453 }
1454 }
1455 }
1456 }
1457
1458 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1459 {
1460 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1461 target_ulong target = dc->pc + offset;
1462
1463 #ifdef TARGET_SPARC64
1464 if (unlikely(AM_CHECK(dc))) {
1465 target &= 0xffffffffULL;
1466 }
1467 #endif
1468 if (cond == 0x0) {
1469 /* unconditional not taken */
1470 if (a) {
1471 dc->pc = dc->npc + 4;
1472 dc->npc = dc->pc + 4;
1473 } else {
1474 dc->pc = dc->npc;
1475 dc->npc = dc->pc + 4;
1476 }
1477 } else if (cond == 0x8) {
1478 /* unconditional taken */
1479 if (a) {
1480 dc->pc = target;
1481 dc->npc = dc->pc + 4;
1482 } else {
1483 dc->pc = dc->npc;
1484 dc->npc = target;
1485 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1486 }
1487 } else {
1488 flush_cond(dc);
1489 gen_fcond(cpu_cond, cc, cond);
1490 if (a) {
1491 gen_branch_a(dc, target, dc->npc, cpu_cond);
1492 dc->is_br = 1;
1493 } else {
1494 dc->pc = dc->npc;
1495 dc->jump_pc[0] = target;
1496 if (unlikely(dc->npc == DYNAMIC_PC)) {
1497 dc->jump_pc[1] = DYNAMIC_PC;
1498 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1499 } else {
1500 dc->jump_pc[1] = dc->npc + 4;
1501 dc->npc = JUMP_PC;
1502 }
1503 }
1504 }
1505 }
1506
1507 #ifdef TARGET_SPARC64
1508 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1509 TCGv r_reg)
1510 {
1511 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1512 target_ulong target = dc->pc + offset;
1513
1514 if (unlikely(AM_CHECK(dc))) {
1515 target &= 0xffffffffULL;
1516 }
1517 flush_cond(dc);
1518 gen_cond_reg(cpu_cond, cond, r_reg);
1519 if (a) {
1520 gen_branch_a(dc, target, dc->npc, cpu_cond);
1521 dc->is_br = 1;
1522 } else {
1523 dc->pc = dc->npc;
1524 dc->jump_pc[0] = target;
1525 if (unlikely(dc->npc == DYNAMIC_PC)) {
1526 dc->jump_pc[1] = DYNAMIC_PC;
1527 tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
1528 } else {
1529 dc->jump_pc[1] = dc->npc + 4;
1530 dc->npc = JUMP_PC;
1531 }
1532 }
1533 }
1534
1535 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1536 {
1537 switch (fccno) {
1538 case 0:
1539 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1540 break;
1541 case 1:
1542 gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
1543 break;
1544 case 2:
1545 gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
1546 break;
1547 case 3:
1548 gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
1549 break;
1550 }
1551 }
1552
1553 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1554 {
1555 switch (fccno) {
1556 case 0:
1557 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1558 break;
1559 case 1:
1560 gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
1561 break;
1562 case 2:
1563 gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
1564 break;
1565 case 3:
1566 gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
1567 break;
1568 }
1569 }
1570
1571 static inline void gen_op_fcmpq(int fccno)
1572 {
1573 switch (fccno) {
1574 case 0:
1575 gen_helper_fcmpq(cpu_env);
1576 break;
1577 case 1:
1578 gen_helper_fcmpq_fcc1(cpu_env);
1579 break;
1580 case 2:
1581 gen_helper_fcmpq_fcc2(cpu_env);
1582 break;
1583 case 3:
1584 gen_helper_fcmpq_fcc3(cpu_env);
1585 break;
1586 }
1587 }
1588
1589 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1590 {
1591 switch (fccno) {
1592 case 0:
1593 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1594 break;
1595 case 1:
1596 gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
1597 break;
1598 case 2:
1599 gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
1600 break;
1601 case 3:
1602 gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
1603 break;
1604 }
1605 }
1606
1607 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1608 {
1609 switch (fccno) {
1610 case 0:
1611 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1612 break;
1613 case 1:
1614 gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
1615 break;
1616 case 2:
1617 gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
1618 break;
1619 case 3:
1620 gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
1621 break;
1622 }
1623 }
1624
1625 static inline void gen_op_fcmpeq(int fccno)
1626 {
1627 switch (fccno) {
1628 case 0:
1629 gen_helper_fcmpeq(cpu_env);
1630 break;
1631 case 1:
1632 gen_helper_fcmpeq_fcc1(cpu_env);
1633 break;
1634 case 2:
1635 gen_helper_fcmpeq_fcc2(cpu_env);
1636 break;
1637 case 3:
1638 gen_helper_fcmpeq_fcc3(cpu_env);
1639 break;
1640 }
1641 }
1642
1643 #else
1644
1645 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1646 {
1647 gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
1648 }
1649
1650 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1651 {
1652 gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
1653 }
1654
1655 static inline void gen_op_fcmpq(int fccno)
1656 {
1657 gen_helper_fcmpq(cpu_env);
1658 }
1659
1660 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1661 {
1662 gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
1663 }
1664
1665 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1666 {
1667 gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
1668 }
1669
1670 static inline void gen_op_fcmpeq(int fccno)
1671 {
1672 gen_helper_fcmpeq(cpu_env);
1673 }
1674 #endif
1675
1676 static inline void gen_op_fpexception_im(int fsr_flags)
1677 {
1678 TCGv_i32 r_const;
1679
1680 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1681 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1682 r_const = tcg_const_i32(TT_FP_EXCP);
1683 gen_helper_raise_exception(cpu_env, r_const);
1684 tcg_temp_free_i32(r_const);
1685 }
1686
1687 static int gen_trap_ifnofpu(DisasContext *dc)
1688 {
1689 #if !defined(CONFIG_USER_ONLY)
1690 if (!dc->fpu_enabled) {
1691 TCGv_i32 r_const;
1692
1693 save_state(dc);
1694 r_const = tcg_const_i32(TT_NFPU_INSN);
1695 gen_helper_raise_exception(cpu_env, r_const);
1696 tcg_temp_free_i32(r_const);
1697 dc->is_br = 1;
1698 return 1;
1699 }
1700 #endif
1701 return 0;
1702 }
1703
1704 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1705 {
1706 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1707 }
1708
1709 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1710 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1711 {
1712 TCGv_i32 dst, src;
1713
1714 src = gen_load_fpr_F(dc, rs);
1715 dst = gen_dest_fpr_F();
1716
1717 gen(dst, cpu_env, src);
1718
1719 gen_store_fpr_F(dc, rd, dst);
1720 }
1721
1722 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1723 void (*gen)(TCGv_i32, TCGv_i32))
1724 {
1725 TCGv_i32 dst, src;
1726
1727 src = gen_load_fpr_F(dc, rs);
1728 dst = gen_dest_fpr_F();
1729
1730 gen(dst, src);
1731
1732 gen_store_fpr_F(dc, rd, dst);
1733 }
1734
1735 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1736 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1737 {
1738 TCGv_i32 dst, src1, src2;
1739
1740 src1 = gen_load_fpr_F(dc, rs1);
1741 src2 = gen_load_fpr_F(dc, rs2);
1742 dst = gen_dest_fpr_F();
1743
1744 gen(dst, cpu_env, src1, src2);
1745
1746 gen_store_fpr_F(dc, rd, dst);
1747 }
1748
1749 #ifdef TARGET_SPARC64
1750 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1751 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1752 {
1753 TCGv_i32 dst, src1, src2;
1754
1755 src1 = gen_load_fpr_F(dc, rs1);
1756 src2 = gen_load_fpr_F(dc, rs2);
1757 dst = gen_dest_fpr_F();
1758
1759 gen(dst, src1, src2);
1760
1761 gen_store_fpr_F(dc, rd, dst);
1762 }
1763 #endif
1764
1765 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1766 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1767 {
1768 TCGv_i64 dst, src;
1769
1770 src = gen_load_fpr_D(dc, rs);
1771 dst = gen_dest_fpr_D();
1772
1773 gen(dst, cpu_env, src);
1774
1775 gen_store_fpr_D(dc, rd, dst);
1776 }
1777
1778 #ifdef TARGET_SPARC64
1779 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1780 void (*gen)(TCGv_i64, TCGv_i64))
1781 {
1782 TCGv_i64 dst, src;
1783
1784 src = gen_load_fpr_D(dc, rs);
1785 dst = gen_dest_fpr_D();
1786
1787 gen(dst, src);
1788
1789 gen_store_fpr_D(dc, rd, dst);
1790 }
1791 #endif
1792
1793 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1794 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1795 {
1796 TCGv_i64 dst, src1, src2;
1797
1798 src1 = gen_load_fpr_D(dc, rs1);
1799 src2 = gen_load_fpr_D(dc, rs2);
1800 dst = gen_dest_fpr_D();
1801
1802 gen(dst, cpu_env, src1, src2);
1803
1804 gen_store_fpr_D(dc, rd, dst);
1805 }
1806
1807 #ifdef TARGET_SPARC64
1808 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1809 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1810 {
1811 TCGv_i64 dst, src1, src2;
1812
1813 src1 = gen_load_fpr_D(dc, rs1);
1814 src2 = gen_load_fpr_D(dc, rs2);
1815 dst = gen_dest_fpr_D();
1816
1817 gen(dst, src1, src2);
1818
1819 gen_store_fpr_D(dc, rd, dst);
1820 }
1821
1822 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1823 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1824 {
1825 TCGv_i64 dst, src1, src2;
1826
1827 src1 = gen_load_fpr_D(dc, rs1);
1828 src2 = gen_load_fpr_D(dc, rs2);
1829 dst = gen_dest_fpr_D();
1830
1831 gen(dst, cpu_gsr, src1, src2);
1832
1833 gen_store_fpr_D(dc, rd, dst);
1834 }
1835
1836 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1837 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1838 {
1839 TCGv_i64 dst, src0, src1, src2;
1840
1841 src1 = gen_load_fpr_D(dc, rs1);
1842 src2 = gen_load_fpr_D(dc, rs2);
1843 src0 = gen_load_fpr_D(dc, rd);
1844 dst = gen_dest_fpr_D();
1845
1846 gen(dst, src0, src1, src2);
1847
1848 gen_store_fpr_D(dc, rd, dst);
1849 }
1850 #endif
1851
1852 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1853 void (*gen)(TCGv_ptr))
1854 {
1855 gen_op_load_fpr_QT1(QFPREG(rs));
1856
1857 gen(cpu_env);
1858
1859 gen_op_store_QT0_fpr(QFPREG(rd));
1860 gen_update_fprs_dirty(QFPREG(rd));
1861 }
1862
1863 #ifdef TARGET_SPARC64
1864 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1865 void (*gen)(TCGv_ptr))
1866 {
1867 gen_op_load_fpr_QT1(QFPREG(rs));
1868
1869 gen(cpu_env);
1870
1871 gen_op_store_QT0_fpr(QFPREG(rd));
1872 gen_update_fprs_dirty(QFPREG(rd));
1873 }
1874 #endif
1875
1876 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1877 void (*gen)(TCGv_ptr))
1878 {
1879 gen_op_load_fpr_QT0(QFPREG(rs1));
1880 gen_op_load_fpr_QT1(QFPREG(rs2));
1881
1882 gen(cpu_env);
1883
1884 gen_op_store_QT0_fpr(QFPREG(rd));
1885 gen_update_fprs_dirty(QFPREG(rd));
1886 }
1887
1888 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1889 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1890 {
1891 TCGv_i64 dst;
1892 TCGv_i32 src1, src2;
1893
1894 src1 = gen_load_fpr_F(dc, rs1);
1895 src2 = gen_load_fpr_F(dc, rs2);
1896 dst = gen_dest_fpr_D();
1897
1898 gen(dst, cpu_env, src1, src2);
1899
1900 gen_store_fpr_D(dc, rd, dst);
1901 }
1902
1903 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1904 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1905 {
1906 TCGv_i64 src1, src2;
1907
1908 src1 = gen_load_fpr_D(dc, rs1);
1909 src2 = gen_load_fpr_D(dc, rs2);
1910
1911 gen(cpu_env, src1, src2);
1912
1913 gen_op_store_QT0_fpr(QFPREG(rd));
1914 gen_update_fprs_dirty(QFPREG(rd));
1915 }
1916
1917 #ifdef TARGET_SPARC64
1918 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1919 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1920 {
1921 TCGv_i64 dst;
1922 TCGv_i32 src;
1923
1924 src = gen_load_fpr_F(dc, rs);
1925 dst = gen_dest_fpr_D();
1926
1927 gen(dst, cpu_env, src);
1928
1929 gen_store_fpr_D(dc, rd, dst);
1930 }
1931 #endif
1932
1933 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1934 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1935 {
1936 TCGv_i64 dst;
1937 TCGv_i32 src;
1938
1939 src = gen_load_fpr_F(dc, rs);
1940 dst = gen_dest_fpr_D();
1941
1942 gen(dst, cpu_env, src);
1943
1944 gen_store_fpr_D(dc, rd, dst);
1945 }
1946
1947 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1948 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1949 {
1950 TCGv_i32 dst;
1951 TCGv_i64 src;
1952
1953 src = gen_load_fpr_D(dc, rs);
1954 dst = gen_dest_fpr_F();
1955
1956 gen(dst, cpu_env, src);
1957
1958 gen_store_fpr_F(dc, rd, dst);
1959 }
1960
1961 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1962 void (*gen)(TCGv_i32, TCGv_ptr))
1963 {
1964 TCGv_i32 dst;
1965
1966 gen_op_load_fpr_QT1(QFPREG(rs));
1967 dst = gen_dest_fpr_F();
1968
1969 gen(dst, cpu_env);
1970
1971 gen_store_fpr_F(dc, rd, dst);
1972 }
1973
1974 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1975 void (*gen)(TCGv_i64, TCGv_ptr))
1976 {
1977 TCGv_i64 dst;
1978
1979 gen_op_load_fpr_QT1(QFPREG(rs));
1980 dst = gen_dest_fpr_D();
1981
1982 gen(dst, cpu_env);
1983
1984 gen_store_fpr_D(dc, rd, dst);
1985 }
1986
1987 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1988 void (*gen)(TCGv_ptr, TCGv_i32))
1989 {
1990 TCGv_i32 src;
1991
1992 src = gen_load_fpr_F(dc, rs);
1993
1994 gen(cpu_env, src);
1995
1996 gen_op_store_QT0_fpr(QFPREG(rd));
1997 gen_update_fprs_dirty(QFPREG(rd));
1998 }
1999
2000 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
2001 void (*gen)(TCGv_ptr, TCGv_i64))
2002 {
2003 TCGv_i64 src;
2004
2005 src = gen_load_fpr_D(dc, rs);
2006
2007 gen(cpu_env, src);
2008
2009 gen_op_store_QT0_fpr(QFPREG(rd));
2010 gen_update_fprs_dirty(QFPREG(rd));
2011 }
2012
2013 /* asi moves */
2014 #ifdef TARGET_SPARC64
2015 static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
2016 {
2017 int asi;
2018 TCGv_i32 r_asi;
2019
2020 if (IS_IMM) {
2021 r_asi = tcg_temp_new_i32();
2022 tcg_gen_mov_i32(r_asi, cpu_asi);
2023 } else {
2024 asi = GET_FIELD(insn, 19, 26);
2025 r_asi = tcg_const_i32(asi);
2026 }
2027 return r_asi;
2028 }
2029
2030 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2031 int sign)
2032 {
2033 TCGv_i32 r_asi, r_size, r_sign;
2034
2035 r_asi = gen_get_asi(insn, addr);
2036 r_size = tcg_const_i32(size);
2037 r_sign = tcg_const_i32(sign);
2038 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
2039 tcg_temp_free_i32(r_sign);
2040 tcg_temp_free_i32(r_size);
2041 tcg_temp_free_i32(r_asi);
2042 }
2043
2044 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2045 {
2046 TCGv_i32 r_asi, r_size;
2047
2048 r_asi = gen_get_asi(insn, addr);
2049 r_size = tcg_const_i32(size);
2050 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
2051 tcg_temp_free_i32(r_size);
2052 tcg_temp_free_i32(r_asi);
2053 }
2054
2055 static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
2056 {
2057 TCGv_i32 r_asi, r_size, r_rd;
2058
2059 r_asi = gen_get_asi(insn, addr);
2060 r_size = tcg_const_i32(size);
2061 r_rd = tcg_const_i32(rd);
2062 gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2063 tcg_temp_free_i32(r_rd);
2064 tcg_temp_free_i32(r_size);
2065 tcg_temp_free_i32(r_asi);
2066 }
2067
2068 static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
2069 {
2070 TCGv_i32 r_asi, r_size, r_rd;
2071
2072 r_asi = gen_get_asi(insn, addr);
2073 r_size = tcg_const_i32(size);
2074 r_rd = tcg_const_i32(rd);
2075 gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
2076 tcg_temp_free_i32(r_rd);
2077 tcg_temp_free_i32(r_size);
2078 tcg_temp_free_i32(r_asi);
2079 }
2080
2081 static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
2082 {
2083 TCGv_i32 r_asi, r_size, r_sign;
2084
2085 r_asi = gen_get_asi(insn, addr);
2086 r_size = tcg_const_i32(4);
2087 r_sign = tcg_const_i32(0);
2088 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2089 tcg_temp_free_i32(r_sign);
2090 gen_helper_st_asi(cpu_env, addr, dst, r_asi, r_size);
2091 tcg_temp_free_i32(r_size);
2092 tcg_temp_free_i32(r_asi);
2093 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2094 }
2095
2096 static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
2097 {
2098 TCGv_i32 r_asi, r_rd;
2099
2100 r_asi = gen_get_asi(insn, addr);
2101 r_rd = tcg_const_i32(rd);
2102 gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
2103 tcg_temp_free_i32(r_rd);
2104 tcg_temp_free_i32(r_asi);
2105 }
2106
2107 static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
2108 {
2109 TCGv_i32 r_asi, r_size;
2110
2111 gen_movl_reg_TN(rd + 1, cpu_tmp0);
2112 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
2113 r_asi = gen_get_asi(insn, addr);
2114 r_size = tcg_const_i32(8);
2115 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2116 tcg_temp_free_i32(r_size);
2117 tcg_temp_free_i32(r_asi);
2118 }
2119
2120 static inline void gen_cas_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
2121 int rd)
2122 {
2123 TCGv r_val1;
2124 TCGv_i32 r_asi;
2125
2126 r_val1 = tcg_temp_new();
2127 gen_movl_reg_TN(rd, r_val1);
2128 r_asi = gen_get_asi(insn, addr);
2129 gen_helper_cas_asi(dst, cpu_env, addr, r_val1, val2, r_asi);
2130 tcg_temp_free_i32(r_asi);
2131 tcg_temp_free(r_val1);
2132 }
2133
2134 static inline void gen_casx_asi(TCGv dst, TCGv addr, TCGv val2, int insn,
2135 int rd)
2136 {
2137 TCGv_i32 r_asi;
2138
2139 gen_movl_reg_TN(rd, cpu_tmp64);
2140 r_asi = gen_get_asi(insn, addr);
2141 gen_helper_casx_asi(dst, cpu_env, addr, cpu_tmp64, val2, r_asi);
2142 tcg_temp_free_i32(r_asi);
2143 }
2144
2145 #elif !defined(CONFIG_USER_ONLY)
2146
2147 static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
2148 int sign)
2149 {
2150 TCGv_i32 r_asi, r_size, r_sign;
2151
2152 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2153 r_size = tcg_const_i32(size);
2154 r_sign = tcg_const_i32(sign);
2155 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2156 tcg_temp_free(r_sign);
2157 tcg_temp_free(r_size);
2158 tcg_temp_free(r_asi);
2159 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2160 }
2161
2162 static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
2163 {
2164 TCGv_i32 r_asi, r_size;
2165
2166 tcg_gen_extu_tl_i64(cpu_tmp64, src);
2167 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2168 r_size = tcg_const_i32(size);
2169 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2170 tcg_temp_free(r_size);
2171 tcg_temp_free(r_asi);
2172 }
2173
2174 static inline void gen_swap_asi(TCGv dst, TCGv addr, int insn)
2175 {
2176 TCGv_i32 r_asi, r_size, r_sign;
2177 TCGv_i64 r_val;
2178
2179 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2180 r_size = tcg_const_i32(4);
2181 r_sign = tcg_const_i32(0);
2182 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2183         tcg_temp_free_i32(r_sign);
2184 r_val = tcg_temp_new_i64();
2185 tcg_gen_extu_tl_i64(r_val, dst);
2186 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2187 tcg_temp_free_i64(r_val);
2188         tcg_temp_free_i32(r_size);
2189         tcg_temp_free_i32(r_asi);
2190 tcg_gen_trunc_i64_tl(dst, cpu_tmp64);
2191 }
2192
2193 static inline void gen_ldda_asi(TCGv hi, TCGv addr, int insn, int rd)
2194 {
2195 TCGv_i32 r_asi, r_size, r_sign;
2196
2197 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2198 r_size = tcg_const_i32(8);
2199 r_sign = tcg_const_i32(0);
2200 gen_helper_ld_asi(cpu_tmp64, cpu_env, addr, r_asi, r_size, r_sign);
2201         tcg_temp_free_i32(r_sign);
2202         tcg_temp_free_i32(r_size);
2203         tcg_temp_free_i32(r_asi);
2204 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
2205 gen_movl_TN_reg(rd + 1, cpu_tmp0);
2206 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
2207 tcg_gen_trunc_i64_tl(hi, cpu_tmp64);
2208 gen_movl_TN_reg(rd, hi);
2209 }
2210
2211 static inline void gen_stda_asi(TCGv hi, TCGv addr, int insn, int rd)
2212 {
2213 TCGv_i32 r_asi, r_size;
2214
2215 gen_movl_reg_TN(rd + 1, cpu_tmp0);
2216 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, hi);
2217 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2218 r_size = tcg_const_i32(8);
2219 gen_helper_st_asi(cpu_env, addr, cpu_tmp64, r_asi, r_size);
2220         tcg_temp_free_i32(r_size);
2221         tcg_temp_free_i32(r_asi);
2222 }
2223 #endif
2224
2225 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2226 static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
2227 {
2228 TCGv_i64 r_val;
2229 TCGv_i32 r_asi, r_size;
2230
2231 gen_ld_asi(dst, addr, insn, 1, 0);
2232
2233 r_val = tcg_const_i64(0xffULL);
2234 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
2235 r_size = tcg_const_i32(1);
2236 gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
2237 tcg_temp_free_i32(r_size);
2238 tcg_temp_free_i32(r_asi);
2239 tcg_temp_free_i64(r_val);
2240 }
2241 #endif
2242
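/* Return a TCGv holding operand rs1 (rs2 for get_src2 below): %g0, the
   window registers and, for get_src2, any immediate are materialized
   into DEF, while globals %g1..%g7 are returned directly, so callers
   must treat the result as read-only. */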
2243 static inline TCGv get_src1(unsigned int insn, TCGv def)
2244 {
2245 TCGv r_rs1 = def;
2246 unsigned int rs1;
2247
2248 rs1 = GET_FIELD(insn, 13, 17);
2249 if (rs1 == 0) {
2250 tcg_gen_movi_tl(def, 0);
2251 } else if (rs1 < 8) {
2252 r_rs1 = cpu_gregs[rs1];
2253 } else {
2254 tcg_gen_ld_tl(def, cpu_regwptr, (rs1 - 8) * sizeof(target_ulong));
2255 }
2256 return r_rs1;
2257 }
2258
2259 static inline TCGv get_src2(unsigned int insn, TCGv def)
2260 {
2261 TCGv r_rs2 = def;
2262
2263 if (IS_IMM) { /* immediate */
2264 target_long simm = GET_FIELDs(insn, 19, 31);
2265 tcg_gen_movi_tl(def, simm);
2266 } else { /* register */
2267 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2268 if (rs2 == 0) {
2269 tcg_gen_movi_tl(def, 0);
2270 } else if (rs2 < 8) {
2271 r_rs2 = cpu_gregs[rs2];
2272 } else {
2273 tcg_gen_ld_tl(def, cpu_regwptr, (rs2 - 8) * sizeof(target_ulong));
2274 }
2275 }
2276 return r_rs2;
2277 }
2278
2279 #ifdef TARGET_SPARC64
2280 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2281 {
2282 TCGv_i32 c32, zero, dst, s1, s2;
2283
2284     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2285        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2286        the latter. */
2287 c32 = tcg_temp_new_i32();
2288 if (cmp->is_bool) {
2289 tcg_gen_trunc_i64_i32(c32, cmp->c1);
2290 } else {
2291 TCGv_i64 c64 = tcg_temp_new_i64();
2292 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2293 tcg_gen_trunc_i64_i32(c32, c64);
2294 tcg_temp_free_i64(c64);
2295 }
2296
2297 s1 = gen_load_fpr_F(dc, rs);
2298 s2 = gen_load_fpr_F(dc, rd);
2299 dst = gen_dest_fpr_F();
2300 zero = tcg_const_i32(0);
2301
2302 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2303
2304 tcg_temp_free_i32(c32);
2305 tcg_temp_free_i32(zero);
2306 gen_store_fpr_F(dc, rd, dst);
2307 }
2308
2309 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2310 {
2311 TCGv_i64 dst = gen_dest_fpr_D();
2312 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2313 gen_load_fpr_D(dc, rs),
2314 gen_load_fpr_D(dc, rd));
2315 gen_store_fpr_D(dc, rd, dst);
2316 }
2317
2318 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2319 {
2320 int qd = QFPREG(rd);
2321 int qs = QFPREG(rs);
2322
2323 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2324 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2325 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2326 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2327
2328 gen_update_fprs_dirty(qd);
2329 }
2330
2331 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2332 {
2333 TCGv_i32 r_tl = tcg_temp_new_i32();
2334
2335 /* load env->tl into r_tl */
2336 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2337
2338 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2339 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2340
2341 /* calculate offset to current trap state from env->ts, reuse r_tl */
2342 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2343 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2344
2345 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2346 {
2347 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2348 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2349 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2350 tcg_temp_free_ptr(r_tl_tmp);
2351 }
2352
2353 tcg_temp_free_i32(r_tl);
2354 }
2355
2356 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2357 int width, bool cc, bool left)
2358 {
2359 TCGv lo1, lo2, t1, t2;
2360 uint64_t amask, tabl, tabr;
2361 int shift, imask, omask;
2362
2363 if (cc) {
2364 tcg_gen_mov_tl(cpu_cc_src, s1);
2365 tcg_gen_mov_tl(cpu_cc_src2, s2);
2366 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2367 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2368 dc->cc_op = CC_OP_SUB;
2369 }
2370
2371 /* Theory of operation: there are two tables, left and right (not to
2372 be confused with the left and right versions of the opcode). These
2373 are indexed by the low 3 bits of the inputs. To make things "easy",
2374 these tables are loaded into two constants, TABL and TABR below.
2375 The operation index = (input & imask) << shift calculates the index
2376 into the constant, while val = (table >> index) & omask calculates
2377 the value we're looking for. */
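    /* For example, edge8 (left) with (s1 & 7) == 3 yields
       index = 3 << 3 = 24 and val = (tabl >> 24) & 0xff == 0xf8. */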
2378 switch (width) {
2379 case 8:
2380 imask = 0x7;
2381 shift = 3;
2382 omask = 0xff;
2383 if (left) {
2384 tabl = 0x80c0e0f0f8fcfeffULL;
2385 tabr = 0xff7f3f1f0f070301ULL;
2386 } else {
2387 tabl = 0x0103070f1f3f7fffULL;
2388 tabr = 0xfffefcf8f0e0c080ULL;
2389 }
2390 break;
2391 case 16:
2392 imask = 0x6;
2393 shift = 1;
2394 omask = 0xf;
2395 if (left) {
2396 tabl = 0x8cef;
2397 tabr = 0xf731;
2398 } else {
2399 tabl = 0x137f;
2400 tabr = 0xfec8;
2401 }
2402 break;
2403 case 32:
2404 imask = 0x4;
2405 shift = 0;
2406 omask = 0x3;
2407 if (left) {
2408 tabl = (2 << 2) | 3;
2409 tabr = (3 << 2) | 1;
2410 } else {
2411 tabl = (1 << 2) | 3;
2412 tabr = (3 << 2) | 2;
2413 }
2414 break;
2415 default:
2416 abort();
2417 }
2418
2419 lo1 = tcg_temp_new();
2420 lo2 = tcg_temp_new();
2421 tcg_gen_andi_tl(lo1, s1, imask);
2422 tcg_gen_andi_tl(lo2, s2, imask);
2423 tcg_gen_shli_tl(lo1, lo1, shift);
2424 tcg_gen_shli_tl(lo2, lo2, shift);
2425
2426 t1 = tcg_const_tl(tabl);
2427 t2 = tcg_const_tl(tabr);
2428 tcg_gen_shr_tl(lo1, t1, lo1);
2429 tcg_gen_shr_tl(lo2, t2, lo2);
2430 tcg_gen_andi_tl(dst, lo1, omask);
2431 tcg_gen_andi_tl(lo2, lo2, omask);
2432
2433 amask = -8;
2434 if (AM_CHECK(dc)) {
2435 amask &= 0xffffffffULL;
2436 }
2437 tcg_gen_andi_tl(s1, s1, amask);
2438 tcg_gen_andi_tl(s2, s2, amask);
2439
2440 /* We want to compute
2441 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2442 We've already done dst = lo1, so this reduces to
2443 dst &= (s1 == s2 ? -1 : lo2)
2444 Which we perform by
2445 lo2 |= -(s1 == s2)
2446 dst &= lo2
2447 */
2448 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2449 tcg_gen_neg_tl(t1, t1);
2450 tcg_gen_or_tl(lo2, lo2, t1);
2451 tcg_gen_and_tl(dst, dst, lo2);
2452
2453 tcg_temp_free(lo1);
2454 tcg_temp_free(lo2);
2455 tcg_temp_free(t1);
2456 tcg_temp_free(t2);
2457 }
2458
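/* alignaddr: dst = (s1 + s2) & ~7, with the low three bits of the sum
   (negated for the "left" variant) latched into GSR.align for use by
   faligndata below. */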
2459 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2460 {
2461 TCGv tmp = tcg_temp_new();
2462
2463 tcg_gen_add_tl(tmp, s1, s2);
2464 tcg_gen_andi_tl(dst, tmp, -8);
2465 if (left) {
2466 tcg_gen_neg_tl(tmp, tmp);
2467 }
2468 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2469
2470 tcg_temp_free(tmp);
2471 }
2472
2473 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2474 {
2475 TCGv t1, t2, shift;
2476
2477 t1 = tcg_temp_new();
2478 t2 = tcg_temp_new();
2479 shift = tcg_temp_new();
2480
2481 tcg_gen_andi_tl(shift, gsr, 7);
2482 tcg_gen_shli_tl(shift, shift, 3);
2483 tcg_gen_shl_tl(t1, s1, shift);
2484
2485 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2486 shift of (up to 63) followed by a constant shift of 1. */
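    /* E.g. with shift == 0, the intended s2 >> 64 becomes
       (s2 >> 63) >> 1 == 0, leaving dst = s1. */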
2487 tcg_gen_xori_tl(shift, shift, 63);
2488 tcg_gen_shr_tl(t2, s2, shift);
2489 tcg_gen_shri_tl(t2, t2, 1);
2490
2491 tcg_gen_or_tl(dst, t1, t2);
2492
2493 tcg_temp_free(t1);
2494 tcg_temp_free(t2);
2495 tcg_temp_free(shift);
2496 }
2497 #endif
2498
2499 #define CHECK_IU_FEATURE(dc, FEATURE) \
2500 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2501 goto illegal_insn;
2502 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2503 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2504 goto nfpu_insn;
2505
2506 /* before an instruction, dc->pc must be static */
2507 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2508 {
2509 unsigned int opc, rs1, rs2, rd;
2510 TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
2511 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2512 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2513 target_long simm;
2514
2515 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2516 tcg_gen_debug_insn_start(dc->pc);
2517 }
2518
2519 opc = GET_FIELD(insn, 0, 1);
2520
2521 rd = GET_FIELD(insn, 2, 6);
2522
2523 cpu_tmp1 = cpu_src1 = tcg_temp_new();
2524 cpu_tmp2 = cpu_src2 = tcg_temp_new();
2525
2526 switch (opc) {
2527 case 0: /* branches/sethi */
2528 {
2529 unsigned int xop = GET_FIELD(insn, 7, 9);
2530 int32_t target;
2531 switch (xop) {
2532 #ifdef TARGET_SPARC64
2533 case 0x1: /* V9 BPcc */
2534 {
2535 int cc;
2536
2537 target = GET_FIELD_SP(insn, 0, 18);
2538 target = sign_extend(target, 19);
2539 target <<= 2;
2540 cc = GET_FIELD_SP(insn, 20, 21);
2541 if (cc == 0)
2542 do_branch(dc, target, insn, 0);
2543 else if (cc == 2)
2544 do_branch(dc, target, insn, 1);
2545 else
2546 goto illegal_insn;
2547 goto jmp_insn;
2548 }
2549 case 0x3: /* V9 BPr */
2550 {
2551 target = GET_FIELD_SP(insn, 0, 13) |
2552 (GET_FIELD_SP(insn, 20, 21) << 14);
2553 target = sign_extend(target, 16);
2554 target <<= 2;
2555 cpu_src1 = get_src1(insn, cpu_src1);
2556 do_branch_reg(dc, target, insn, cpu_src1);
2557 goto jmp_insn;
2558 }
2559 case 0x5: /* V9 FBPcc */
2560 {
2561 int cc = GET_FIELD_SP(insn, 20, 21);
2562 if (gen_trap_ifnofpu(dc)) {
2563 goto jmp_insn;
2564 }
2565 target = GET_FIELD_SP(insn, 0, 18);
2566 target = sign_extend(target, 19);
2567 target <<= 2;
2568 do_fbranch(dc, target, insn, cc);
2569 goto jmp_insn;
2570 }
2571 #else
2572 case 0x7: /* CBN+x */
2573 {
2574 goto ncp_insn;
2575 }
2576 #endif
2577 case 0x2: /* BN+x */
2578 {
2579 target = GET_FIELD(insn, 10, 31);
2580 target = sign_extend(target, 22);
2581 target <<= 2;
2582 do_branch(dc, target, insn, 0);
2583 goto jmp_insn;
2584 }
2585 case 0x6: /* FBN+x */
2586 {
2587 if (gen_trap_ifnofpu(dc)) {
2588 goto jmp_insn;
2589 }
2590 target = GET_FIELD(insn, 10, 31);
2591 target = sign_extend(target, 22);
2592 target <<= 2;
2593 do_fbranch(dc, target, insn, 0);
2594 goto jmp_insn;
2595 }
2596 case 0x4: /* SETHI */
2597                 if (rd) { // rd == 0 is a nop
2598 uint32_t value = GET_FIELD(insn, 10, 31);
2599 TCGv r_const;
2600
2601 r_const = tcg_const_tl(value << 10);
2602 gen_movl_TN_reg(rd, r_const);
2603 tcg_temp_free(r_const);
2604 }
2605 break;
2606 case 0x0: /* UNIMPL */
2607 default:
2608 goto illegal_insn;
2609 }
2610 break;
2611 }
2612 break;
2613 case 1: /*CALL*/
2614 {
2615 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2616 TCGv r_const;
2617
2618 r_const = tcg_const_tl(dc->pc);
2619 gen_movl_TN_reg(15, r_const);
2620 tcg_temp_free(r_const);
2621 target += dc->pc;
2622 gen_mov_pc_npc(dc);
2623 #ifdef TARGET_SPARC64
2624 if (unlikely(AM_CHECK(dc))) {
2625 target &= 0xffffffffULL;
2626 }
2627 #endif
2628 dc->npc = target;
2629 }
2630 goto jmp_insn;
2631 case 2: /* FPU & Logical Operations */
2632 {
2633 unsigned int xop = GET_FIELD(insn, 7, 12);
2634 if (xop == 0x3a) { /* generate trap */
2635 int cond;
2636
2637 cpu_src1 = get_src1(insn, cpu_src1);
2638 if (IS_IMM) {
2639 rs2 = GET_FIELD(insn, 25, 31);
2640 tcg_gen_addi_tl(cpu_dst, cpu_src1, rs2);
2641 } else {
2642 rs2 = GET_FIELD(insn, 27, 31);
2643 if (rs2 != 0) {
2644 gen_movl_reg_TN(rs2, cpu_src2);
2645 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
2646 } else
2647 tcg_gen_mov_tl(cpu_dst, cpu_src1);
2648 }
2649
2650 cond = GET_FIELD(insn, 3, 6);
2651 if (cond == 0x8) { /* Trap Always */
2652 save_state(dc);
2653 if ((dc->def->features & CPU_FEATURE_HYPV) &&
2654 supervisor(dc))
2655 tcg_gen_andi_tl(cpu_dst, cpu_dst, UA2005_HTRAP_MASK);
2656 else
2657 tcg_gen_andi_tl(cpu_dst, cpu_dst, V8_TRAP_MASK);
2658 tcg_gen_addi_tl(cpu_dst, cpu_dst, TT_TRAP);
2659 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
2660 gen_helper_raise_exception(cpu_env, cpu_tmp32);
2661
2662 } else if (cond != 0) {
2663 DisasCompare cmp;
2664 int l1;
2665 #ifdef TARGET_SPARC64
2666 /* V9 icc/xcc */
2667 int cc = GET_FIELD_SP(insn, 11, 12);
2668
2669 save_state(dc);
2670 if (cc == 0) {
2671 gen_compare(&cmp, 0, cond, dc);
2672 } else if (cc == 2) {
2673 gen_compare(&cmp, 1, cond, dc);
2674 } else {
2675 goto illegal_insn;
2676 }
2677 #else
2678 save_state(dc);
2679 gen_compare(&cmp, 0, cond, dc);
2680 #endif
2681 l1 = gen_new_label();
2682 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2683 cmp.c1, cmp.c2, l1);
2684 free_compare(&cmp);
2685
2686 if ((dc->def->features & CPU_FEATURE_HYPV) &&
2687 supervisor(dc))
2688 tcg_gen_andi_tl(cpu_dst, cpu_dst, UA2005_HTRAP_MASK);
2689 else
2690 tcg_gen_andi_tl(cpu_dst, cpu_dst, V8_TRAP_MASK);
2691 tcg_gen_addi_tl(cpu_dst, cpu_dst, TT_TRAP);
2692 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
2693 gen_helper_raise_exception(cpu_env, cpu_tmp32);
2694
2695 gen_set_label(l1);
2696 }
2697 gen_op_next_insn();
2698 tcg_gen_exit_tb(0);
2699 dc->is_br = 1;
2700 goto jmp_insn;
2701             } else if (xop == 0x28) { /* rdy / V9 rdasr */
2702 rs1 = GET_FIELD(insn, 13, 17);
2703 switch(rs1) {
2704 case 0: /* rdy */
2705 #ifndef TARGET_SPARC64
2706 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2707 manual, rdy on the microSPARC
2708 II */
2709 case 0x0f: /* stbar in the SPARCv8 manual,
2710 rdy on the microSPARC II */
2711 case 0x10 ... 0x1f: /* implementation-dependent in the
2712 SPARCv8 manual, rdy on the
2713 microSPARC II */
2714 /* Read Asr17 */
2715 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2716 TCGv r_const;
2717
2718 /* Read Asr17 for a Leon3 monoprocessor */
2719 r_const = tcg_const_tl((1 << 8)
2720 | (dc->def->nwindows - 1));
2721 gen_movl_TN_reg(rd, r_const);
2722 tcg_temp_free(r_const);
2723 break;
2724 }
2725 #endif
2726 gen_movl_TN_reg(rd, cpu_y);
2727 break;
2728 #ifdef TARGET_SPARC64
2729 case 0x2: /* V9 rdccr */
2730 gen_helper_compute_psr(cpu_env);
2731 gen_helper_rdccr(cpu_dst, cpu_env);
2732 gen_movl_TN_reg(rd, cpu_dst);
2733 break;
2734 case 0x3: /* V9 rdasi */
2735 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2736 gen_movl_TN_reg(rd, cpu_dst);
2737 break;
2738 case 0x4: /* V9 rdtick */
2739 {
2740 TCGv_ptr r_tickptr;
2741
2742 r_tickptr = tcg_temp_new_ptr();
2743 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2744 offsetof(CPUSPARCState, tick));
2745 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2746 tcg_temp_free_ptr(r_tickptr);
2747 gen_movl_TN_reg(rd, cpu_dst);
2748 }
2749 break;
2750 case 0x5: /* V9 rdpc */
2751 {
2752 TCGv r_const;
2753
2754 if (unlikely(AM_CHECK(dc))) {
2755 r_const = tcg_const_tl(dc->pc & 0xffffffffULL);
2756 } else {
2757 r_const = tcg_const_tl(dc->pc);
2758 }
2759 gen_movl_TN_reg(rd, r_const);
2760 tcg_temp_free(r_const);
2761 }
2762 break;
2763 case 0x6: /* V9 rdfprs */
2764 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2765 gen_movl_TN_reg(rd, cpu_dst);
2766 break;
2767 case 0xf: /* V9 membar */
2768 break; /* no effect */
2769 case 0x13: /* Graphics Status */
2770 if (gen_trap_ifnofpu(dc)) {
2771 goto jmp_insn;
2772 }
2773 gen_movl_TN_reg(rd, cpu_gsr);
2774 break;
2775 case 0x16: /* Softint */
2776 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2777 gen_movl_TN_reg(rd, cpu_dst);
2778 break;
2779 case 0x17: /* Tick compare */
2780 gen_movl_TN_reg(rd, cpu_tick_cmpr);
2781 break;
2782 case 0x18: /* System tick */
2783 {
2784 TCGv_ptr r_tickptr;
2785
2786 r_tickptr = tcg_temp_new_ptr();
2787 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2788 offsetof(CPUSPARCState, stick));
2789 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2790 tcg_temp_free_ptr(r_tickptr);
2791 gen_movl_TN_reg(rd, cpu_dst);
2792 }
2793 break;
2794 case 0x19: /* System tick compare */
2795 gen_movl_TN_reg(rd, cpu_stick_cmpr);
2796 break;
2797 case 0x10: /* Performance Control */
2798 case 0x11: /* Performance Instrumentation Counter */
2799 case 0x12: /* Dispatch Control */
2800 case 0x14: /* Softint set, WO */
2801 case 0x15: /* Softint clear, WO */
2802 #endif
2803 default:
2804 goto illegal_insn;
2805 }
2806 #if !defined(CONFIG_USER_ONLY)
2807 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2808 #ifndef TARGET_SPARC64
2809 if (!supervisor(dc))
2810 goto priv_insn;
2811 gen_helper_compute_psr(cpu_env);
2812 dc->cc_op = CC_OP_FLAGS;
2813 gen_helper_rdpsr(cpu_dst, cpu_env);
2814 #else
2815 CHECK_IU_FEATURE(dc, HYPV);
2816 if (!hypervisor(dc))
2817 goto priv_insn;
2818 rs1 = GET_FIELD(insn, 13, 17);
2819 switch (rs1) {
2820 case 0: // hpstate
2821 // gen_op_rdhpstate();
2822 break;
2823 case 1: // htstate
2824 // gen_op_rdhtstate();
2825 break;
2826 case 3: // hintp
2827 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2828 break;
2829 case 5: // htba
2830 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2831 break;
2832 case 6: // hver
2833 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2834 break;
2835 case 31: // hstick_cmpr
2836 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2837 break;
2838 default:
2839 goto illegal_insn;
2840 }
2841 #endif
2842 gen_movl_TN_reg(rd, cpu_dst);
2843 break;
2844 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2845 if (!supervisor(dc))
2846 goto priv_insn;
2847 #ifdef TARGET_SPARC64
2848 rs1 = GET_FIELD(insn, 13, 17);
2849 switch (rs1) {
2850 case 0: // tpc
2851 {
2852 TCGv_ptr r_tsptr;
2853
2854 r_tsptr = tcg_temp_new_ptr();
2855 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2856 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2857 offsetof(trap_state, tpc));
2858 tcg_temp_free_ptr(r_tsptr);
2859 }
2860 break;
2861 case 1: // tnpc
2862 {
2863 TCGv_ptr r_tsptr;
2864
2865 r_tsptr = tcg_temp_new_ptr();
2866 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2867 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2868 offsetof(trap_state, tnpc));
2869 tcg_temp_free_ptr(r_tsptr);
2870 }
2871 break;
2872 case 2: // tstate
2873 {
2874 TCGv_ptr r_tsptr;
2875
2876 r_tsptr = tcg_temp_new_ptr();
2877 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2878 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2879 offsetof(trap_state, tstate));
2880 tcg_temp_free_ptr(r_tsptr);
2881 }
2882 break;
2883 case 3: // tt
2884 {
2885 TCGv_ptr r_tsptr;
2886
2887 r_tsptr = tcg_temp_new_ptr();
2888 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2889 tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
2890 offsetof(trap_state, tt));
2891 tcg_temp_free_ptr(r_tsptr);
2892 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2893 }
2894 break;
2895 case 4: // tick
2896 {
2897 TCGv_ptr r_tickptr;
2898
2899 r_tickptr = tcg_temp_new_ptr();
2900 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2901 offsetof(CPUSPARCState, tick));
2902 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2903 gen_movl_TN_reg(rd, cpu_tmp0);
2904 tcg_temp_free_ptr(r_tickptr);
2905 }
2906 break;
2907 case 5: // tba
2908 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2909 break;
2910 case 6: // pstate
2911 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2912 offsetof(CPUSPARCState, pstate));
2913 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2914 break;
2915 case 7: // tl
2916 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2917 offsetof(CPUSPARCState, tl));
2918 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2919 break;
2920 case 8: // pil
2921 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2922 offsetof(CPUSPARCState, psrpil));
2923 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2924 break;
2925 case 9: // cwp
2926 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2927 break;
2928 case 10: // cansave
2929 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2930 offsetof(CPUSPARCState, cansave));
2931 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2932 break;
2933 case 11: // canrestore
2934 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2935 offsetof(CPUSPARCState, canrestore));
2936 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2937 break;
2938 case 12: // cleanwin
2939 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2940 offsetof(CPUSPARCState, cleanwin));
2941 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2942 break;
2943 case 13: // otherwin
2944 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2945 offsetof(CPUSPARCState, otherwin));
2946 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2947 break;
2948 case 14: // wstate
2949 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2950 offsetof(CPUSPARCState, wstate));
2951 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2952 break;
2953 case 16: // UA2005 gl
2954 CHECK_IU_FEATURE(dc, GL);
2955 tcg_gen_ld_i32(cpu_tmp32, cpu_env,
2956 offsetof(CPUSPARCState, gl));
2957 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
2958 break;
2959 case 26: // UA2005 strand status
2960 CHECK_IU_FEATURE(dc, HYPV);
2961 if (!hypervisor(dc))
2962 goto priv_insn;
2963 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2964 break;
2965 case 31: // ver
2966 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2967 break;
2968 case 15: // fq
2969 default:
2970 goto illegal_insn;
2971 }
2972 #else
2973 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2974 #endif
2975 gen_movl_TN_reg(rd, cpu_tmp0);
2976 break;
2977 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2978 #ifdef TARGET_SPARC64
2979 save_state(dc);
2980 gen_helper_flushw(cpu_env);
2981 #else
2982 if (!supervisor(dc))
2983 goto priv_insn;
2984 gen_movl_TN_reg(rd, cpu_tbr);
2985 #endif
2986 break;
2987 #endif
2988             } else if (xop == 0x34) {   /* FPU Operations (FPop1) */
2989 if (gen_trap_ifnofpu(dc)) {
2990 goto jmp_insn;
2991 }
2992 gen_op_clear_ieee_excp_and_FTT();
2993 rs1 = GET_FIELD(insn, 13, 17);
2994 rs2 = GET_FIELD(insn, 27, 31);
2995 xop = GET_FIELD(insn, 18, 26);
2996 save_state(dc);
2997 switch (xop) {
2998 case 0x1: /* fmovs */
2999 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3000 gen_store_fpr_F(dc, rd, cpu_src1_32);
3001 break;
3002 case 0x5: /* fnegs */
3003 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3004 break;
3005 case 0x9: /* fabss */
3006 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3007 break;
3008 case 0x29: /* fsqrts */
3009 CHECK_FPU_FEATURE(dc, FSQRT);
3010 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3011 break;
3012 case 0x2a: /* fsqrtd */
3013 CHECK_FPU_FEATURE(dc, FSQRT);
3014 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3015 break;
3016 case 0x2b: /* fsqrtq */
3017 CHECK_FPU_FEATURE(dc, FLOAT128);
3018 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3019 break;
3020 case 0x41: /* fadds */
3021 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3022 break;
3023 case 0x42: /* faddd */
3024 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3025 break;
3026 case 0x43: /* faddq */
3027 CHECK_FPU_FEATURE(dc, FLOAT128);
3028 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3029 break;
3030 case 0x45: /* fsubs */
3031 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3032 break;
3033 case 0x46: /* fsubd */
3034 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3035 break;
3036 case 0x47: /* fsubq */
3037 CHECK_FPU_FEATURE(dc, FLOAT128);
3038 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3039 break;
3040 case 0x49: /* fmuls */
3041 CHECK_FPU_FEATURE(dc, FMUL);
3042 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3043 break;
3044 case 0x4a: /* fmuld */
3045 CHECK_FPU_FEATURE(dc, FMUL);
3046 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3047 break;
3048 case 0x4b: /* fmulq */
3049 CHECK_FPU_FEATURE(dc, FLOAT128);
3050 CHECK_FPU_FEATURE(dc, FMUL);
3051 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3052 break;
3053 case 0x4d: /* fdivs */
3054 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3055 break;
3056 case 0x4e: /* fdivd */
3057 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3058 break;
3059 case 0x4f: /* fdivq */
3060 CHECK_FPU_FEATURE(dc, FLOAT128);
3061 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3062 break;
3063 case 0x69: /* fsmuld */
3064 CHECK_FPU_FEATURE(dc, FSMULD);
3065 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3066 break;
3067 case 0x6e: /* fdmulq */
3068 CHECK_FPU_FEATURE(dc, FLOAT128);
3069 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3070 break;
3071 case 0xc4: /* fitos */
3072 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3073 break;
3074 case 0xc6: /* fdtos */
3075 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3076 break;
3077 case 0xc7: /* fqtos */
3078 CHECK_FPU_FEATURE(dc, FLOAT128);
3079 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3080 break;
3081 case 0xc8: /* fitod */
3082 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3083 break;
3084 case 0xc9: /* fstod */
3085 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3086 break;
3087 case 0xcb: /* fqtod */
3088 CHECK_FPU_FEATURE(dc, FLOAT128);
3089 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3090 break;
3091 case 0xcc: /* fitoq */
3092 CHECK_FPU_FEATURE(dc, FLOAT128);
3093 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3094 break;
3095 case 0xcd: /* fstoq */
3096 CHECK_FPU_FEATURE(dc, FLOAT128);
3097 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3098 break;
3099 case 0xce: /* fdtoq */
3100 CHECK_FPU_FEATURE(dc, FLOAT128);
3101 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3102 break;
3103 case 0xd1: /* fstoi */
3104 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3105 break;
3106 case 0xd2: /* fdtoi */
3107 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3108 break;
3109 case 0xd3: /* fqtoi */
3110 CHECK_FPU_FEATURE(dc, FLOAT128);
3111 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3112 break;
3113 #ifdef TARGET_SPARC64
3114 case 0x2: /* V9 fmovd */
3115 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3116 gen_store_fpr_D(dc, rd, cpu_src1_64);
3117 break;
3118 case 0x3: /* V9 fmovq */
3119 CHECK_FPU_FEATURE(dc, FLOAT128);
3120 gen_move_Q(rd, rs2);
3121 break;
3122 case 0x6: /* V9 fnegd */
3123 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3124 break;
3125 case 0x7: /* V9 fnegq */
3126 CHECK_FPU_FEATURE(dc, FLOAT128);
3127 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3128 break;
3129 case 0xa: /* V9 fabsd */
3130 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3131 break;
3132 case 0xb: /* V9 fabsq */
3133 CHECK_FPU_FEATURE(dc, FLOAT128);
3134 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3135 break;
3136 case 0x81: /* V9 fstox */
3137 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3138 break;
3139 case 0x82: /* V9 fdtox */
3140 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3141 break;
3142 case 0x83: /* V9 fqtox */
3143 CHECK_FPU_FEATURE(dc, FLOAT128);
3144 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3145 break;
3146 case 0x84: /* V9 fxtos */
3147 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3148 break;
3149 case 0x88: /* V9 fxtod */
3150 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3151 break;
3152 case 0x8c: /* V9 fxtoq */
3153 CHECK_FPU_FEATURE(dc, FLOAT128);
3154 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3155 break;
3156 #endif
3157 default:
3158 goto illegal_insn;
3159 }
3160             } else if (xop == 0x35) {   /* FPU Operations (FPop2) */
3161 #ifdef TARGET_SPARC64
3162 int cond;
3163 #endif
3164 if (gen_trap_ifnofpu(dc)) {
3165 goto jmp_insn;
3166 }
3167 gen_op_clear_ieee_excp_and_FTT();
3168 rs1 = GET_FIELD(insn, 13, 17);
3169 rs2 = GET_FIELD(insn, 27, 31);
3170 xop = GET_FIELD(insn, 18, 26);
3171 save_state(dc);
3172 #ifdef TARGET_SPARC64
3173 if ((xop & 0x11f) == 0x005) { // V9 fmovsr
3174 int l1;
3175
3176 l1 = gen_new_label();
3177 cond = GET_FIELD_SP(insn, 14, 17);
3178 cpu_src1 = get_src1(insn, cpu_src1);
3179 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3180 0, l1);
3181 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3182 gen_store_fpr_F(dc, rd, cpu_src1_32);
3183 gen_set_label(l1);
3184 break;
3185 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3186 int l1;
3187
3188 l1 = gen_new_label();
3189 cond = GET_FIELD_SP(insn, 14, 17);
3190 cpu_src1 = get_src1(insn, cpu_src1);
3191 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3192 0, l1);
3193 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3194 gen_store_fpr_D(dc, rd, cpu_src1_64);
3195 gen_set_label(l1);
3196 break;
3197 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3198 int l1;
3199
3200 CHECK_FPU_FEATURE(dc, FLOAT128);
3201 l1 = gen_new_label();
3202 cond = GET_FIELD_SP(insn, 14, 17);
3203 cpu_src1 = get_src1(insn, cpu_src1);
3204 tcg_gen_brcondi_tl(gen_tcg_cond_reg[cond], cpu_src1,
3205 0, l1);
3206 gen_move_Q(rd, rs2);
3207 gen_set_label(l1);
3208 break;
3209 }
3210 #endif
3211 switch (xop) {
3212 #ifdef TARGET_SPARC64
3213 #define FMOVCC(fcc, sz) \
3214 do { \
3215 DisasCompare cmp; \
3216 cond = GET_FIELD_SP(insn, 14, 17); \
3217 gen_fcompare(&cmp, fcc, cond); \
3218 gen_fmov##sz(dc, &cmp, rd, rs2); \
3219 free_compare(&cmp); \
3220 } while (0)
3221
3222 case 0x001: /* V9 fmovscc %fcc0 */
3223 FMOVCC(0, s);
3224 break;
3225 case 0x002: /* V9 fmovdcc %fcc0 */
3226 FMOVCC(0, d);
3227 break;
3228 case 0x003: /* V9 fmovqcc %fcc0 */
3229 CHECK_FPU_FEATURE(dc, FLOAT128);
3230 FMOVCC(0, q);
3231 break;
3232 case 0x041: /* V9 fmovscc %fcc1 */
3233 FMOVCC(1, s);
3234 break;
3235 case 0x042: /* V9 fmovdcc %fcc1 */
3236 FMOVCC(1, d);
3237 break;
3238 case 0x043: /* V9 fmovqcc %fcc1 */
3239 CHECK_FPU_FEATURE(dc, FLOAT128);
3240 FMOVCC(1, q);
3241 break;
3242 case 0x081: /* V9 fmovscc %fcc2 */
3243 FMOVCC(2, s);
3244 break;
3245 case 0x082: /* V9 fmovdcc %fcc2 */
3246 FMOVCC(2, d);
3247 break;
3248 case 0x083: /* V9 fmovqcc %fcc2 */
3249 CHECK_FPU_FEATURE(dc, FLOAT128);
3250 FMOVCC(2, q);
3251 break;
3252 case 0x0c1: /* V9 fmovscc %fcc3 */
3253 FMOVCC(3, s);
3254 break;
3255 case 0x0c2: /* V9 fmovdcc %fcc3 */
3256 FMOVCC(3, d);
3257 break;
3258 case 0x0c3: /* V9 fmovqcc %fcc3 */
3259 CHECK_FPU_FEATURE(dc, FLOAT128);
3260 FMOVCC(3, q);
3261 break;
3262 #undef FMOVCC
3263 #define FMOVCC(xcc, sz) \
3264 do { \
3265 DisasCompare cmp; \
3266 cond = GET_FIELD_SP(insn, 14, 17); \
3267 gen_compare(&cmp, xcc, cond, dc); \
3268 gen_fmov##sz(dc, &cmp, rd, rs2); \
3269 free_compare(&cmp); \
3270 } while (0)
3271
3272 case 0x101: /* V9 fmovscc %icc */
3273 FMOVCC(0, s);
3274 break;
3275 case 0x102: /* V9 fmovdcc %icc */
3276 FMOVCC(0, d);
3277 break;
3278 case 0x103: /* V9 fmovqcc %icc */
3279 CHECK_FPU_FEATURE(dc, FLOAT128);
3280 FMOVCC(0, q);
3281 break;
3282 case 0x181: /* V9 fmovscc %xcc */
3283 FMOVCC(1, s);
3284 break;
3285 case 0x182: /* V9 fmovdcc %xcc */
3286 FMOVCC(1, d);
3287 break;
3288 case 0x183: /* V9 fmovqcc %xcc */
3289 CHECK_FPU_FEATURE(dc, FLOAT128);
3290 FMOVCC(1, q);
3291 break;
3292 #undef FMOVCC
3293 #endif
3294 case 0x51: /* fcmps, V9 %fcc */
3295 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3296 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3297 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3298 break;
3299 case 0x52: /* fcmpd, V9 %fcc */
3300 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3301 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3302 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3303 break;
3304 case 0x53: /* fcmpq, V9 %fcc */
3305 CHECK_FPU_FEATURE(dc, FLOAT128);
3306 gen_op_load_fpr_QT0(QFPREG(rs1));
3307 gen_op_load_fpr_QT1(QFPREG(rs2));
3308 gen_op_fcmpq(rd & 3);
3309 break;
3310 case 0x55: /* fcmpes, V9 %fcc */
3311 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3312 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3313 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3314 break;
3315 case 0x56: /* fcmped, V9 %fcc */
3316 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3317 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3318 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3319 break;
3320 case 0x57: /* fcmpeq, V9 %fcc */
3321 CHECK_FPU_FEATURE(dc, FLOAT128);
3322 gen_op_load_fpr_QT0(QFPREG(rs1));
3323 gen_op_load_fpr_QT1(QFPREG(rs2));
3324 gen_op_fcmpeq(rd & 3);
3325 break;
3326 default:
3327 goto illegal_insn;
3328 }
3329 } else if (xop == 0x2) {
3330 // clr/mov shortcut
3331
3332 rs1 = GET_FIELD(insn, 13, 17);
3333 if (rs1 == 0) {
3334                     // or %g0, x, rd -> just move x into rd
3335 if (IS_IMM) { /* immediate */
3336 TCGv r_const;
3337
3338 simm = GET_FIELDs(insn, 19, 31);
3339 r_const = tcg_const_tl(simm);
3340 gen_movl_TN_reg(rd, r_const);
3341 tcg_temp_free(r_const);
3342 } else { /* register */
3343 rs2 = GET_FIELD(insn, 27, 31);
3344 gen_movl_reg_TN(rs2, cpu_dst);
3345 gen_movl_TN_reg(rd, cpu_dst);
3346 }
3347 } else {
3348 cpu_src1 = get_src1(insn, cpu_src1);
3349 if (IS_IMM) { /* immediate */
3350 simm = GET_FIELDs(insn, 19, 31);
3351 tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
3352 gen_movl_TN_reg(rd, cpu_dst);
3353 } else { /* register */
3354                         // or x, %g0, rd -> just move x into rd
3355 rs2 = GET_FIELD(insn, 27, 31);
3356 if (rs2 != 0) {
3357 gen_movl_reg_TN(rs2, cpu_src2);
3358 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3359 gen_movl_TN_reg(rd, cpu_dst);
3360 } else
3361 gen_movl_TN_reg(rd, cpu_src1);
3362 }
3363 }
3364 #ifdef TARGET_SPARC64
3365 } else if (xop == 0x25) { /* sll, V9 sllx */
3366 cpu_src1 = get_src1(insn, cpu_src1);
3367 if (IS_IMM) { /* immediate */
3368 simm = GET_FIELDs(insn, 20, 31);
3369 if (insn & (1 << 12)) {
3370 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3371 } else {
3372 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3373 }
3374 } else { /* register */
3375 rs2 = GET_FIELD(insn, 27, 31);
3376 gen_movl_reg_TN(rs2, cpu_src2);
3377 if (insn & (1 << 12)) {
3378 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3379 } else {
3380 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3381 }
3382 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3383 }
3384 gen_movl_TN_reg(rd, cpu_dst);
3385 } else if (xop == 0x26) { /* srl, V9 srlx */
3386 cpu_src1 = get_src1(insn, cpu_src1);
3387 if (IS_IMM) { /* immediate */
3388 simm = GET_FIELDs(insn, 20, 31);
3389 if (insn & (1 << 12)) {
3390 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3391 } else {
3392 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3393 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3394 }
3395 } else { /* register */
3396 rs2 = GET_FIELD(insn, 27, 31);
3397 gen_movl_reg_TN(rs2, cpu_src2);
3398 if (insn & (1 << 12)) {
3399 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3400 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3401 } else {
3402 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3403 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3404 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3405 }
3406 }
3407 gen_movl_TN_reg(rd, cpu_dst);
3408 } else if (xop == 0x27) { /* sra, V9 srax */
3409 cpu_src1 = get_src1(insn, cpu_src1);
3410 if (IS_IMM) { /* immediate */
3411 simm = GET_FIELDs(insn, 20, 31);
3412 if (insn & (1 << 12)) {
3413 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3414 } else {
3415 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3416 tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
3417 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3418 }
3419 } else { /* register */
3420 rs2 = GET_FIELD(insn, 27, 31);
3421 gen_movl_reg_TN(rs2, cpu_src2);
3422 if (insn & (1 << 12)) {
3423 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3424 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3425 } else {
3426 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3427 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3428 tcg_gen_ext32s_i64(cpu_dst, cpu_dst);
3429 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3430 }
3431 }
3432 gen_movl_TN_reg(rd, cpu_dst);
3433 #endif
3434 } else if (xop < 0x36) {
3435 if (xop < 0x20) {
3436 cpu_src1 = get_src1(insn, cpu_src1);
3437 cpu_src2 = get_src2(insn, cpu_src2);
3438 switch (xop & ~0x10) {
3439 case 0x0: /* add */
3440 if (IS_IMM) {
3441 simm = GET_FIELDs(insn, 19, 31);
3442 if (xop & 0x10) {
3443 gen_op_addi_cc(cpu_dst, cpu_src1, simm);
3444 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3445 dc->cc_op = CC_OP_ADD;
3446 } else {
3447 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
3448 }
3449 } else {
3450 if (xop & 0x10) {
3451 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3452 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3453 dc->cc_op = CC_OP_ADD;
3454 } else {
3455 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3456 }
3457 }
3458 break;
3459 case 0x1: /* and */
3460 if (IS_IMM) {
3461 simm = GET_FIELDs(insn, 19, 31);
3462 tcg_gen_andi_tl(cpu_dst, cpu_src1, simm);
3463 } else {
3464 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3465 }
3466 if (xop & 0x10) {
3467 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3468 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3469 dc->cc_op = CC_OP_LOGIC;
3470 }
3471 break;
3472 case 0x2: /* or */
3473 if (IS_IMM) {
3474 simm = GET_FIELDs(insn, 19, 31);
3475 tcg_gen_ori_tl(cpu_dst, cpu_src1, simm);
3476 } else {
3477 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3478 }
3479 if (xop & 0x10) {
3480 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3481 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3482 dc->cc_op = CC_OP_LOGIC;
3483 }
3484 break;
3485 case 0x3: /* xor */
3486 if (IS_IMM) {
3487 simm = GET_FIELDs(insn, 19, 31);
3488 tcg_gen_xori_tl(cpu_dst, cpu_src1, simm);
3489 } else {
3490 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3491 }
3492 if (xop & 0x10) {
3493 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3494 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3495 dc->cc_op = CC_OP_LOGIC;
3496 }
3497 break;
3498 case 0x4: /* sub */
3499 if (IS_IMM) {
3500 simm = GET_FIELDs(insn, 19, 31);
3501 if (xop & 0x10) {
3502 gen_op_subi_cc(cpu_dst, cpu_src1, simm, dc);
3503 } else {
3504 tcg_gen_subi_tl(cpu_dst, cpu_src1, simm);
3505 }
3506 } else {
3507 if (xop & 0x10) {
3508 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3509 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3510 dc->cc_op = CC_OP_SUB;
3511 } else {
3512 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3513 }
3514 }
3515 break;
3516 case 0x5: /* andn */
3517 if (IS_IMM) {
3518 simm = GET_FIELDs(insn, 19, 31);
3519 tcg_gen_andi_tl(cpu_dst, cpu_src1, ~simm);
3520 } else {
3521 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3522 }
3523 if (xop & 0x10) {
3524 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3525 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3526 dc->cc_op = CC_OP_LOGIC;
3527 }
3528 break;
3529 case 0x6: /* orn */
3530 if (IS_IMM) {
3531 simm = GET_FIELDs(insn, 19, 31);
3532 tcg_gen_ori_tl(cpu_dst, cpu_src1, ~simm);
3533 } else {
3534 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3535 }
3536 if (xop & 0x10) {
3537 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3538 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3539 dc->cc_op = CC_OP_LOGIC;
3540 }
3541 break;
3542 case 0x7: /* xorn */
3543 if (IS_IMM) {
3544 simm = GET_FIELDs(insn, 19, 31);
3545 tcg_gen_xori_tl(cpu_dst, cpu_src1, ~simm);
3546 } else {
3547 tcg_gen_not_tl(cpu_tmp0, cpu_src2);
3548 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_tmp0);
3549 }
3550 if (xop & 0x10) {
3551 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3552 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3553 dc->cc_op = CC_OP_LOGIC;
3554 }
3555 break;
3556 case 0x8: /* addx, V9 addc */
3557 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3558 (xop & 0x10));
3559 break;
3560 #ifdef TARGET_SPARC64
3561 case 0x9: /* V9 mulx */
3562 if (IS_IMM) {
3563 simm = GET_FIELDs(insn, 19, 31);
3564 tcg_gen_muli_i64(cpu_dst, cpu_src1, simm);
3565 } else {
3566 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3567 }
3568 break;
3569 #endif
3570 case 0xa: /* umul */
3571 CHECK_IU_FEATURE(dc, MUL);
3572 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3573 if (xop & 0x10) {
3574 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3575 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3576 dc->cc_op = CC_OP_LOGIC;
3577 }
3578 break;
3579 case 0xb: /* smul */
3580 CHECK_IU_FEATURE(dc, MUL);
3581 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3582 if (xop & 0x10) {
3583 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3584 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3585 dc->cc_op = CC_OP_LOGIC;
3586 }
3587 break;
3588 case 0xc: /* subx, V9 subc */
3589 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3590 (xop & 0x10));
3591 break;
3592 #ifdef TARGET_SPARC64
3593 case 0xd: /* V9 udivx */
3594 {
3595 TCGv r_temp1, r_temp2;
3596 r_temp1 = tcg_temp_local_new();
3597 r_temp2 = tcg_temp_local_new();
3598 tcg_gen_mov_tl(r_temp1, cpu_src1);
3599 tcg_gen_mov_tl(r_temp2, cpu_src2);
3600 gen_trap_ifdivzero_tl(r_temp2);
3601 tcg_gen_divu_i64(cpu_dst, r_temp1, r_temp2);
3602 tcg_temp_free(r_temp1);
3603 tcg_temp_free(r_temp2);
3604 }
3605 break;
3606 #endif
3607 case 0xe: /* udiv */
3608 CHECK_IU_FEATURE(dc, DIV);
3609 if (xop & 0x10) {
3610 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3611 cpu_src2);
3612 dc->cc_op = CC_OP_DIV;
3613 } else {
3614 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3615 cpu_src2);
3616 }
3617 break;
3618 case 0xf: /* sdiv */
3619 CHECK_IU_FEATURE(dc, DIV);
3620 if (xop & 0x10) {
3621 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3622 cpu_src2);
3623 dc->cc_op = CC_OP_DIV;
3624 } else {
3625 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3626 cpu_src2);
3627 }
3628 break;
3629 default:
3630 goto illegal_insn;
3631 }
3632 gen_movl_TN_reg(rd, cpu_dst);
3633 } else {
3634 cpu_src1 = get_src1(insn, cpu_src1);
3635 cpu_src2 = get_src2(insn, cpu_src2);
3636 switch (xop) {
3637 case 0x20: /* taddcc */
3638 gen_op_tadd_cc(cpu_dst, cpu_src1, cpu_src2);
3639 gen_movl_TN_reg(rd, cpu_dst);
3640 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3641 dc->cc_op = CC_OP_TADD;
3642 break;
3643 case 0x21: /* tsubcc */
3644 gen_op_tsub_cc(cpu_dst, cpu_src1, cpu_src2);
3645 gen_movl_TN_reg(rd, cpu_dst);
3646 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3647 dc->cc_op = CC_OP_TSUB;
3648 break;
3649 case 0x22: /* taddcctv */
3650 save_state(dc);
3651 gen_op_tadd_ccTV(cpu_dst, cpu_src1, cpu_src2);
3652 gen_movl_TN_reg(rd, cpu_dst);
3653 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADDTV);
3654 dc->cc_op = CC_OP_TADDTV;
3655 break;
3656 case 0x23: /* tsubcctv */
3657 save_state(dc);
3658 gen_op_tsub_ccTV(cpu_dst, cpu_src1, cpu_src2);
3659 gen_movl_TN_reg(rd, cpu_dst);
3660 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUBTV);
3661 dc->cc_op = CC_OP_TSUBTV;
3662 break;
3663 case 0x24: /* mulscc */
3664 gen_helper_compute_psr(cpu_env);
3665 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3666 gen_movl_TN_reg(rd, cpu_dst);
3667 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3668 dc->cc_op = CC_OP_ADD;
3669 break;
3670 #ifndef TARGET_SPARC64
3671 case 0x25: /* sll */
3672 if (IS_IMM) { /* immediate */
3673 simm = GET_FIELDs(insn, 20, 31);
3674 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3675 } else { /* register */
3676 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3677 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3678 }
3679 gen_movl_TN_reg(rd, cpu_dst);
3680 break;
3681 case 0x26: /* srl */
3682 if (IS_IMM) { /* immediate */
3683 simm = GET_FIELDs(insn, 20, 31);
3684 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3685 } else { /* register */
3686 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3687 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3688 }
3689 gen_movl_TN_reg(rd, cpu_dst);
3690 break;
3691 case 0x27: /* sra */
3692 if (IS_IMM) { /* immediate */
3693 simm = GET_FIELDs(insn, 20, 31);
3694 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3695 } else { /* register */
3696 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3697 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3698 }
3699 gen_movl_TN_reg(rd, cpu_dst);
3700 break;
3701 #endif
3702 case 0x30:
3703 {
3704 switch(rd) {
3705 case 0: /* wry */
3706 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3707 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3708 break;
3709 #ifndef TARGET_SPARC64
3710 case 0x01 ... 0x0f: /* undefined in the
3711 SPARCv8 manual, nop
3712 on the microSPARC
3713 II */
3714 case 0x10 ... 0x1f: /* implementation-dependent
3715 in the SPARCv8
3716 manual, nop on the
3717 microSPARC II */
3718 break;
3719 #else
3720 case 0x2: /* V9 wrccr */
3721 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3722 gen_helper_wrccr(cpu_env, cpu_dst);
3723 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3724 dc->cc_op = CC_OP_FLAGS;
3725 break;
3726 case 0x3: /* V9 wrasi */
3727 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3728 tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
3729 tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
3730 break;
3731 case 0x6: /* V9 wrfprs */
3732 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3733 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
3734 save_state(dc);
3735 gen_op_next_insn();
3736 tcg_gen_exit_tb(0);
3737 dc->is_br = 1;
3738 break;
3739 case 0xf: /* V9 sir, nop if user */
3740 #if !defined(CONFIG_USER_ONLY)
3741 if (supervisor(dc)) {
3742 ; // XXX
3743 }
3744 #endif
3745 break;
3746 case 0x13: /* Graphics Status */
3747 if (gen_trap_ifnofpu(dc)) {
3748 goto jmp_insn;
3749 }
3750 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3751 break;
3752 case 0x14: /* Softint set */
3753 if (!supervisor(dc))
3754 goto illegal_insn;
3755 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3756 gen_helper_set_softint(cpu_env, cpu_tmp64);
3757 break;
3758 case 0x15: /* Softint clear */
3759 if (!supervisor(dc))
3760 goto illegal_insn;
3761 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3762 gen_helper_clear_softint(cpu_env, cpu_tmp64);
3763 break;
3764 case 0x16: /* Softint write */
3765 if (!supervisor(dc))
3766 goto illegal_insn;
3767 tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
3768 gen_helper_write_softint(cpu_env, cpu_tmp64);
3769 break;
3770 case 0x17: /* Tick compare */
3771 #if !defined(CONFIG_USER_ONLY)
3772 if (!supervisor(dc))
3773 goto illegal_insn;
3774 #endif
3775 {
3776 TCGv_ptr r_tickptr;
3777
3778 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3779 cpu_src2);
3780 r_tickptr = tcg_temp_new_ptr();
3781 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3782 offsetof(CPUSPARCState, tick));
3783 gen_helper_tick_set_limit(r_tickptr,
3784 cpu_tick_cmpr);
3785 tcg_temp_free_ptr(r_tickptr);
3786 }
3787 break;
3788 case 0x18: /* System tick */
3789 #if !defined(CONFIG_USER_ONLY)
3790 if (!supervisor(dc))
3791 goto illegal_insn;
3792 #endif
3793 {
3794 TCGv_ptr r_tickptr;
3795
3796 tcg_gen_xor_tl(cpu_dst, cpu_src1,
3797 cpu_src2);
3798 r_tickptr = tcg_temp_new_ptr();
3799 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3800 offsetof(CPUSPARCState, stick));
3801 gen_helper_tick_set_count(r_tickptr,
3802 cpu_dst);
3803 tcg_temp_free_ptr(r_tickptr);
3804 }
3805 break;
3806 case 0x19: /* System tick compare */
3807 #if !defined(CONFIG_USER_ONLY)
3808 if (!supervisor(dc))
3809 goto illegal_insn;
3810 #endif
3811 {
3812 TCGv_ptr r_tickptr;
3813
3814 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3815 cpu_src2);
3816 r_tickptr = tcg_temp_new_ptr();
3817 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3818 offsetof(CPUSPARCState, stick));
3819 gen_helper_tick_set_limit(r_tickptr,
3820 cpu_stick_cmpr);
3821 tcg_temp_free_ptr(r_tickptr);
3822 }
3823 break;
3824
3825 case 0x10: /* Performance Control */
3826 case 0x11: /* Performance Instrumentation
3827 Counter */
3828 case 0x12: /* Dispatch Control */
3829 #endif
3830 default:
3831 goto illegal_insn;
3832 }
3833 }
3834 break;
3835 #if !defined(CONFIG_USER_ONLY)
3836 case 0x31: /* wrpsr, V9 saved, restored */
3837 {
3838 if (!supervisor(dc))
3839 goto priv_insn;
3840 #ifdef TARGET_SPARC64
3841 switch (rd) {
3842 case 0:
3843 gen_helper_saved(cpu_env);
3844 break;
3845 case 1:
3846 gen_helper_restored(cpu_env);
3847 break;
3848 case 2: /* UA2005 allclean */
3849 case 3: /* UA2005 otherw */
3850 case 4: /* UA2005 normalw */
3851 case 5: /* UA2005 invalw */
3852 // XXX
3853 default:
3854 goto illegal_insn;
3855 }
3856 #else
3857 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3858 gen_helper_wrpsr(cpu_env, cpu_dst);
3859 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3860 dc->cc_op = CC_OP_FLAGS;
3861 save_state(dc);
3862 gen_op_next_insn();
3863 tcg_gen_exit_tb(0);
3864 dc->is_br = 1;
3865 #endif
3866 }
3867 break;
3868 case 0x32: /* wrwim, V9 wrpr */
3869 {
3870 if (!supervisor(dc))
3871 goto priv_insn;
3872 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3873 #ifdef TARGET_SPARC64
3874 switch (rd) {
3875 case 0: // tpc
3876 {
3877 TCGv_ptr r_tsptr;
3878
3879 r_tsptr = tcg_temp_new_ptr();
3880 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3881 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3882 offsetof(trap_state, tpc));
3883 tcg_temp_free_ptr(r_tsptr);
3884 }
3885 break;
3886 case 1: // tnpc
3887 {
3888 TCGv_ptr r_tsptr;
3889
3890 r_tsptr = tcg_temp_new_ptr();
3891 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3892 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3893 offsetof(trap_state, tnpc));
3894 tcg_temp_free_ptr(r_tsptr);
3895 }
3896 break;
3897 case 2: // tstate
3898 {
3899 TCGv_ptr r_tsptr;
3900
3901 r_tsptr = tcg_temp_new_ptr();
3902 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3903 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3904 offsetof(trap_state,
3905 tstate));
3906 tcg_temp_free_ptr(r_tsptr);
3907 }
3908 break;
3909 case 3: // tt
3910 {
3911 TCGv_ptr r_tsptr;
3912
3913 r_tsptr = tcg_temp_new_ptr();
3914 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3915 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3916 tcg_gen_st_i32(cpu_tmp32, r_tsptr,
3917 offsetof(trap_state, tt));
3918 tcg_temp_free_ptr(r_tsptr);
3919 }
3920 break;
3921 case 4: // tick
3922 {
3923 TCGv_ptr r_tickptr;
3924
3925 r_tickptr = tcg_temp_new_ptr();
3926 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3927 offsetof(CPUSPARCState, tick));
3928 gen_helper_tick_set_count(r_tickptr,
3929 cpu_tmp0);
3930 tcg_temp_free_ptr(r_tickptr);
3931 }
3932 break;
3933 case 5: // tba
3934 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3935 break;
3936 case 6: // pstate
3937 {
3938 TCGv r_tmp = tcg_temp_local_new();
3939
3940 tcg_gen_mov_tl(r_tmp, cpu_tmp0);
3941 save_state(dc);
3942 gen_helper_wrpstate(cpu_env, r_tmp);
3943 tcg_temp_free(r_tmp);
3944 dc->npc = DYNAMIC_PC;
3945 }
3946 break;
3947 case 7: // tl
3948 {
3949 TCGv r_tmp = tcg_temp_local_new();
3950
3951 tcg_gen_mov_tl(r_tmp, cpu_tmp0);
3952 save_state(dc);
3953 tcg_gen_trunc_tl_i32(cpu_tmp32, r_tmp);
3954 tcg_temp_free(r_tmp);
3955 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3956 offsetof(CPUSPARCState, tl));
3957 dc->npc = DYNAMIC_PC;
3958 }
3959 break;
3960 case 8: // pil
3961 gen_helper_wrpil(cpu_env, cpu_tmp0);
3962 break;
3963 case 9: // cwp
3964 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3965 break;
3966 case 10: // cansave
3967 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3968 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3969 offsetof(CPUSPARCState,
3970 cansave));
3971 break;
3972 case 11: // canrestore
3973 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3974 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3975 offsetof(CPUSPARCState,
3976 canrestore));
3977 break;
3978 case 12: // cleanwin
3979 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3980 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3981 offsetof(CPUSPARCState,
3982 cleanwin));
3983 break;
3984 case 13: // otherwin
3985 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3986 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3987 offsetof(CPUSPARCState,
3988 otherwin));
3989 break;
3990 case 14: // wstate
3991 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3992 tcg_gen_st_i32(cpu_tmp32, cpu_env,
3993 offsetof(CPUSPARCState,
3994 wstate));
3995 break;
3996 case 16: // UA2005 gl
3997 CHECK_IU_FEATURE(dc, GL);
3998 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
3999 tcg_gen_st_i32(cpu_tmp32, cpu_env,
4000 offsetof(CPUSPARCState, gl));
4001 break;
4002 case 26: // UA2005 strand status
4003 CHECK_IU_FEATURE(dc, HYPV);
4004 if (!hypervisor(dc))
4005 goto priv_insn;
4006 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4007 break;
4008 default:
4009 goto illegal_insn;
4010 }
4011 #else
4012 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
4013 if (dc->def->nwindows != 32)
4014 tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
4015 (1 << dc->def->nwindows) - 1);
4016 tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
4017 #endif
4018 }
4019 break;
4020 case 0x33: /* wrtbr, UA2005 wrhpr */
4021 {
4022 #ifndef TARGET_SPARC64
4023 if (!supervisor(dc))
4024 goto priv_insn;
4025 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4026 #else
4027 CHECK_IU_FEATURE(dc, HYPV);
4028 if (!hypervisor(dc))
4029 goto priv_insn;
4030 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4031 switch (rd) {
4032 case 0: // hpstate
4033 // XXX gen_op_wrhpstate();
4034 save_state(dc);
4035 gen_op_next_insn();
4036 tcg_gen_exit_tb(0);
4037 dc->is_br = 1;
4038 break;
4039 case 1: // htstate
4040 // XXX gen_op_wrhtstate();
4041 break;
4042 case 3: // hintp
4043 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4044 break;
4045 case 5: // htba
4046 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4047 break;
4048 case 31: // hstick_cmpr
4049 {
4050 TCGv_ptr r_tickptr;
4051
4052 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4053 r_tickptr = tcg_temp_new_ptr();
4054 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4055 offsetof(CPUSPARCState, hstick));
4056 gen_helper_tick_set_limit(r_tickptr,
4057 cpu_hstick_cmpr);
4058 tcg_temp_free_ptr(r_tickptr);
4059 }
4060 break;
4061 case 6: // hver readonly
4062 default:
4063 goto illegal_insn;
4064 }
4065 #endif
4066 }
4067 break;
4068 #endif
4069 #ifdef TARGET_SPARC64
4070 case 0x2c: /* V9 movcc */
4071 {
4072 int cc = GET_FIELD_SP(insn, 11, 12);
4073 int cond = GET_FIELD_SP(insn, 14, 17);
4074 DisasCompare cmp;
4075
4076 if (insn & (1 << 18)) {
4077 if (cc == 0) {
4078 gen_compare(&cmp, 0, cond, dc);
4079 } else if (cc == 2) {
4080 gen_compare(&cmp, 1, cond, dc);
4081 } else {
4082 goto illegal_insn;
4083 }
4084 } else {
4085 gen_fcompare(&cmp, cc, cond);
4086 }
4087
4088 /* The get_src2 above loaded the normal 13-bit
4089 immediate field, not the 11-bit field we have
4090 in movcc. But it did handle the reg case. */
4091 if (IS_IMM) {
4092 simm = GET_FIELD_SPs(insn, 0, 10);
4093 tcg_gen_movi_tl(cpu_src2, simm);
4094 }
4095
4096 gen_movl_reg_TN(rd, cpu_dst);
4097 tcg_gen_movcond_tl(cmp.cond, cpu_dst,
4098 cmp.c1, cmp.c2,
4099 cpu_src2, cpu_dst);
4100 free_compare(&cmp);
4101 gen_movl_TN_reg(rd, cpu_dst);
4102 break;
4103 }
4104 case 0x2d: /* V9 sdivx */
4105 gen_op_sdivx(cpu_dst, cpu_src1, cpu_src2);
4106 gen_movl_TN_reg(rd, cpu_dst);
4107 break;
4108 case 0x2e: /* V9 popc */
4109 {
4110 cpu_src2 = get_src2(insn, cpu_src2);
4111 gen_helper_popc(cpu_dst, cpu_src2);
4112 gen_movl_TN_reg(rd, cpu_dst);
                        break;
4113                 }
4114 case 0x2f: /* V9 movr */
4115 {
4116 int cond = GET_FIELD_SP(insn, 10, 12);
4117 DisasCompare cmp;
4118
4119 gen_compare_reg(&cmp, cond, cpu_src1);
4120
4121 /* The get_src2 above loaded the normal 13-bit
4122 immediate field, not the 10-bit field we have
4123 in movr. But it did handle the reg case. */
4124 if (IS_IMM) {
4125 simm = GET_FIELD_SPs(insn, 0, 9);
4126 tcg_gen_movi_tl(cpu_src2, simm);
4127 }
4128
4129 gen_movl_reg_TN(rd, cpu_dst);
4130 tcg_gen_movcond_tl(cmp.cond, cpu_dst,
4131 cmp.c1, cmp.c2,
4132 cpu_src2, cpu_dst);
4133 free_compare(&cmp);
4134 gen_movl_TN_reg(rd, cpu_dst);
4135 break;
4136 }
4137 #endif
4138 default:
4139 goto illegal_insn;
4140 }
4141 }
4142 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4143 #ifdef TARGET_SPARC64
4144 int opf = GET_FIELD_SP(insn, 5, 13);
4145 rs1 = GET_FIELD(insn, 13, 17);
4146 rs2 = GET_FIELD(insn, 27, 31);
4147 if (gen_trap_ifnofpu(dc)) {
4148 goto jmp_insn;
4149 }
4150
4151 switch (opf) {
4152 case 0x000: /* VIS I edge8cc */
4153 CHECK_FPU_FEATURE(dc, VIS1);
4154 gen_movl_reg_TN(rs1, cpu_src1);
4155 gen_movl_reg_TN(rs2, cpu_src2);
4156 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4157 gen_movl_TN_reg(rd, cpu_dst);
4158 break;
4159 case 0x001: /* VIS II edge8n */
4160 CHECK_FPU_FEATURE(dc, VIS2);
4161 gen_movl_reg_TN(rs1, cpu_src1);
4162 gen_movl_reg_TN(rs2, cpu_src2);
4163 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4164 gen_movl_TN_reg(rd, cpu_dst);
4165 break;
4166 case 0x002: /* VIS I edge8lcc */
4167 CHECK_FPU_FEATURE(dc, VIS1);
4168 gen_movl_reg_TN(rs1, cpu_src1);
4169 gen_movl_reg_TN(rs2, cpu_src2);
4170 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4171 gen_movl_TN_reg(rd, cpu_dst);
4172 break;
4173 case 0x003: /* VIS II edge8ln */
4174 CHECK_FPU_FEATURE(dc, VIS2);
4175 gen_movl_reg_TN(rs1, cpu_src1);
4176 gen_movl_reg_TN(rs2, cpu_src2);
4177 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4178 gen_movl_TN_reg(rd, cpu_dst);
4179 break;
4180 case 0x004: /* VIS I edge16cc */
4181 CHECK_FPU_FEATURE(dc, VIS1);
4182 gen_movl_reg_TN(rs1, cpu_src1);
4183 gen_movl_reg_TN(rs2, cpu_src2);
4184 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4185 gen_movl_TN_reg(rd, cpu_dst);
4186 break;
4187 case 0x005: /* VIS II edge16n */
4188 CHECK_FPU_FEATURE(dc, VIS2);
4189 gen_movl_reg_TN(rs1, cpu_src1);
4190 gen_movl_reg_TN(rs2, cpu_src2);
4191 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4192 gen_movl_TN_reg(rd, cpu_dst);
4193 break;
4194 case 0x006: /* VIS I edge16lcc */
4195 CHECK_FPU_FEATURE(dc, VIS1);
4196 gen_movl_reg_TN(rs1, cpu_src1);
4197 gen_movl_reg_TN(rs2, cpu_src2);
4198 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4199 gen_movl_TN_reg(rd, cpu_dst);
4200 break;
4201 case 0x007: /* VIS II edge16ln */
4202 CHECK_FPU_FEATURE(dc, VIS2);
4203 gen_movl_reg_TN(rs1, cpu_src1);
4204 gen_movl_reg_TN(rs2, cpu_src2);
4205 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4206 gen_movl_TN_reg(rd, cpu_dst);
4207 break;
4208 case 0x008: /* VIS I edge32cc */
4209 CHECK_FPU_FEATURE(dc, VIS1);
4210 gen_movl_reg_TN(rs1, cpu_src1);
4211 gen_movl_reg_TN(rs2, cpu_src2);
4212 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4213 gen_movl_TN_reg(rd, cpu_dst);
4214 break;
4215 case 0x009: /* VIS II edge32n */
4216 CHECK_FPU_FEATURE(dc, VIS2);
4217 gen_movl_reg_TN(rs1, cpu_src1);
4218 gen_movl_reg_TN(rs2, cpu_src2);
4219 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4220 gen_movl_TN_reg(rd, cpu_dst);
4221 break;
4222 case 0x00a: /* VIS I edge32lcc */
4223 CHECK_FPU_FEATURE(dc, VIS1);
4224 gen_movl_reg_TN(rs1, cpu_src1);
4225 gen_movl_reg_TN(rs2, cpu_src2);
4226 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4227 gen_movl_TN_reg(rd, cpu_dst);
4228 break;
4229 case 0x00b: /* VIS II edge32ln */
4230 CHECK_FPU_FEATURE(dc, VIS2);
4231 gen_movl_reg_TN(rs1, cpu_src1);
4232 gen_movl_reg_TN(rs2, cpu_src2);
4233 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4234 gen_movl_TN_reg(rd, cpu_dst);
4235 break;
4236 case 0x010: /* VIS I array8 */
4237 CHECK_FPU_FEATURE(dc, VIS1);
4238 cpu_src1 = get_src1(insn, cpu_src1);
4239 gen_movl_reg_TN(rs2, cpu_src2);
4240 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4241 gen_movl_TN_reg(rd, cpu_dst);
4242 break;
4243 case 0x012: /* VIS I array16 */
4244 CHECK_FPU_FEATURE(dc, VIS1);
4245 cpu_src1 = get_src1(insn, cpu_src1);
4246 gen_movl_reg_TN(rs2, cpu_src2);
4247 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4248 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4249 gen_movl_TN_reg(rd, cpu_dst);
4250 break;
4251 case 0x014: /* VIS I array32 */
4252 CHECK_FPU_FEATURE(dc, VIS1);
4253 cpu_src1 = get_src1(insn, cpu_src1);
4254 gen_movl_reg_TN(rs2, cpu_src2);
4255 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4256 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4257 gen_movl_TN_reg(rd, cpu_dst);
4258 break;
4259 case 0x018: /* VIS I alignaddr */
4260 CHECK_FPU_FEATURE(dc, VIS1);
4261 cpu_src1 = get_src1(insn, cpu_src1);
4262 gen_movl_reg_TN(rs2, cpu_src2);
4263 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4264 gen_movl_TN_reg(rd, cpu_dst);
4265 break;
4266 case 0x01a: /* VIS I alignaddrl */
4267 CHECK_FPU_FEATURE(dc, VIS1);
4268 cpu_src1 = get_src1(insn, cpu_src1);
4269 gen_movl_reg_TN(rs2, cpu_src2);
4270 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4271 gen_movl_TN_reg(rd, cpu_dst);
4272 break;
4273 case 0x019: /* VIS II bmask */
4274 CHECK_FPU_FEATURE(dc, VIS2);
4275 cpu_src1 = get_src1(insn, cpu_src1);
4276 cpu_src2 = get_src2(insn, cpu_src2);
4277 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4278 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4279 gen_movl_TN_reg(rd, cpu_dst);
4280 break;
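/* bmask: the 32-bit sum computed above is deposited into the high
   half of %gsr, where bshuffle (opf 0x04c below) picks it up as
   its byte-selector mask.  */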
4281 case 0x020: /* VIS I fcmple16 */
4282 CHECK_FPU_FEATURE(dc, VIS1);
4283 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4284 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4285 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4286 gen_movl_TN_reg(rd, cpu_dst);
4287 break;
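/* This and the other VIS fcmp* cases below return a per-lane
   bitmask in the integer destination register (one bit per 16- or
   32-bit element), not FSR condition codes.  */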
4288 case 0x022: /* VIS I fcmpne16 */
4289 CHECK_FPU_FEATURE(dc, VIS1);
4290 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4291 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4292 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4293 gen_movl_TN_reg(rd, cpu_dst);
4294 break;
4295 case 0x024: /* VIS I fcmple32 */
4296 CHECK_FPU_FEATURE(dc, VIS1);
4297 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4298 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4299 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4300 gen_movl_TN_reg(rd, cpu_dst);
4301 break;
4302 case 0x026: /* VIS I fcmpne32 */
4303 CHECK_FPU_FEATURE(dc, VIS1);
4304 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4305 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4306 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4307 gen_movl_TN_reg(rd, cpu_dst);
4308 break;
4309 case 0x028: /* VIS I fcmpgt16 */
4310 CHECK_FPU_FEATURE(dc, VIS1);
4311 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4312 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4313 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4314 gen_movl_TN_reg(rd, cpu_dst);
4315 break;
4316 case 0x02a: /* VIS I fcmpeq16 */
4317 CHECK_FPU_FEATURE(dc, VIS1);
4318 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4319 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4320 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4321 gen_movl_TN_reg(rd, cpu_dst);
4322 break;
4323 case 0x02c: /* VIS I fcmpgt32 */
4324 CHECK_FPU_FEATURE(dc, VIS1);
4325 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4326 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4327 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4328 gen_movl_TN_reg(rd, cpu_dst);
4329 break;
4330 case 0x02e: /* VIS I fcmpeq32 */
4331 CHECK_FPU_FEATURE(dc, VIS1);
4332 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4333 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4334 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4335 gen_movl_TN_reg(rd, cpu_dst);
4336 break;
4337 case 0x031: /* VIS I fmul8x16 */
4338 CHECK_FPU_FEATURE(dc, VIS1);
4339 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4340 break;
4341 case 0x033: /* VIS I fmul8x16au */
4342 CHECK_FPU_FEATURE(dc, VIS1);
4343 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4344 break;
4345 case 0x035: /* VIS I fmul8x16al */
4346 CHECK_FPU_FEATURE(dc, VIS1);
4347 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4348 break;
4349 case 0x036: /* VIS I fmul8sux16 */
4350 CHECK_FPU_FEATURE(dc, VIS1);
4351 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4352 break;
4353 case 0x037: /* VIS I fmul8ulx16 */
4354 CHECK_FPU_FEATURE(dc, VIS1);
4355 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4356 break;
4357 case 0x038: /* VIS I fmuld8sux16 */
4358 CHECK_FPU_FEATURE(dc, VIS1);
4359 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4360 break;
4361 case 0x039: /* VIS I fmuld8ulx16 */
4362 CHECK_FPU_FEATURE(dc, VIS1);
4363 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4364 break;
4365 case 0x03a: /* VIS I fpack32 */
4366 CHECK_FPU_FEATURE(dc, VIS1);
4367 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4368 break;
4369 case 0x03b: /* VIS I fpack16 */
4370 CHECK_FPU_FEATURE(dc, VIS1);
4371 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4372 cpu_dst_32 = gen_dest_fpr_F();
4373 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4374 gen_store_fpr_F(dc, rd, cpu_dst_32);
4375 break;
4376 case 0x03d: /* VIS I fpackfix */
4377 CHECK_FPU_FEATURE(dc, VIS1);
4378 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4379 cpu_dst_32 = gen_dest_fpr_F();
4380 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4381 gen_store_fpr_F(dc, rd, cpu_dst_32);
4382 break;
4383 case 0x03e: /* VIS I pdist */
4384 CHECK_FPU_FEATURE(dc, VIS1);
4385 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4386 break;
4387 case 0x048: /* VIS I faligndata */
4388 CHECK_FPU_FEATURE(dc, VIS1);
4389 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4390 break;
4391 case 0x04b: /* VIS I fpmerge */
4392 CHECK_FPU_FEATURE(dc, VIS1);
4393 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4394 break;
4395 case 0x04c: /* VIS II bshuffle */
4396 CHECK_FPU_FEATURE(dc, VIS2);
4397 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4398 break;
4399 case 0x04d: /* VIS I fexpand */
4400 CHECK_FPU_FEATURE(dc, VIS1);
4401 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4402 break;
4403 case 0x050: /* VIS I fpadd16 */
4404 CHECK_FPU_FEATURE(dc, VIS1);
4405 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4406 break;
4407 case 0x051: /* VIS I fpadd16s */
4408 CHECK_FPU_FEATURE(dc, VIS1);
4409 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4410 break;
4411 case 0x052: /* VIS I fpadd32 */
4412 CHECK_FPU_FEATURE(dc, VIS1);
4413 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4414 break;
4415 case 0x053: /* VIS I fpadd32s */
4416 CHECK_FPU_FEATURE(dc, VIS1);
4417 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4418 break;
4419 case 0x054: /* VIS I fpsub16 */
4420 CHECK_FPU_FEATURE(dc, VIS1);
4421 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4422 break;
4423 case 0x055: /* VIS I fpsub16s */
4424 CHECK_FPU_FEATURE(dc, VIS1);
4425 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4426 break;
4427 case 0x056: /* VIS I fpsub32 */
4428 CHECK_FPU_FEATURE(dc, VIS1);
4429 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4430 break;
4431 case 0x057: /* VIS I fpsub32s */
4432 CHECK_FPU_FEATURE(dc, VIS1);
4433 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4434 break;
4435 case 0x060: /* VIS I fzero */
4436 CHECK_FPU_FEATURE(dc, VIS1);
4437 cpu_dst_64 = gen_dest_fpr_D();
4438 tcg_gen_movi_i64(cpu_dst_64, 0);
4439 gen_store_fpr_D(dc, rd, cpu_dst_64);
4440 break;
4441 case 0x061: /* VIS I fzeros */
4442 CHECK_FPU_FEATURE(dc, VIS1);
4443 cpu_dst_32 = gen_dest_fpr_F();
4444 tcg_gen_movi_i32(cpu_dst_32, 0);
4445 gen_store_fpr_F(dc, rd, cpu_dst_32);
4446 break;
4447 case 0x062: /* VIS I fnor */
4448 CHECK_FPU_FEATURE(dc, VIS1);
4449 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4450 break;
4451 case 0x063: /* VIS I fnors */
4452 CHECK_FPU_FEATURE(dc, VIS1);
4453 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4454 break;
4455 case 0x064: /* VIS I fandnot2 */
4456 CHECK_FPU_FEATURE(dc, VIS1);
4457 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4458 break;
4459 case 0x065: /* VIS I fandnot2s */
4460 CHECK_FPU_FEATURE(dc, VIS1);
4461 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4462 break;
4463 case 0x066: /* VIS I fnot2 */
4464 CHECK_FPU_FEATURE(dc, VIS1);
4465 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4466 break;
4467 case 0x067: /* VIS I fnot2s */
4468 CHECK_FPU_FEATURE(dc, VIS1);
4469 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4470 break;
4471 case 0x068: /* VIS I fandnot1 */
4472 CHECK_FPU_FEATURE(dc, VIS1);
4473 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4474 break;
4475 case 0x069: /* VIS I fandnot1s */
4476 CHECK_FPU_FEATURE(dc, VIS1);
4477 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4478 break;
4479 case 0x06a: /* VIS I fnot1 */
4480 CHECK_FPU_FEATURE(dc, VIS1);
4481 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4482 break;
4483 case 0x06b: /* VIS I fnot1s */
4484 CHECK_FPU_FEATURE(dc, VIS1);
4485 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4486 break;
4487 case 0x06c: /* VIS I fxor */
4488 CHECK_FPU_FEATURE(dc, VIS1);
4489 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4490 break;
4491 case 0x06d: /* VIS I fxors */
4492 CHECK_FPU_FEATURE(dc, VIS1);
4493 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4494 break;
4495 case 0x06e: /* VIS I fnand */
4496 CHECK_FPU_FEATURE(dc, VIS1);
4497 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4498 break;
4499 case 0x06f: /* VIS I fnands */
4500 CHECK_FPU_FEATURE(dc, VIS1);
4501 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4502 break;
4503 case 0x070: /* VIS I fand */
4504 CHECK_FPU_FEATURE(dc, VIS1);
4505 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4506 break;
4507 case 0x071: /* VIS I fands */
4508 CHECK_FPU_FEATURE(dc, VIS1);
4509 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4510 break;
4511 case 0x072: /* VIS I fxnor */
4512 CHECK_FPU_FEATURE(dc, VIS1);
4513 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4514 break;
4515 case 0x073: /* VIS I fxnors */
4516 CHECK_FPU_FEATURE(dc, VIS1);
4517 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4518 break;
4519 case 0x074: /* VIS I fsrc1 */
4520 CHECK_FPU_FEATURE(dc, VIS1);
4521 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4522 gen_store_fpr_D(dc, rd, cpu_src1_64);
4523 break;
4524 case 0x075: /* VIS I fsrc1s */
4525 CHECK_FPU_FEATURE(dc, VIS1);
4526 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4527 gen_store_fpr_F(dc, rd, cpu_src1_32);
4528 break;
4529 case 0x076: /* VIS I fornot2 */
4530 CHECK_FPU_FEATURE(dc, VIS1);
4531 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4532 break;
4533 case 0x077: /* VIS I fornot2s */
4534 CHECK_FPU_FEATURE(dc, VIS1);
4535 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4536 break;
4537 case 0x078: /* VIS I fsrc2 */
4538 CHECK_FPU_FEATURE(dc, VIS1);
4539 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4540 gen_store_fpr_D(dc, rd, cpu_src1_64);
4541 break;
4542 case 0x079: /* VIS I fsrc2s */
4543 CHECK_FPU_FEATURE(dc, VIS1);
4544 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4545 gen_store_fpr_F(dc, rd, cpu_src1_32);
4546 break;
4547 case 0x07a: /* VIS I fornot1 */
4548 CHECK_FPU_FEATURE(dc, VIS1);
4549 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4550 break;
4551 case 0x07b: /* VIS I fornot1s */
4552 CHECK_FPU_FEATURE(dc, VIS1);
4553 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4554 break;
4555 case 0x07c: /* VIS I for */
4556 CHECK_FPU_FEATURE(dc, VIS1);
4557 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4558 break;
4559 case 0x07d: /* VIS I fors */
4560 CHECK_FPU_FEATURE(dc, VIS1);
4561 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4562 break;
4563 case 0x07e: /* VIS I fone */
4564 CHECK_FPU_FEATURE(dc, VIS1);
4565 cpu_dst_64 = gen_dest_fpr_D();
4566 tcg_gen_movi_i64(cpu_dst_64, -1);
4567 gen_store_fpr_D(dc, rd, cpu_dst_64);
4568 break;
4569 case 0x07f: /* VIS I fones */
4570 CHECK_FPU_FEATURE(dc, VIS1);
4571 cpu_dst_32 = gen_dest_fpr_F();
4572 tcg_gen_movi_i32(cpu_dst_32, -1);
4573 gen_store_fpr_F(dc, rd, cpu_dst_32);
4574 break;
4575 case 0x080: /* VIS I shutdown */
4576 case 0x081: /* VIS II siam */
4577 // XXX: shutdown/siam not implemented, treat as illegal for now
4578 goto illegal_insn;
4579 default:
4580 goto illegal_insn;
4581 }
4582 #else
4583 goto ncp_insn;
4584 #endif
4585 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4586 #ifdef TARGET_SPARC64
4587 goto illegal_insn;
4588 #else
4589 goto ncp_insn;
4590 #endif
4591 #ifdef TARGET_SPARC64
4592 } else if (xop == 0x39) { /* V9 return */
4593 TCGv_i32 r_const;
4594
4595 save_state(dc);
4596 cpu_src1 = get_src1(insn, cpu_src1);
4597 if (IS_IMM) { /* immediate */
4598 simm = GET_FIELDs(insn, 19, 31);
4599 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4600 } else { /* register */
4601 rs2 = GET_FIELD(insn, 27, 31);
4602 if (rs2) {
4603 gen_movl_reg_TN(rs2, cpu_src2);
4604 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4605 } else
4606 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4607 }
4608 gen_helper_restore(cpu_env);
4609 gen_mov_pc_npc(dc);
4610 r_const = tcg_const_i32(3);
4611 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4612 tcg_temp_free_i32(r_const);
4613 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4614 dc->npc = DYNAMIC_PC;
4615 goto jmp_insn;
4616 #endif
4617 } else {
4618 cpu_src1 = get_src1(insn, cpu_src1);
4619 if (IS_IMM) { /* immediate */
4620 simm = GET_FIELDs(insn, 19, 31);
4621 tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
4622 } else { /* register */
4623 rs2 = GET_FIELD(insn, 27, 31);
4624 if (rs2) {
4625 gen_movl_reg_TN(rs2, cpu_src2);
4626 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4627 } else
4628 tcg_gen_mov_tl(cpu_dst, cpu_src1);
4629 }
4630 switch (xop) {
4631 case 0x38: /* jmpl */
4632 {
4633 TCGv r_pc;
4634 TCGv_i32 r_const;
4635
4636 r_pc = tcg_const_tl(dc->pc);
4637 gen_movl_TN_reg(rd, r_pc);
4638 tcg_temp_free(r_pc);
4639 gen_mov_pc_npc(dc);
4640 r_const = tcg_const_i32(3);
4641 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4642 tcg_temp_free_i32(r_const);
4643 gen_address_mask(dc, cpu_dst);
4644 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4645 dc->npc = DYNAMIC_PC;
4646 }
4647 goto jmp_insn;
4648 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4649 case 0x39: /* rett, V9 return */
4650 {
4651 TCGv_i32 r_const;
4652
4653 if (!supervisor(dc))
4654 goto priv_insn;
4655 gen_mov_pc_npc(dc);
4656 r_const = tcg_const_i32(3);
4657 gen_helper_check_align(cpu_env, cpu_dst, r_const);
4658 tcg_temp_free_i32(r_const);
4659 tcg_gen_mov_tl(cpu_npc, cpu_dst);
4660 dc->npc = DYNAMIC_PC;
4661 gen_helper_rett(cpu_env);
4662 }
4663 goto jmp_insn;
4664 #endif
4665 case 0x3b: /* flush */
4666 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4667 goto unimp_flush;
4668 /* nop */
4669 break;
4670 case 0x3c: /* save */
4671 save_state(dc);
4672 gen_helper_save(cpu_env);
4673 gen_movl_TN_reg(rd, cpu_dst);
4674 break;
4675 case 0x3d: /* restore */
4676 save_state(dc);
4677 gen_helper_restore(cpu_env);
4678 gen_movl_TN_reg(rd, cpu_dst);
4679 break;
4680 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4681 case 0x3e: /* V9 done/retry */
4682 {
4683 switch (rd) {
4684 case 0:
4685 if (!supervisor(dc))
4686 goto priv_insn;
4687 dc->npc = DYNAMIC_PC;
4688 dc->pc = DYNAMIC_PC;
4689 gen_helper_done(cpu_env);
4690 goto jmp_insn;
4691 case 1:
4692 if (!supervisor(dc))
4693 goto priv_insn;
4694 dc->npc = DYNAMIC_PC;
4695 dc->pc = DYNAMIC_PC;
4696 gen_helper_retry(cpu_env);
4697 goto jmp_insn;
4698 default:
4699 goto illegal_insn;
4700 }
4701 }
4702 break;
4703 #endif
4704 default:
4705 goto illegal_insn;
4706 }
4707 }
4708 break;
4709 }
4710 break;
4711 case 3: /* load/store instructions */
4712 {
4713 unsigned int xop = GET_FIELD(insn, 7, 12);
4714
4715 /* flush pending conditional evaluations before exposing
4716 cpu state */
4717 if (dc->cc_op != CC_OP_FLAGS) {
4718 dc->cc_op = CC_OP_FLAGS;
4719 gen_helper_compute_psr(cpu_env);
4720 }
4721 cpu_src1 = get_src1(insn, cpu_src1);
4722 if (xop == 0x3c || xop == 0x3e) { // V9 casa/casxa
4723 rs2 = GET_FIELD(insn, 27, 31);
4724 gen_movl_reg_TN(rs2, cpu_src2);
4725 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4726 } else if (IS_IMM) { /* immediate */
4727 simm = GET_FIELDs(insn, 19, 31);
4728 tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
4729 } else { /* register */
4730 rs2 = GET_FIELD(insn, 27, 31);
4731 if (rs2 != 0) {
4732 gen_movl_reg_TN(rs2, cpu_src2);
4733 tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
4734 } else
4735 tcg_gen_mov_tl(cpu_addr, cpu_src1);
4736 }
4737 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4738 (xop > 0x17 && xop <= 0x1d ) ||
4739 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4740 switch (xop) {
4741 case 0x0: /* ld, V9 lduw, load unsigned word */
4742 gen_address_mask(dc, cpu_addr);
4743 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4744 break;
4745 case 0x1: /* ldub, load unsigned byte */
4746 gen_address_mask(dc, cpu_addr);
4747 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4748 break;
4749 case 0x2: /* lduh, load unsigned halfword */
4750 gen_address_mask(dc, cpu_addr);
4751 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4752 break;
4753 case 0x3: /* ldd, load double word */
4754 if (rd & 1)
4755 goto illegal_insn;
4756 else {
4757 TCGv_i32 r_const;
4758
4759 save_state(dc);
4760 r_const = tcg_const_i32(7);
4761 /* XXX remove alignment check */
4762 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4763 tcg_temp_free_i32(r_const);
4764 gen_address_mask(dc, cpu_addr);
4765 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4766 tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
4767 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
4768 gen_movl_TN_reg(rd + 1, cpu_tmp0);
4769 tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
4770 tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
4771 tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
4772 }
4773 break;
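/* SPARC is big-endian: the 64-bit load yields the word at [addr]
   in bits 63..32 (written to the even register rd after the
   switch, via cpu_val) and the word at [addr+4] in bits 31..0
   (written to rd+1 here).  */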
4774 case 0x9: /* ldsb, load signed byte */
4775 gen_address_mask(dc, cpu_addr);
4776 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4777 break;
4778 case 0xa: /* ldsh, load signed halfword */
4779 gen_address_mask(dc, cpu_addr);
4780 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4781 break;
4782 case 0xd: /* ldstub -- XXX: should be atomic */
4783 {
4784 TCGv r_const;
4785
4786 gen_address_mask(dc, cpu_addr);
4787 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx); /* zero-extend per V8 */
4788 r_const = tcg_const_tl(0xff);
4789 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4790 tcg_temp_free(r_const);
4791 }
4792 break;
4793 case 0x0f: /* swap register with memory -- XXX: should
4794 be atomic */
4795 CHECK_IU_FEATURE(dc, SWAP);
4796 gen_movl_reg_TN(rd, cpu_val);
4797 gen_address_mask(dc, cpu_addr);
4798 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4799 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4800 tcg_gen_mov_tl(cpu_val, cpu_tmp0);
4801 break;
4802 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4803 case 0x10: /* lda, V9 lduwa, load word alternate */
4804 #ifndef TARGET_SPARC64
4805 if (IS_IMM)
4806 goto illegal_insn;
4807 if (!supervisor(dc))
4808 goto priv_insn;
4809 #endif
4810 save_state(dc);
4811 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4812 break;
4813 case 0x11: /* lduba, load unsigned byte alternate */
4814 #ifndef TARGET_SPARC64
4815 if (IS_IMM)
4816 goto illegal_insn;
4817 if (!supervisor(dc))
4818 goto priv_insn;
4819 #endif
4820 save_state(dc);
4821 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4822 break;
4823 case 0x12: /* lduha, load unsigned halfword alternate */
4824 #ifndef TARGET_SPARC64
4825 if (IS_IMM)
4826 goto illegal_insn;
4827 if (!supervisor(dc))
4828 goto priv_insn;
4829 #endif
4830 save_state(dc);
4831 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4832 break;
4833 case 0x13: /* ldda, load double word alternate */
4834 #ifndef TARGET_SPARC64
4835 if (IS_IMM)
4836 goto illegal_insn;
4837 if (!supervisor(dc))
4838 goto priv_insn;
4839 #endif
4840 if (rd & 1)
4841 goto illegal_insn;
4842 save_state(dc);
4843 gen_ldda_asi(cpu_val, cpu_addr, insn, rd);
4844 goto skip_move;
4845 case 0x19: /* ldsba, load signed byte alternate */
4846 #ifndef TARGET_SPARC64
4847 if (IS_IMM)
4848 goto illegal_insn;
4849 if (!supervisor(dc))
4850 goto priv_insn;
4851 #endif
4852 save_state(dc);
4853 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4854 break;
4855 case 0x1a: /* ldsha, load signed halfword alternate */
4856 #ifndef TARGET_SPARC64
4857 if (IS_IMM)
4858 goto illegal_insn;
4859 if (!supervisor(dc))
4860 goto priv_insn;
4861 #endif
4862 save_state(dc);
4863 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4864 break;
4865 case 0x1d: /* ldstuba -- XXX: should be atomic */
4866 #ifndef TARGET_SPARC64
4867 if (IS_IMM)
4868 goto illegal_insn;
4869 if (!supervisor(dc))
4870 goto priv_insn;
4871 #endif
4872 save_state(dc);
4873 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4874 break;
4875 case 0x1f: /* swapa, swap reg with alternate memory -- XXX:
4876 should be atomic */
4877 CHECK_IU_FEATURE(dc, SWAP);
4878 #ifndef TARGET_SPARC64
4879 if (IS_IMM)
4880 goto illegal_insn;
4881 if (!supervisor(dc))
4882 goto priv_insn;
4883 #endif
4884 save_state(dc);
4885 gen_movl_reg_TN(rd, cpu_val);
4886 gen_swap_asi(cpu_val, cpu_addr, insn);
4887 break;
4888
4889 #ifndef TARGET_SPARC64
4890 case 0x30: /* ldc */
4891 case 0x31: /* ldcsr */
4892 case 0x33: /* lddc */
4893 goto ncp_insn;
4894 #endif
4895 #endif
4896 #ifdef TARGET_SPARC64
4897 case 0x08: /* V9 ldsw */
4898 gen_address_mask(dc, cpu_addr);
4899 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4900 break;
4901 case 0x0b: /* V9 ldx */
4902 gen_address_mask(dc, cpu_addr);
4903 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4904 break;
4905 case 0x18: /* V9 ldswa */
4906 save_state(dc);
4907 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4908 break;
4909 case 0x1b: /* V9 ldxa */
4910 save_state(dc);
4911 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4912 break;
4913 case 0x2d: /* V9 prefetch, no effect */
4914 goto skip_move;
4915 case 0x30: /* V9 ldfa */
4916 if (gen_trap_ifnofpu(dc)) {
4917 goto jmp_insn;
4918 }
4919 save_state(dc);
4920 gen_ldf_asi(cpu_addr, insn, 4, rd);
4921 gen_update_fprs_dirty(rd);
4922 goto skip_move;
4923 case 0x33: /* V9 lddfa */
4924 if (gen_trap_ifnofpu(dc)) {
4925 goto jmp_insn;
4926 }
4927 save_state(dc);
4928 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4929 gen_update_fprs_dirty(DFPREG(rd));
4930 goto skip_move;
4931 case 0x3d: /* V9 prefetcha, no effect */
4932 goto skip_move;
4933 case 0x32: /* V9 ldqfa */
4934 CHECK_FPU_FEATURE(dc, FLOAT128);
4935 if (gen_trap_ifnofpu(dc)) {
4936 goto jmp_insn;
4937 }
4938 save_state(dc);
4939 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4940 gen_update_fprs_dirty(QFPREG(rd));
4941 goto skip_move;
4942 #endif
4943 default:
4944 goto illegal_insn;
4945 }
4946 gen_movl_TN_reg(rd, cpu_val);
4947 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4948 skip_move: ;
4949 #endif
4950 } else if (xop >= 0x20 && xop < 0x24) {
4951 if (gen_trap_ifnofpu(dc)) {
4952 goto jmp_insn;
4953 }
4954 save_state(dc);
4955 switch (xop) {
4956 case 0x20: /* ldf, load fpreg */
4957 gen_address_mask(dc, cpu_addr);
4958 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4959 cpu_dst_32 = gen_dest_fpr_F();
4960 tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
4961 gen_store_fpr_F(dc, rd, cpu_dst_32);
4962 break;
4963 case 0x21: /* ldfsr, V9 ldxfsr */
4964 #ifdef TARGET_SPARC64
4965 gen_address_mask(dc, cpu_addr);
4966 if (rd == 1) {
4967 tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
4968 gen_helper_ldxfsr(cpu_env, cpu_tmp64);
4969 } else {
4970 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
4971 tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
4972 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4973 }
4974 #else
4975 {
4976 tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
4977 gen_helper_ldfsr(cpu_env, cpu_tmp32);
4978 }
4979 #endif
4980 break;
4981 case 0x22: /* ldqf, load quad fpreg */
4982 {
4983 TCGv_i32 r_const;
4984
4985 CHECK_FPU_FEATURE(dc, FLOAT128);
4986 r_const = tcg_const_i32(dc->mem_idx);
4987 gen_address_mask(dc, cpu_addr);
4988 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4989 tcg_temp_free_i32(r_const);
4990 gen_op_store_QT0_fpr(QFPREG(rd));
4991 gen_update_fprs_dirty(QFPREG(rd));
4992 }
4993 break;
4994 case 0x23: /* lddf, load double fpreg */
4995 gen_address_mask(dc, cpu_addr);
4996 cpu_dst_64 = gen_dest_fpr_D();
4997 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4998 gen_store_fpr_D(dc, rd, cpu_dst_64);
4999 break;
5000 default:
5001 goto illegal_insn;
5002 }
5003 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5004 xop == 0xe || xop == 0x1e) {
5005 gen_movl_reg_TN(rd, cpu_val);
5006 switch (xop) {
5007 case 0x4: /* st, store word */
5008 gen_address_mask(dc, cpu_addr);
5009 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5010 break;
5011 case 0x5: /* stb, store byte */
5012 gen_address_mask(dc, cpu_addr);
5013 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5014 break;
5015 case 0x6: /* sth, store halfword */
5016 gen_address_mask(dc, cpu_addr);
5017 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5018 break;
5019 case 0x7: /* std, store double word */
5020 if (rd & 1)
5021 goto illegal_insn;
5022 else {
5023 TCGv_i32 r_const;
5024
5025 save_state(dc);
5026 gen_address_mask(dc, cpu_addr);
5027 r_const = tcg_const_i32(7);
5028 /* XXX remove alignment check */
5029 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5030 tcg_temp_free_i32(r_const);
5031 gen_movl_reg_TN(rd + 1, cpu_tmp0);
5032 tcg_gen_concat_tl_i64(cpu_tmp64, cpu_tmp0, cpu_val);
5033 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
5034 }
5035 break;
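/* tcg_gen_concat_tl_i64 above places rd+1 in the low half and rd
   in the high half, so the big-endian 64-bit store writes rd to
   [addr] and rd+1 to [addr+4], matching the std semantics.  */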
5036 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5037 case 0x14: /* sta, V9 stwa, store word alternate */
5038 #ifndef TARGET_SPARC64
5039 if (IS_IMM)
5040 goto illegal_insn;
5041 if (!supervisor(dc))
5042 goto priv_insn;
5043 #endif
5044 save_state(dc);
5045 gen_st_asi(cpu_val, cpu_addr, insn, 4);
5046 dc->npc = DYNAMIC_PC;
5047 break;
5048 case 0x15: /* stba, store byte alternate */
5049 #ifndef TARGET_SPARC64
5050 if (IS_IMM)
5051 goto illegal_insn;
5052 if (!supervisor(dc))
5053 goto priv_insn;
5054 #endif
5055 save_state(dc);
5056 gen_st_asi(cpu_val, cpu_addr, insn, 1);
5057 dc->npc = DYNAMIC_PC;
5058 break;
5059 case 0x16: /* stha, store halfword alternate */
5060 #ifndef TARGET_SPARC64
5061 if (IS_IMM)
5062 goto illegal_insn;
5063 if (!supervisor(dc))
5064 goto priv_insn;
5065 #endif
5066 save_state(dc);
5067 gen_st_asi(cpu_val, cpu_addr, insn, 2);
5068 dc->npc = DYNAMIC_PC;
5069 break;
5070 case 0x17: /* stda, store double word alternate */
5071 #ifndef TARGET_SPARC64
5072 if (IS_IMM)
5073 goto illegal_insn;
5074 if (!supervisor(dc))
5075 goto priv_insn;
5076 #endif
5077 if (rd & 1)
5078 goto illegal_insn;
5079 else {
5080 save_state(dc);
5081 gen_stda_asi(cpu_val, cpu_addr, insn, rd);
5082 }
5083 break;
5084 #endif
5085 #ifdef TARGET_SPARC64
5086 case 0x0e: /* V9 stx */
5087 gen_address_mask(dc, cpu_addr);
5088 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5089 break;
5090 case 0x1e: /* V9 stxa */
5091 save_state(dc);
5092 gen_st_asi(cpu_val, cpu_addr, insn, 8);
5093 dc->npc = DYNAMIC_PC;
5094 break;
5095 #endif
5096 default:
5097 goto illegal_insn;
5098 }
5099 } else if (xop > 0x23 && xop < 0x28) {
5100 if (gen_trap_ifnofpu(dc)) {
5101 goto jmp_insn;
5102 }
5103 save_state(dc);
5104 switch (xop) {
5105 case 0x24: /* stf, store fpreg */
5106 gen_address_mask(dc, cpu_addr);
5107 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5108 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
5109 tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
5110 break;
5111 case 0x25: /* stfsr, V9 stxfsr */
5112 #ifdef TARGET_SPARC64
5113 gen_address_mask(dc, cpu_addr);
5114 tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
5115 if (rd == 1)
5116 tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
5117 else
5118 tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
5119 #else
5120 tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
5121 tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
5122 #endif
5123 break;
5124 case 0x26:
5125 #ifdef TARGET_SPARC64
5126 /* V9 stqf, store quad fpreg */
5127 {
5128 TCGv_i32 r_const;
5129
5130 CHECK_FPU_FEATURE(dc, FLOAT128);
5131 gen_op_load_fpr_QT0(QFPREG(rd));
5132 r_const = tcg_const_i32(dc->mem_idx);
5133 gen_address_mask(dc, cpu_addr);
5134 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5135 tcg_temp_free_i32(r_const);
5136 }
5137 break;
5138 #else /* !TARGET_SPARC64 */
5139 /* stdfq, store floating point queue */
5140 #if defined(CONFIG_USER_ONLY)
5141 goto illegal_insn;
5142 #else
5143 if (!supervisor(dc))
5144 goto priv_insn;
5145 if (gen_trap_ifnofpu(dc)) {
5146 goto jmp_insn;
5147 }
5148 goto nfq_insn;
5149 #endif
5150 #endif
5151 case 0x27: /* stdf, store double fpreg */
5152 gen_address_mask(dc, cpu_addr);
5153 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5154 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5155 break;
5156 default:
5157 goto illegal_insn;
5158 }
5159 } else if (xop > 0x33 && xop < 0x3f) {
5160 save_state(dc);
5161 switch (xop) {
5162 #ifdef TARGET_SPARC64
5163 case 0x34: /* V9 stfa */
5164 if (gen_trap_ifnofpu(dc)) {
5165 goto jmp_insn;
5166 }
5167 gen_stf_asi(cpu_addr, insn, 4, rd);
5168 break;
5169 case 0x36: /* V9 stqfa */
5170 {
5171 TCGv_i32 r_const;
5172
5173 CHECK_FPU_FEATURE(dc, FLOAT128);
5174 if (gen_trap_ifnofpu(dc)) {
5175 goto jmp_insn;
5176 }
5177 r_const = tcg_const_i32(7);
5178 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5179 tcg_temp_free_i32(r_const);
5180 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5181 }
5182 break;
5183 case 0x37: /* V9 stdfa */
5184 if (gen_trap_ifnofpu(dc)) {
5185 goto jmp_insn;
5186 }
5187 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5188 break;
5189 case 0x3c: /* V9 casa */
5190 gen_cas_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
5191 gen_movl_TN_reg(rd, cpu_val);
5192 break;
5193 case 0x3e: /* V9 casxa */
5194 gen_casx_asi(cpu_val, cpu_addr, cpu_src2, insn, rd);
5195 gen_movl_TN_reg(rd, cpu_val);
5196 break;
5197 #else
5198 case 0x34: /* stc */
5199 case 0x35: /* stcsr */
5200 case 0x36: /* stdcq */
5201 case 0x37: /* stdc */
5202 goto ncp_insn;
5203 #endif
5204 default:
5205 goto illegal_insn;
5206 }
5207 } else
5208 goto illegal_insn;
5209 }
5210 break;
5211 }
5212 /* default case for non-jump instructions: advance the pc/npc delay-slot pair */
5213 if (dc->npc == DYNAMIC_PC) {
5214 dc->pc = DYNAMIC_PC;
5215 gen_op_next_insn();
5216 } else if (dc->npc == JUMP_PC) {
5217 /* we can do a static jump */
5218 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5219 dc->is_br = 1;
5220 } else {
5221 dc->pc = dc->npc;
5222 dc->npc = dc->npc + 4;
5223 }
5224 jmp_insn:
5225 goto egress;
5226 illegal_insn:
5227 {
5228 TCGv_i32 r_const;
5229
5230 save_state(dc);
5231 r_const = tcg_const_i32(TT_ILL_INSN);
5232 gen_helper_raise_exception(cpu_env, r_const);
5233 tcg_temp_free_i32(r_const);
5234 dc->is_br = 1;
5235 }
5236 goto egress;
5237 unimp_flush:
5238 {
5239 TCGv_i32 r_const;
5240
5241 save_state(dc);
5242 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5243 gen_helper_raise_exception(cpu_env, r_const);
5244 tcg_temp_free_i32(r_const);
5245 dc->is_br = 1;
5246 }
5247 goto egress;
5248 #if !defined(CONFIG_USER_ONLY)
5249 priv_insn:
5250 {
5251 TCGv_i32 r_const;
5252
5253 save_state(dc);
5254 r_const = tcg_const_i32(TT_PRIV_INSN);
5255 gen_helper_raise_exception(cpu_env, r_const);
5256 tcg_temp_free_i32(r_const);
5257 dc->is_br = 1;
5258 }
5259 goto egress;
5260 #endif
5261 nfpu_insn:
5262 save_state(dc);
5263 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5264 dc->is_br = 1;
5265 goto egress;
5266 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5267 nfq_insn:
5268 save_state(dc);
5269 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5270 dc->is_br = 1;
5271 goto egress;
5272 #endif
5273 #ifndef TARGET_SPARC64
5274 ncp_insn:
5275 {
5276 TCGv_i32 r_const;
5277
5278 save_state(dc);
5279 r_const = tcg_const_i32(TT_NCP_INSN);
5280 gen_helper_raise_exception(cpu_env, r_const);
5281 tcg_temp_free_i32(r_const);
5282 dc->is_br = 1;
5283 }
5284 goto egress;
5285 #endif
5286 egress:
5287 tcg_temp_free(cpu_tmp1);
5288 tcg_temp_free(cpu_tmp2);
5289 if (dc->n_t32 != 0) {
5290 int i;
5291 for (i = dc->n_t32 - 1; i >= 0; --i) {
5292 tcg_temp_free_i32(dc->t32[i]);
5293 }
5294 dc->n_t32 = 0;
5295 }
5296 }
5297
5298 static inline void gen_intermediate_code_internal(TranslationBlock * tb,
5299 int spc, CPUSPARCState *env)
5300 {
5301 target_ulong pc_start, last_pc;
5302 uint16_t *gen_opc_end;
5303 DisasContext dc1, *dc = &dc1;
5304 CPUBreakpoint *bp;
5305 int j, lj = -1;
5306 int num_insns;
5307 int max_insns;
5308 unsigned int insn;
5309
5310 memset(dc, 0, sizeof(DisasContext));
5311 dc->tb = tb;
5312 pc_start = tb->pc;
5313 dc->pc = pc_start;
5314 last_pc = dc->pc;
5315 dc->npc = (target_ulong) tb->cs_base;
5316 dc->cc_op = CC_OP_DYNAMIC;
5317 dc->mem_idx = cpu_mmu_index(env);
5318 dc->def = env->def;
5319 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5320 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5321 dc->singlestep = (env->singlestep_enabled || singlestep);
5322 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
5323
5324 cpu_tmp0 = tcg_temp_new();
5325 cpu_tmp32 = tcg_temp_new_i32();
5326 cpu_tmp64 = tcg_temp_new_i64();
5327
5328 cpu_dst = tcg_temp_local_new();
5329
5330 // loads and stores
5331 cpu_val = tcg_temp_local_new();
5332 cpu_addr = tcg_temp_local_new();
5333
5334 num_insns = 0;
5335 max_insns = tb->cflags & CF_COUNT_MASK;
5336 if (max_insns == 0)
5337 max_insns = CF_COUNT_MASK;
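/* a zero CF_COUNT_MASK field means "no instruction budget", so the
   largest value the encoding can express is used instead */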
5338 gen_icount_start();
5339 do {
5340 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
5341 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5342 if (bp->pc == dc->pc) {
5343 if (dc->pc != pc_start)
5344 save_state(dc);
5345 gen_helper_debug(cpu_env);
5346 tcg_gen_exit_tb(0);
5347 dc->is_br = 1;
5348 goto exit_gen_loop;
5349 }
5350 }
5351 }
5352 if (spc) {
5353 qemu_log("Search PC...\n");
5354 j = gen_opc_ptr - gen_opc_buf;
5355 if (lj < j) {
5356 lj++;
5357 while (lj < j)
5358 gen_opc_instr_start[lj++] = 0;
5359 gen_opc_pc[lj] = dc->pc;
5360 gen_opc_npc[lj] = dc->npc;
5361 gen_opc_instr_start[lj] = 1;
5362 gen_opc_icount[lj] = num_insns;
5363 }
5364 }
5365 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
5366 gen_io_start();
5367 last_pc = dc->pc;
5368 insn = cpu_ldl_code(env, dc->pc);
5369 disas_sparc_insn(dc, insn);
5370 num_insns++;
5371
5372 if (dc->is_br)
5373 break;
5374 /* if the next PC is different, we abort now */
5375 if (dc->pc != (last_pc + 4))
5376 break;
5377 /* if we reach a page boundary, we stop generation so that the
5378 PC of a TT_TFAULT exception is always in the right page */
5379 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5380 break;
5381 /* in single-step mode, we translate only one instruction and
5382 then raise an exception */
5383 if (dc->singlestep) {
5384 break;
5385 }
5386 } while ((gen_opc_ptr < gen_opc_end) &&
5387 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5388 num_insns < max_insns);
5389
5390 exit_gen_loop:
5391 tcg_temp_free(cpu_addr);
5392 tcg_temp_free(cpu_val);
5393 tcg_temp_free(cpu_dst);
5394 tcg_temp_free_i64(cpu_tmp64);
5395 tcg_temp_free_i32(cpu_tmp32);
5396 tcg_temp_free(cpu_tmp0);
5397
5398 if (tb->cflags & CF_LAST_IO)
5399 gen_io_end();
5400 if (!dc->is_br) {
5401 if (dc->pc != DYNAMIC_PC &&
5402 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5403 /* static PC and NPC: we can use direct chaining */
5404 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5405 } else {
5406 if (dc->pc != DYNAMIC_PC)
5407 tcg_gen_movi_tl(cpu_pc, dc->pc);
5408 save_npc(dc);
5409 tcg_gen_exit_tb(0);
5410 }
5411 }
5412 gen_icount_end(tb, num_insns);
5413 *gen_opc_ptr = INDEX_op_end;
5414 if (spc) {
5415 j = gen_opc_ptr - gen_opc_buf;
5416 lj++;
5417 while (lj <= j)
5418 gen_opc_instr_start[lj++] = 0;
5419 #if 0
5420 log_page_dump();
5421 #endif
5422 gen_opc_jump_pc[0] = dc->jump_pc[0];
5423 gen_opc_jump_pc[1] = dc->jump_pc[1];
5424 } else {
5425 tb->size = last_pc + 4 - pc_start;
5426 tb->icount = num_insns;
5427 }
5428 #ifdef DEBUG_DISAS
5429 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5430 qemu_log("--------------\n");
5431 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5432 log_target_disas(pc_start, last_pc + 4 - pc_start, 0);
5433 qemu_log("\n");
5434 }
5435 #endif
5436 }
5437
5438 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5439 {
5440 gen_intermediate_code_internal(tb, 0, env);
5441 }
5442
5443 void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
5444 {
5445 gen_intermediate_code_internal(tb, 1, env);
5446 }
5447
5448 void gen_intermediate_code_init(CPUSPARCState *env)
5449 {
5450 unsigned int i;
5451 static int inited;
5452 static const char * const gregnames[8] = {
5453 NULL, // g0 not used
5454 "g1",
5455 "g2",
5456 "g3",
5457 "g4",
5458 "g5",
5459 "g6",
5460 "g7",
5461 };
5462 static const char * const fregnames[32] = {
5463 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5464 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5465 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5466 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5467 };
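/* the FP file is modelled as TARGET_DPREGS 64-bit doubles, so only
   the even single-precision register names appear here */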
5468
5469 /* init various static tables */
5470 if (!inited) {
5471 inited = 1;
5472
5473 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5474 cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
5475 offsetof(CPUSPARCState, regwptr),
5476 "regwptr");
5477 #ifdef TARGET_SPARC64
5478 cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
5479 "xcc");
5480 cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
5481 "asi");
5482 cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
5483 "fprs");
5484 cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
5485 "gsr");
5486 cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
5487 offsetof(CPUSPARCState, tick_cmpr),
5488 "tick_cmpr");
5489 cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
5490 offsetof(CPUSPARCState, stick_cmpr),
5491 "stick_cmpr");
5492 cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
5493 offsetof(CPUSPARCState, hstick_cmpr),
5494 "hstick_cmpr");
5495 cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
5496 "hintp");
5497 cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
5498 "htba");
5499 cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
5500 "hver");
5501 cpu_ssr = tcg_global_mem_new(TCG_AREG0,
5502 offsetof(CPUSPARCState, ssr), "ssr");
5503 cpu_ver = tcg_global_mem_new(TCG_AREG0,
5504 offsetof(CPUSPARCState, version), "ver");
5505 cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
5506 offsetof(CPUSPARCState, softint),
5507 "softint");
5508 #else
5509 cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
5510 "wim");
5511 #endif
5512 cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
5513 "cond");
5514 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
5515 "cc_src");
5516 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
5517 offsetof(CPUSPARCState, cc_src2),
5518 "cc_src2");
5519 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
5520 "cc_dst");
5521 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
5522 "cc_op");
5523 cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
5524 "psr");
5525 cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
5526 "fsr");
5527 cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
5528 "pc");
5529 cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
5530 "npc");
5531 cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
5532 #ifndef CONFIG_USER_ONLY
5533 cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
5534 "tbr");
5535 #endif
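/* %g0 is hardwired to zero, so only g1..g7 get backing TCG globals */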
5536 for (i = 1; i < 8; i++) {
5537 cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
5538 offsetof(CPUSPARCState, gregs[i]),
5539 gregnames[i]);
5540 }
5541 for (i = 0; i < TARGET_DPREGS; i++) {
5542 cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
5543 offsetof(CPUSPARCState, fpr[i]),
5544 fregnames[i]);
5545 }
5546
5547 /* register helpers */
5548
5549 #define GEN_HELPER 2
5550 #include "helper.h"
5551 }
5552 }
5553
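/* Rebuild the architectural pc/npc for a faulting instruction from
   the gen_opc_* side tables filled in by the search-pc pass
   (gen_intermediate_code_pc above).  */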
5554 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
5555 {
5556 target_ulong npc;
5557 env->pc = gen_opc_pc[pc_pos];
5558 npc = gen_opc_npc[pc_pos];
5559 if (npc == DYNAMIC_PC) {
5560 /* dynamic NPC: already stored */
5561 } else if (npc == JUMP_PC) {
5562 /* jump PC: use 'cond' and the jump targets of the translation */
5563 if (env->cond) {
5564 env->npc = gen_opc_jump_pc[0];
5565 } else {
5566 env->npc = gen_opc_jump_pc[1];
5567 }
5568 } else {
5569 env->npc = npc;
5570 }
5571
5572 /* flush pending conditional evaluations before exposing cpu state */
5573 if (CC_OP != CC_OP_FLAGS) {
5574 helper_compute_psr(env);
5575 }
5576 }