/*
 * target/sparc/translate.c — SPARC instruction translation (QEMU mirror).
 * Commit context: "target/sparc: Use DYNAMIC_PC_LOOKUP for JMPL".
 */
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "exec/cpu_ldst.h"
29
30 #include "exec/helper-gen.h"
31
32 #include "exec/translator.h"
33 #include "exec/log.h"
34 #include "asi.h"
35
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
38 #undef HELPER_H
39
/*
 * Marker values stored in dc->pc / dc->npc in place of a real address.
 * Real instruction addresses are 4-byte aligned, so these are
 * distinguishable by testing the low bits ("pc & 3").
 */
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC 1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC 2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP 3

/* Translator exit reason: unconditionally exit to the main loop. */
#define DISAS_EXIT  DISAS_TARGET_0
/* global register indexes (TCG globals mapping CPUSPARCState fields) */
static TCGv_ptr cpu_regwptr;                        /* current register window pointer */
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;    /* lazy condition-code operands/result */
static TCGv_i32 cpu_cc_op;                          /* current CC_OP_* for lazy flag evaluation */
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;                               /* boolean condition for delayed branches */
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers, stored as 64-bit halves of the FP file */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
71
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;      /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;     /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;          /* MMU index for memory accesses */
    bool fpu_enabled;     /* FPU usable; checked before FP codegen */
    bool address_mask_32bit; /* sparc64: truncate addresses (see AM_CHECK) */
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation (lazy flags state) */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;  /* FPRS dirty bits already set in this TB */
    int asi;         /* NOTE(review): presumably the default ASI for this TB — set elsewhere */
#endif
} DisasContext;
94
/* A comparison to be emitted later: "cond applied to (c1, c2)".
   If is_bool, c1 already holds a 0/1 value and cond is NE vs zero. */
typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;
100
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Sign-extending variants of the field extractors above. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

/* Decode double/quad FP register numbers; on sparc64 the low odd bit
   of the encoding selects the upper half of the 64-entry file. */
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

/* NOTE(review): masks applied to software trap numbers — confirm at use sites. */
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
/*
 * Sign-extend the low LEN bits of X to a full int.
 *
 * The shift left is done in unsigned arithmetic: left-shifting a signed
 * value into (or past) the sign bit is undefined behavior in C.  The
 * final arithmetic right shift of a negative value is implementation-
 * defined, but is an arithmetic shift on all compilers QEMU supports.
 */
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (int)((unsigned int)x << len) >> len;
}
128
/* Bit 13 of the instruction word selects the immediate (i=1) format. */
#define IS_IMM (insn & (1<<13))
130
/* Mark the FPRS dirty bit (DL for regs < 32, DU otherwise) after an FP
   register write; sparc64 only, no-op elsewhere. */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
143
/* floating point registers moves */
/* Load single-precision register SRC into a fresh i32 temp.  Even
   registers live in the high half of the i64 pair, odd in the low. */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}
155
/* Store i32 value V into single-precision register DST, preserving the
   other half of the underlying i64, and mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}
165
/* Return a fresh i32 temp to receive a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
170
/* Return the i64 global backing double-precision register SRC. */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}
176
/* Store V into double-precision register DST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
183
/* Return the destination i64 for double-precision register DST; the
   caller writes it directly, so no extra move is needed. */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
188
/* Copy quad FP register SRC (two i64 halves) into env->qt0 scratch. */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

/* Copy quad FP register SRC into env->qt1 scratch. */
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

/* Copy env->qt0 scratch back into quad FP register DST. */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
212
/* Store the two i64 halves V1 (upper) and V2 (lower) into quad FP
   register DST and mark FPRS dirty. */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
222
223 #ifdef TARGET_SPARC64
/* Return the upper i64 half of quad FP register SRC. */
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}
229
/* Return the lower i64 half of quad FP register SRC. */
static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}
235
/* Copy quad FP register RS to RD (both i64 halves) and mark dirty. */
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
245 #endif
246
/* moves */
/* Privilege-level predicates: constant-folded away for user-only
   builds where guest code always runs unprivileged. */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

/* Should 64-bit addresses be truncated to 32 bits?  Always true for
   the 32-bit ABI; otherwise controlled per-TB by address_mask_32bit. */
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
269
270 static void gen_address_mask(DisasContext *dc, TCGv addr)
271 {
272 #ifdef TARGET_SPARC64
273 if (AM_CHECK(dc))
274 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
275 #endif
276 }
277
278 static TCGv gen_load_gpr(DisasContext *dc, int reg)
279 {
280 if (reg > 0) {
281 assert(reg < 32);
282 return cpu_regs[reg];
283 } else {
284 TCGv t = tcg_temp_new();
285 tcg_gen_movi_tl(t, 0);
286 return t;
287 }
288 }
289
290 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
291 {
292 if (reg > 0) {
293 assert(reg < 32);
294 tcg_gen_mov_tl(cpu_regs[reg], v);
295 }
296 }
297
298 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
299 {
300 if (reg > 0) {
301 assert(reg < 32);
302 return cpu_regs[reg];
303 } else {
304 return tcg_temp_new();
305 }
306 }
307
/* True if both PC and NPC may be reached with a direct TB link. */
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}
313
/* End the TB, transferring control to (PC, NPC): chained goto_tb when
   allowed, otherwise an indirect lookup-and-jump. */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
330
// XXX suboptimal
/* Extract one PSR flag (N/Z/V/C) from the i32 PSR image SRC into the
   target-width register REG as a 0/1 value. */
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
355
/* DST = SRC1 + SRC2, recording the operands and result in cpu_cc_* so
   flags can be computed lazily (caller sets cc_op to an ADD variant). */
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
363
/* Recover the 32-bit carry out of a previous add recorded in cpu_cc_*:
   carry is set iff (uint32_t)dst < (uint32_t)src. */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
384
/* Recover the 32-bit borrow out of a previous subtract recorded in
   cpu_cc_*: borrow is set iff (uint32_t)src1 < (uint32_t)src2. */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
405
/*
 * ADDX/ADDXcc: DST = SRC1 + SRC2 + icc.C.  The carry is recovered from
 * the lazily-tracked dc->cc_op without forcing full flag computation
 * where possible; the helper is the slow-path fallback.  If UPDATE_CC,
 * record operands/result and switch cc_op to CC_OP_ADDX.
 */
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
470
/* DST = SRC1 - SRC2, recording the operands and result in cpu_cc_* so
   flags can be computed lazily (caller sets cc_op to a SUB variant). */
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
478
/*
 * SUBX/SUBXcc: DST = SRC1 - SRC2 - icc.C.  Mirror of gen_op_addx_int:
 * recover the borrow from dc->cc_op cheaply where possible, else call
 * the helper.  If UPDATE_CC, record state and set cc_op to CC_OP_SUBX.
 */
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
543
/*
 * MULScc: one step of the SPARC V8 multiply-step instruction.  Shifts
 * the Y register and conditionally adds SRC2 depending on Y's low bit;
 * operands land in cpu_cc_src/cpu_cc_src2 so the caller can update cc.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
582
/* 32x32 -> 64 multiply (signed if SIGN_EXT), low 32/64 bits to DST and
   the high 32 bits to the Y register, as UMUL/SMUL require. */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
607
/* UMUL: unsigned 32x32 multiply (see gen_op_multiply). */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* SMUL: signed 32x32 multiply (see gen_op_multiply). */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
619
620 // 1
621 static void gen_op_eval_ba(TCGv dst)
622 {
623 tcg_gen_movi_tl(dst, 1);
624 }
625
626 // Z
627 static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
628 {
629 gen_mov_reg_Z(dst, src);
630 }
631
632 // Z | (N ^ V)
633 static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
634 {
635 TCGv t0 = tcg_temp_new();
636 gen_mov_reg_N(t0, src);
637 gen_mov_reg_V(dst, src);
638 tcg_gen_xor_tl(dst, dst, t0);
639 gen_mov_reg_Z(t0, src);
640 tcg_gen_or_tl(dst, dst, t0);
641 }
642
643 // N ^ V
644 static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
645 {
646 TCGv t0 = tcg_temp_new();
647 gen_mov_reg_V(t0, src);
648 gen_mov_reg_N(dst, src);
649 tcg_gen_xor_tl(dst, dst, t0);
650 }
651
652 // C | Z
653 static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
654 {
655 TCGv t0 = tcg_temp_new();
656 gen_mov_reg_Z(t0, src);
657 gen_mov_reg_C(dst, src);
658 tcg_gen_or_tl(dst, dst, t0);
659 }
660
661 // C
662 static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
663 {
664 gen_mov_reg_C(dst, src);
665 }
666
667 // V
668 static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
669 {
670 gen_mov_reg_V(dst, src);
671 }
672
673 // 0
674 static void gen_op_eval_bn(TCGv dst)
675 {
676 tcg_gen_movi_tl(dst, 0);
677 }
678
679 // N
680 static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
681 {
682 gen_mov_reg_N(dst, src);
683 }
684
685 // !Z
686 static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
687 {
688 gen_mov_reg_Z(dst, src);
689 tcg_gen_xori_tl(dst, dst, 0x1);
690 }
691
692 // !(Z | (N ^ V))
693 static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
694 {
695 gen_op_eval_ble(dst, src);
696 tcg_gen_xori_tl(dst, dst, 0x1);
697 }
698
699 // !(N ^ V)
700 static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
701 {
702 gen_op_eval_bl(dst, src);
703 tcg_gen_xori_tl(dst, dst, 0x1);
704 }
705
706 // !(C | Z)
707 static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
708 {
709 gen_op_eval_bleu(dst, src);
710 tcg_gen_xori_tl(dst, dst, 0x1);
711 }
712
713 // !C
714 static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
715 {
716 gen_mov_reg_C(dst, src);
717 tcg_gen_xori_tl(dst, dst, 0x1);
718 }
719
720 // !N
721 static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
722 {
723 gen_mov_reg_N(dst, src);
724 tcg_gen_xori_tl(dst, dst, 0x1);
725 }
726
727 // !V
728 static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
729 {
730 gen_mov_reg_V(dst, src);
731 tcg_gen_xori_tl(dst, dst, 0x1);
732 }
733
734 /*
735 FPSR bit field FCC1 | FCC0:
736 0 =
737 1 <
738 2 >
739 3 unordered
740 */
741 static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
742 unsigned int fcc_offset)
743 {
744 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
745 tcg_gen_andi_tl(reg, reg, 0x1);
746 }
747
748 static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
749 {
750 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
751 tcg_gen_andi_tl(reg, reg, 0x1);
752 }
753
754 // !0: FCC0 | FCC1
755 static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
756 {
757 TCGv t0 = tcg_temp_new();
758 gen_mov_reg_FCC0(dst, src, fcc_offset);
759 gen_mov_reg_FCC1(t0, src, fcc_offset);
760 tcg_gen_or_tl(dst, dst, t0);
761 }
762
763 // 1 or 2: FCC0 ^ FCC1
764 static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
765 {
766 TCGv t0 = tcg_temp_new();
767 gen_mov_reg_FCC0(dst, src, fcc_offset);
768 gen_mov_reg_FCC1(t0, src, fcc_offset);
769 tcg_gen_xor_tl(dst, dst, t0);
770 }
771
772 // 1 or 3: FCC0
773 static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
774 {
775 gen_mov_reg_FCC0(dst, src, fcc_offset);
776 }
777
778 // 1: FCC0 & !FCC1
779 static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
780 {
781 TCGv t0 = tcg_temp_new();
782 gen_mov_reg_FCC0(dst, src, fcc_offset);
783 gen_mov_reg_FCC1(t0, src, fcc_offset);
784 tcg_gen_andc_tl(dst, dst, t0);
785 }
786
787 // 2 or 3: FCC1
788 static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
789 {
790 gen_mov_reg_FCC1(dst, src, fcc_offset);
791 }
792
793 // 2: !FCC0 & FCC1
794 static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
795 {
796 TCGv t0 = tcg_temp_new();
797 gen_mov_reg_FCC0(dst, src, fcc_offset);
798 gen_mov_reg_FCC1(t0, src, fcc_offset);
799 tcg_gen_andc_tl(dst, t0, dst);
800 }
801
802 // 3: FCC0 & FCC1
803 static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
804 {
805 TCGv t0 = tcg_temp_new();
806 gen_mov_reg_FCC0(dst, src, fcc_offset);
807 gen_mov_reg_FCC1(t0, src, fcc_offset);
808 tcg_gen_and_tl(dst, dst, t0);
809 }
810
811 // 0: !(FCC0 | FCC1)
812 static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
813 {
814 TCGv t0 = tcg_temp_new();
815 gen_mov_reg_FCC0(dst, src, fcc_offset);
816 gen_mov_reg_FCC1(t0, src, fcc_offset);
817 tcg_gen_or_tl(dst, dst, t0);
818 tcg_gen_xori_tl(dst, dst, 0x1);
819 }
820
821 // 0 or 3: !(FCC0 ^ FCC1)
822 static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
823 {
824 TCGv t0 = tcg_temp_new();
825 gen_mov_reg_FCC0(dst, src, fcc_offset);
826 gen_mov_reg_FCC1(t0, src, fcc_offset);
827 tcg_gen_xor_tl(dst, dst, t0);
828 tcg_gen_xori_tl(dst, dst, 0x1);
829 }
830
831 // 0 or 2: !FCC0
832 static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
833 {
834 gen_mov_reg_FCC0(dst, src, fcc_offset);
835 tcg_gen_xori_tl(dst, dst, 0x1);
836 }
837
838 // !1: !(FCC0 & !FCC1)
839 static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
840 {
841 TCGv t0 = tcg_temp_new();
842 gen_mov_reg_FCC0(dst, src, fcc_offset);
843 gen_mov_reg_FCC1(t0, src, fcc_offset);
844 tcg_gen_andc_tl(dst, dst, t0);
845 tcg_gen_xori_tl(dst, dst, 0x1);
846 }
847
848 // 0 or 1: !FCC1
849 static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
850 {
851 gen_mov_reg_FCC1(dst, src, fcc_offset);
852 tcg_gen_xori_tl(dst, dst, 0x1);
853 }
854
855 // !2: !(!FCC0 & FCC1)
856 static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
857 {
858 TCGv t0 = tcg_temp_new();
859 gen_mov_reg_FCC0(dst, src, fcc_offset);
860 gen_mov_reg_FCC1(t0, src, fcc_offset);
861 tcg_gen_andc_tl(dst, t0, dst);
862 tcg_gen_xori_tl(dst, dst, 0x1);
863 }
864
865 // !3: !(FCC0 & FCC1)
866 static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
867 {
868 TCGv t0 = tcg_temp_new();
869 gen_mov_reg_FCC0(dst, src, fcc_offset);
870 gen_mov_reg_FCC1(t0, src, fcc_offset);
871 tcg_gen_and_tl(dst, dst, t0);
872 tcg_gen_xori_tl(dst, dst, 0x1);
873 }
874
/* End the TB with a two-way static branch: go to PC1 if R_COND is
   nonzero, else PC2 (delay-slot npc is pc+4 in both arms). */
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
887
/* Conditional branch with the annul bit set: if cpu_cond holds, the
   delay slot executes and control goes to PC1; otherwise the delay
   slot is annulled and execution continues after it. */
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->base.is_jmp = DISAS_NORETURN;
}
902
/* Conditional branch without annul: the delay slot always executes;
   npc becomes PC1 when cpu_cond holds, else falls through.  With a
   static npc this just arms the JUMP_PC state for later resolution. */
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc,
                               cpu_cond, tcg_constant_tl(0),
                               tcg_constant_tl(pc1), cpu_npc);
            dc->pc = npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    }
}
928
/* Resolve JUMP_PC state: select cpu_npc between the taken and
   not-taken targets based on cpu_cond. */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}
937
/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
947
/* Flush the translator's notion of npc into cpu_npc, resolving a
   pending JUMP_PC; dynamic markers mean cpu_npc is already live. */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
966
/* Force lazy condition codes into the architectural PSR if needed. */
static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}
974
/* Synchronize cpu_pc/cpu_npc with the translator state, e.g. before
   calling a helper that may raise an exception. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
980
/* Raise exception WHICH at the current pc/npc and end the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
987
/* Emit a runtime alignment check of ADDR against MASK (helper traps
   on misalignment). */
static void gen_check_align(TCGv addr, int mask)
{
    gen_helper_check_align(cpu_env, addr, tcg_constant_i32(mask));
}
992
/* Advance pc to npc (delay-slot semantics), resolving JUMP_PC and
   propagating dynamic-pc markers into dc->pc. */
static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
1014
/* Emit pc = npc; npc += 4 — sequential advance of the live PC pair. */
static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1020
/*
 * Build a DisasCompare equivalent to branch condition COND evaluated
 * on icc (XCC false) or xcc.  Uses the lazily-tracked dc->cc_op to
 * derive the comparison directly from the recorded cc operands where
 * possible; otherwise materializes the flags and evaluates a boolean.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* TCG comparison matching each Bicc condition after a subtract
       (cc_src vs cc_src2); -1 entries need the full flag computation. */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    /* After a logic op C and V are zero, so conditions collapse to
       tests of cc_dst against zero. */
    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            /* icc view: compare only the low 32 bits. */
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
1188
/* Build a DisasCompare (always boolean) for FP branch condition COND
   on condition-code field CC; the offset selects the FSR FCC field. */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    /* Bit offset of FCCn relative to FCC0 within the FSR. */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1267
1268 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1269 DisasContext *dc)
1270 {
1271 DisasCompare cmp;
1272 gen_compare(&cmp, cc, cond, dc);
1273
1274 /* The interface is to return a boolean in r_dst. */
1275 if (cmp.is_bool) {
1276 tcg_gen_mov_tl(r_dst, cmp.c1);
1277 } else {
1278 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1279 }
1280 }
1281
1282 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1283 {
1284 DisasCompare cmp;
1285 gen_fcompare(&cmp, cc, cond);
1286
1287 /* The interface is to return a boolean in r_dst. */
1288 if (cmp.is_bool) {
1289 tcg_gen_mov_tl(r_dst, cmp.c1);
1290 } else {
1291 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1292 }
1293 }
1294
1295 #ifdef TARGET_SPARC64
/*
 * Inverted logic: each entry is the TCG condition for the branch
 * NOT being taken; gen_compare_reg() inverts it to recover the
 * architectural test.  Register-branch cond values 0 and 4 are
 * reserved encodings and must never reach this table; -1 marks
 * them as invalid.
 */
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1307
/*
 * Build a DisasCompare for a register-contents branch (BPr):
 * compare R_SRC against zero.  COND indexes gen_tcg_cond_reg[],
 * which stores the inverse of the architectural test, hence the
 * tcg_invert_cond().  COND 0/4 are reserved and assumed to have
 * been rejected by the caller.
 */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}
1315
1316 static void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1317 {
1318 DisasCompare cmp;
1319 gen_compare_reg(&cmp, cond, r_src);
1320
1321 /* The interface is to return a boolean in r_dst. */
1322 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1323 }
1324 #endif
1325
/*
 * Translate a Bicc/BPcc integer condition-code branch.  OFFSET is the
 * sign-extended, word-scaled displacement relative to dc->pc; CC
 * selects icc vs xcc.  SPARC branches execute a delay slot; the "a"
 * (annul) bit suppresses the delay-slot instruction when the branch
 * is not taken (and unconditionally for BA,a / BN,a).
 */
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* With 32-bit address masking in effect, wrap the target.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* BN,a: the delay slot is annulled; skip over it.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* BA,a: delay slot annulled; go straight to the target.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            /* npc may be dynamic here; materialize pc at runtime.  */
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        /* Conditional: evaluate the condition into cpu_cond, then
           emit either the annulling or the normal branch form.  */
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
1365
/*
 * Translate an FBfcc/FBPfcc floating-point branch.  Identical in
 * structure to do_branch() above, except the condition is evaluated
 * against the FSR fcc field selected by CC.
 */
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    /* With 32-bit address masking in effect, wrap the target.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            /* FBN,a: the delay slot is annulled; skip over it.  */
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            /* FBA,a: delay slot annulled; go straight to the target.  */
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            /* npc may be dynamic here; materialize pc at runtime.  */
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        /* Conditional: evaluate into cpu_cond and emit the branch.  */
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
1405
1406 #ifdef TARGET_SPARC64
/*
 * Translate a BPr register-contents branch: compare R_REG against
 * zero per COND.  Unlike Bicc, BPr has no always/never encodings,
 * so the branch is always conditional here; reserved COND values
 * are assumed rejected by the decoder.
 */
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    /* With 32-bit address masking in effect, wrap the target.  */
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
1424
/*
 * V9 FP compare dispatchers: route to the helper that updates the
 * fcc field selected by FCCNO (0..3) in the FSR.  The "e" variants
 * (fcmpe*) additionally signal an exception on unordered operands.
 */

/* Single-precision compare into fcc[FCCNO].  */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

/* Double-precision compare into fcc[FCCNO].  */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

/* Quad-precision compare into fcc[FCCNO]; operands come from QT0/QT1.  */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

/* Single-precision signaling compare into fcc[FCCNO].  */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

/* Double-precision signaling compare into fcc[FCCNO].  */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

/* Quad-precision signaling compare into fcc[FCCNO]; operands in QT0/QT1.  */
static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}
1532
1533 #else
1534
/*
 * Pre-v9 FP compare dispatchers: there is only a single fcc field,
 * so FCCNO is ignored and everything routes to the fcc0 helpers.
 */

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
1564 #endif
1565
/*
 * Raise an FP exception with the given FSR ftt cause bits: clear the
 * old ftt field, set FSR_FLAGS, then trap to TT_FP_EXCP.
 */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1572
/*
 * If FPU access is disabled, emit an fp-disabled trap and return 1
 * (caller must abandon the instruction).  Returns 0 when the FPU is
 * usable; user-mode emulation always has the FPU enabled.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1583
/* Clear the FSR ftt and current-exception (cexc) fields before an FP op.  */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1588
/*
 * FP-op plumbing helpers.  Naming convention: "fop" runs the helper
 * with cpu_env and then checks for pending IEEE exceptions; "ne_fop"
 * ("no exception") skips the check.  Suffix letters give operand
 * sizes: F = 32-bit single, D = 64-bit double, Q = 128-bit quad.
 */

/* rd = gen(rs), single precision, with IEEE exception check.  */
static void gen_fop_FF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

/* rd = gen(rs), single precision, no exception check.  */
static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

/* rd = gen(rs1, rs2), single precision, with IEEE exception check.  */
static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
1630
1631 #ifdef TARGET_SPARC64
/* rd = gen(rs1, rs2), single precision, no exception check.  */
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
1645 #endif
1646
/* rd = gen(rs), double precision, with IEEE exception check.  */
static void gen_fop_DD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
1660
1661 #ifdef TARGET_SPARC64
/* rd = gen(rs), double precision, no exception check.  */
static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
1674 #endif
1675
/* rd = gen(rs1, rs2), double precision, with IEEE exception check.  */
static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
1690
1691 #ifdef TARGET_SPARC64
/* rd = gen(rs1, rs2), double precision, no exception check.  */
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* rd = gen(gsr, rs1, rs2): VIS ops that also read the GSR register.  */
static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* rd = gen(rd, rs1, rs2): three-operand ops where rd is also a source.  */
static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
1734 #endif
1735
/* Quad op rd = gen(rs): operands travel via the QT0/QT1 staging slots,
   with IEEE exception check.  */
static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1747
1748 #ifdef TARGET_SPARC64
/* Quad op rd = gen(rs) via QT0/QT1, no exception check.  */
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1759 #endif
1760
/* Quad op rd = gen(rs1, rs2) via QT0/QT1, with IEEE exception check.  */
static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

/* Double result from two single sources (e.g. fsmuld), with check.  */
static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

/* Quad result from two double sources (e.g. fdmulq), with check;
   the helper leaves its result in QT0.  */
static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1804
1805 #ifdef TARGET_SPARC64
/* Double result from a single source, with IEEE exception check.  */
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
1820 #endif
1821
/* Double result from a single source; no exception check, but the
   helper still takes cpu_env.  */
static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

/* Single result from a double source, with IEEE exception check.  */
static void gen_fop_FD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

/* Single result from a quad source (via QT1), with check.  */
static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

/* Double result from a quad source (via QT1), with check.  */
static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

/* Quad result (in QT0) from a single source, no exception check.  */
static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

/* Quad result (in QT0) from a double source, no exception check.  */
static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
1904
/* SWAP: atomically exchange SRC with the memory word at ADDR,
   old value into DST.  */
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
}
1911
/* LDSTUB: atomically load the byte at ADDR into DST and store 0xff.  */
static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_constant_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
}
1918
1919 /* asi moves */
1920 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,     /* fall back to the out-of-line ld/st_asi helpers */
    GET_ASI_EXCP,       /* exception already raised; emit nothing further */
    GET_ASI_DIRECT,     /* plain inline qemu_ld/st with mem_idx/memop */
    GET_ASI_DTWINX,     /* v9 twin-doubleword (128-bit) access */
    GET_ASI_BLOCK,      /* v9 64-byte FP block load/store */
    GET_ASI_SHORT,      /* v9 8/16-bit FP load/store */
    GET_ASI_BCOPY,      /* sparc32 block copy (sta) */
    GET_ASI_BFILL,      /* sparc32 block fill (stda) */
} ASIType;

/* Decoded ASI for one load/store-alternate instruction.  */
typedef struct {
    ASIType type;
    int asi;            /* raw 8-bit ASI number */
    int mem_idx;        /* MMU index to use for the access */
    MemOp memop;        /* size/endianness, possibly adjusted per ASI */
} DisasASI;
1938
/*
 * Decode the ASI of a load/store-alternate instruction into a
 * DisasASI tuple.  Chooses the inline code path (type), the MMU
 * index for the access, and may override MEMOP (size/endianness)
 * for the short-FP and little-endian ASIs.  Privilege violations
 * raise the exception here and return GET_ASI_EXCP.
 */
static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:   /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA: /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: pick the MMU index implied by the ASI.  */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        /* Second switch: pick the inline code path (ASIType).  */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
2151
/*
 * Integer load-alternate (lda/lduba/...): load MEMOP-sized data from
 * ADDR using the decoded ASI into DST.  Unhandled ASI types go
 * through the out-of-line ld_asi helper.
 */
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            /* The helper may fault; make pc/npc visible first.  */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; truncate for sparc32.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
2186
/*
 * Integer store-alternate (sta/stba/...): store MEMOP-sized SRC at
 * ADDR using the decoded ASI.  Helper-path stores may touch MMU/TLB
 * registers, so the TB is ended afterwards.
 */
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        break;
#else
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            return;
        }
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
#endif
        /* fall through */
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            /* The helper may fault; make pc/npc visible first.  */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes 64 bits; widen for sparc32.  */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2262
/* SWAPA: atomic exchange through an alternate ASI; only direct ASIs
   are supported, others raise a data-access exception.  */
static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
                         TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
2280
/* CASA: 32-bit compare-and-swap through an alternate ASI.  Compares
   the word at ADDR with CMPV; if equal, stores rd's value; the old
   memory value always lands in rd.  */
static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                        int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop | MO_ALIGN);
        gen_store_gpr(dc, rd, oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
2302
/* LDSTUBA: atomic load-store-unsigned-byte through an alternate ASI.
   For non-direct ASIs a non-atomic helper ld+st sequence is emitted
   (or the TB exits to serialize when running multi-threaded).  */
static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_UB);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_ldstub(dc, dst, addr, da.mem_idx);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Can't do the ld+st pair atomically; restart via helper.  */
            gen_helper_exit_atomic(cpu_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2338 #endif
2339
2340 #ifdef TARGET_SPARC64
/*
 * FP load-alternate (ldfa/lddfa/ldqfa): load SIZE bytes (4/8/16)
 * from ADDR into FP register RD using the decoded ASI.
 */
static void gen_ldf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
    TCGv_i32 d32;
    TCGv_i64 d64;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
            gen_store_fpr_F(dc, rd, d32);
            break;
        case 8:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            /* Load the first half into a temp so that a fault on the
               second load leaves both destination registers intact.  */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            MemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_constant_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case 4:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case 8:
                gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
                break;
            case 16:
                /* As above: stage the first half in a temp.  */
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_addi_tl(addr, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2449
/*
 * FP store-alternate (stfa/stdfa/stqfa): store SIZE bytes (4/8/16)
 * from FP register RD to ADDR using the decoded ASI.
 */
static void gen_stf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
    TCGv_i32 d32;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
            break;
        case 8:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_16);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            MemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_constant_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2532
/*
 * Generate code for V9 LDDA: load a 64-bit doubleword (or, for the
 * DTWINX asis, two 64-bit words) into the register pair RD/RD+1.
 */
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv_i64 hi = gen_dest_gpr(dc, rd);
    TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
        /* Two 64-bit loads; 16-byte alignment is checked on the first
           access only.  */
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2594
/*
 * Generate code for V9 STDA: store the register pair HI (= reg RD)
 * and reg RD+1 as a single 64-bit doubleword, or as two 64-bit
 * words for the DTWINX asis.
 */
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
        /* Two 64-bit stores; 16-byte alignment is checked on the first
           access only.  */
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2650
2651 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2652 int insn, int rd)
2653 {
2654 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2655 TCGv oldv;
2656
2657 switch (da.type) {
2658 case GET_ASI_EXCP:
2659 return;
2660 case GET_ASI_DIRECT:
2661 oldv = tcg_temp_new();
2662 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2663 da.mem_idx, da.memop | MO_ALIGN);
2664 gen_store_gpr(dc, rd, oldv);
2665 break;
2666 default:
2667 /* ??? Should be DAE_invalid_asi. */
2668 gen_exception(dc, TT_DATA_ACCESS);
2669 break;
2670 }
2671 }
2672
2673 #elif !defined(CONFIG_USER_ONLY)
/* Generate code for LDDA (pre-V9): load a 64-bit doubleword into the
   even/odd register pair RD/RD+1.  */
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    TCGv lo = gen_dest_gpr(dc, rd | 1);
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv_i64 t64 = tcg_temp_new_i64();
    DisasASI da = get_asi(dc, insn, MO_TEUQ);

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    default:
        /* Fall back to the load helper for other asis.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);

            save_state(dc);
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
        }
        break;
    }

    /* Split the 64-bit value into the two 32-bit destination registers.  */
    tcg_gen_extr_i64_i32(lo, hi, t64);
    gen_store_gpr(dc, rd | 1, lo);
    gen_store_gpr(dc, rd, hi);
}
2707
/* Generate code for STDA (pre-V9): store the even/odd register pair
   HI (= reg RD) and reg RD+1 as one 64-bit doubleword.  */
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    /* Combine the register pair into a single 64-bit value.  */
    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    case GET_ASI_BFILL:
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;
    default:
        /* Fall back to the store helper for other asis.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2753 #endif
2754
2755 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2756 {
2757 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2758 return gen_load_gpr(dc, rs1);
2759 }
2760
2761 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2762 {
2763 if (IS_IMM) { /* immediate */
2764 target_long simm = GET_FIELDs(insn, 19, 31);
2765 TCGv t = tcg_temp_new();
2766 tcg_gen_movi_tl(t, simm);
2767 return t;
2768 } else { /* register */
2769 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2770 return gen_load_gpr(dc, rs2);
2771 }
2772 }
2773
2774 #ifdef TARGET_SPARC64
/* Emit a conditional move into single-precision fp register RD from RS,
   according to the already-evaluated comparison CMP (FMOVcc/FMOVr).  */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        /* The comparison value is already 0/1; just narrow it.  */
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        /* Evaluate the comparison to 0/1, then narrow.  */
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? s1 : s2, i.e. keep the old value when false.  */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
}
2800
2801 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2802 {
2803 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2804 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2805 gen_load_fpr_D(dc, rs),
2806 gen_load_fpr_D(dc, rd));
2807 gen_store_fpr_D(dc, rd, dst);
2808 }
2809
2810 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2811 {
2812 int qd = QFPREG(rd);
2813 int qs = QFPREG(rs);
2814
2815 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2816 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2817 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2818 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2819
2820 gen_update_fprs_dirty(dc, qd);
2821 }
2822
2823 #ifndef CONFIG_USER_ONLY
/* Set R_TSPTR to point at the trap_state entry for the current trap
   level, i.e. &env->ts[env->tl & MAXTL_MASK].  */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        /* Widen the 32-bit byte offset to pointer width before adding.  */
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
2845 #endif
2846
2847 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2848 int width, bool cc, bool left)
2849 {
2850 TCGv lo1, lo2;
2851 uint64_t amask, tabl, tabr;
2852 int shift, imask, omask;
2853
2854 if (cc) {
2855 tcg_gen_mov_tl(cpu_cc_src, s1);
2856 tcg_gen_mov_tl(cpu_cc_src2, s2);
2857 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2858 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2859 dc->cc_op = CC_OP_SUB;
2860 }
2861
2862 /* Theory of operation: there are two tables, left and right (not to
2863 be confused with the left and right versions of the opcode). These
2864 are indexed by the low 3 bits of the inputs. To make things "easy",
2865 these tables are loaded into two constants, TABL and TABR below.
2866 The operation index = (input & imask) << shift calculates the index
2867 into the constant, while val = (table >> index) & omask calculates
2868 the value we're looking for. */
2869 switch (width) {
2870 case 8:
2871 imask = 0x7;
2872 shift = 3;
2873 omask = 0xff;
2874 if (left) {
2875 tabl = 0x80c0e0f0f8fcfeffULL;
2876 tabr = 0xff7f3f1f0f070301ULL;
2877 } else {
2878 tabl = 0x0103070f1f3f7fffULL;
2879 tabr = 0xfffefcf8f0e0c080ULL;
2880 }
2881 break;
2882 case 16:
2883 imask = 0x6;
2884 shift = 1;
2885 omask = 0xf;
2886 if (left) {
2887 tabl = 0x8cef;
2888 tabr = 0xf731;
2889 } else {
2890 tabl = 0x137f;
2891 tabr = 0xfec8;
2892 }
2893 break;
2894 case 32:
2895 imask = 0x4;
2896 shift = 0;
2897 omask = 0x3;
2898 if (left) {
2899 tabl = (2 << 2) | 3;
2900 tabr = (3 << 2) | 1;
2901 } else {
2902 tabl = (1 << 2) | 3;
2903 tabr = (3 << 2) | 2;
2904 }
2905 break;
2906 default:
2907 abort();
2908 }
2909
2910 lo1 = tcg_temp_new();
2911 lo2 = tcg_temp_new();
2912 tcg_gen_andi_tl(lo1, s1, imask);
2913 tcg_gen_andi_tl(lo2, s2, imask);
2914 tcg_gen_shli_tl(lo1, lo1, shift);
2915 tcg_gen_shli_tl(lo2, lo2, shift);
2916
2917 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
2918 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
2919 tcg_gen_andi_tl(dst, lo1, omask);
2920 tcg_gen_andi_tl(lo2, lo2, omask);
2921
2922 amask = -8;
2923 if (AM_CHECK(dc)) {
2924 amask &= 0xffffffffULL;
2925 }
2926 tcg_gen_andi_tl(s1, s1, amask);
2927 tcg_gen_andi_tl(s2, s2, amask);
2928
2929 /* We want to compute
2930 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2931 We've already done dst = lo1, so this reduces to
2932 dst &= (s1 == s2 ? -1 : lo2)
2933 Which we perform by
2934 lo2 |= -(s1 == s2)
2935 dst &= lo2
2936 */
2937 tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2);
2938 tcg_gen_neg_tl(lo1, lo1);
2939 tcg_gen_or_tl(lo2, lo2, lo1);
2940 tcg_gen_and_tl(dst, dst, lo2);
2941 }
2942
2943 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2944 {
2945 TCGv tmp = tcg_temp_new();
2946
2947 tcg_gen_add_tl(tmp, s1, s2);
2948 tcg_gen_andi_tl(dst, tmp, -8);
2949 if (left) {
2950 tcg_gen_neg_tl(tmp, tmp);
2951 }
2952 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2953 }
2954
2955 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2956 {
2957 TCGv t1, t2, shift;
2958
2959 t1 = tcg_temp_new();
2960 t2 = tcg_temp_new();
2961 shift = tcg_temp_new();
2962
2963 tcg_gen_andi_tl(shift, gsr, 7);
2964 tcg_gen_shli_tl(shift, shift, 3);
2965 tcg_gen_shl_tl(t1, s1, shift);
2966
2967 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2968 shift of (up to 63) followed by a constant shift of 1. */
2969 tcg_gen_xori_tl(shift, shift, 63);
2970 tcg_gen_shr_tl(t2, s2, shift);
2971 tcg_gen_shri_tl(t2, t2, 1);
2972
2973 tcg_gen_or_tl(dst, t1, t2);
2974 }
2975 #endif
2976
/* Branch to the illegal_insn label of disas_sparc_insn unless the cpu
   model provides integer-unit feature FEATURE.  */
#define CHECK_IU_FEATURE(dc, FEATURE) \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
        goto illegal_insn;
/* Likewise, but branch to the nfpu_insn label for a missing FPU feature.  */
#define CHECK_FPU_FEATURE(dc, FEATURE) \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
        goto nfpu_insn;
2983
2984 /* before an instruction, dc->pc must be static */
2985 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2986 {
2987 unsigned int opc, rs1, rs2, rd;
2988 TCGv cpu_src1, cpu_src2;
2989 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2990 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2991 target_long simm;
2992
2993 opc = GET_FIELD(insn, 0, 1);
2994 rd = GET_FIELD(insn, 2, 6);
2995
2996 switch (opc) {
2997 case 0: /* branches/sethi */
2998 {
2999 unsigned int xop = GET_FIELD(insn, 7, 9);
3000 int32_t target;
3001 switch (xop) {
3002 #ifdef TARGET_SPARC64
3003 case 0x1: /* V9 BPcc */
3004 {
3005 int cc;
3006
3007 target = GET_FIELD_SP(insn, 0, 18);
3008 target = sign_extend(target, 19);
3009 target <<= 2;
3010 cc = GET_FIELD_SP(insn, 20, 21);
3011 if (cc == 0)
3012 do_branch(dc, target, insn, 0);
3013 else if (cc == 2)
3014 do_branch(dc, target, insn, 1);
3015 else
3016 goto illegal_insn;
3017 goto jmp_insn;
3018 }
3019 case 0x3: /* V9 BPr */
3020 {
3021 target = GET_FIELD_SP(insn, 0, 13) |
3022 (GET_FIELD_SP(insn, 20, 21) << 14);
3023 target = sign_extend(target, 16);
3024 target <<= 2;
3025 cpu_src1 = get_src1(dc, insn);
3026 do_branch_reg(dc, target, insn, cpu_src1);
3027 goto jmp_insn;
3028 }
3029 case 0x5: /* V9 FBPcc */
3030 {
3031 int cc = GET_FIELD_SP(insn, 20, 21);
3032 if (gen_trap_ifnofpu(dc)) {
3033 goto jmp_insn;
3034 }
3035 target = GET_FIELD_SP(insn, 0, 18);
3036 target = sign_extend(target, 19);
3037 target <<= 2;
3038 do_fbranch(dc, target, insn, cc);
3039 goto jmp_insn;
3040 }
3041 #else
3042 case 0x7: /* CBN+x */
3043 {
3044 goto ncp_insn;
3045 }
3046 #endif
3047 case 0x2: /* BN+x */
3048 {
3049 target = GET_FIELD(insn, 10, 31);
3050 target = sign_extend(target, 22);
3051 target <<= 2;
3052 do_branch(dc, target, insn, 0);
3053 goto jmp_insn;
3054 }
3055 case 0x6: /* FBN+x */
3056 {
3057 if (gen_trap_ifnofpu(dc)) {
3058 goto jmp_insn;
3059 }
3060 target = GET_FIELD(insn, 10, 31);
3061 target = sign_extend(target, 22);
3062 target <<= 2;
3063 do_fbranch(dc, target, insn, 0);
3064 goto jmp_insn;
3065 }
3066 case 0x4: /* SETHI */
3067 /* Special-case %g0 because that's the canonical nop. */
3068 if (rd) {
3069 uint32_t value = GET_FIELD(insn, 10, 31);
3070 TCGv t = gen_dest_gpr(dc, rd);
3071 tcg_gen_movi_tl(t, value << 10);
3072 gen_store_gpr(dc, rd, t);
3073 }
3074 break;
3075 case 0x0: /* UNIMPL */
3076 default:
3077 goto illegal_insn;
3078 }
3079 break;
3080 }
3081 break;
3082 case 1: /*CALL*/
3083 {
3084 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3085 TCGv o7 = gen_dest_gpr(dc, 15);
3086
3087 tcg_gen_movi_tl(o7, dc->pc);
3088 gen_store_gpr(dc, 15, o7);
3089 target += dc->pc;
3090 gen_mov_pc_npc(dc);
3091 #ifdef TARGET_SPARC64
3092 if (unlikely(AM_CHECK(dc))) {
3093 target &= 0xffffffffULL;
3094 }
3095 #endif
3096 dc->npc = target;
3097 }
3098 goto jmp_insn;
3099 case 2: /* FPU & Logical Operations */
3100 {
3101 unsigned int xop = GET_FIELD(insn, 7, 12);
3102 TCGv cpu_dst = tcg_temp_new();
3103 TCGv cpu_tmp0;
3104
3105 if (xop == 0x3a) { /* generate trap */
3106 int cond = GET_FIELD(insn, 3, 6);
3107 TCGv_i32 trap;
3108 TCGLabel *l1 = NULL;
3109 int mask;
3110
3111 if (cond == 0) {
3112 /* Trap never. */
3113 break;
3114 }
3115
3116 save_state(dc);
3117
3118 if (cond != 8) {
3119 /* Conditional trap. */
3120 DisasCompare cmp;
3121 #ifdef TARGET_SPARC64
3122 /* V9 icc/xcc */
3123 int cc = GET_FIELD_SP(insn, 11, 12);
3124 if (cc == 0) {
3125 gen_compare(&cmp, 0, cond, dc);
3126 } else if (cc == 2) {
3127 gen_compare(&cmp, 1, cond, dc);
3128 } else {
3129 goto illegal_insn;
3130 }
3131 #else
3132 gen_compare(&cmp, 0, cond, dc);
3133 #endif
3134 l1 = gen_new_label();
3135 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3136 cmp.c1, cmp.c2, l1);
3137 }
3138
3139 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3140 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3141
3142 /* Don't use the normal temporaries, as they may well have
3143 gone out of scope with the branch above. While we're
3144 doing that we might as well pre-truncate to 32-bit. */
3145 trap = tcg_temp_new_i32();
3146
3147 rs1 = GET_FIELD_SP(insn, 14, 18);
3148 if (IS_IMM) {
3149 rs2 = GET_FIELD_SP(insn, 0, 7);
3150 if (rs1 == 0) {
3151 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3152 /* Signal that the trap value is fully constant. */
3153 mask = 0;
3154 } else {
3155 TCGv t1 = gen_load_gpr(dc, rs1);
3156 tcg_gen_trunc_tl_i32(trap, t1);
3157 tcg_gen_addi_i32(trap, trap, rs2);
3158 }
3159 } else {
3160 TCGv t1, t2;
3161 rs2 = GET_FIELD_SP(insn, 0, 4);
3162 t1 = gen_load_gpr(dc, rs1);
3163 t2 = gen_load_gpr(dc, rs2);
3164 tcg_gen_add_tl(t1, t1, t2);
3165 tcg_gen_trunc_tl_i32(trap, t1);
3166 }
3167 if (mask != 0) {
3168 tcg_gen_andi_i32(trap, trap, mask);
3169 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3170 }
3171
3172 gen_helper_raise_exception(cpu_env, trap);
3173
3174 if (cond == 8) {
3175 /* An unconditional trap ends the TB. */
3176 dc->base.is_jmp = DISAS_NORETURN;
3177 goto jmp_insn;
3178 } else {
3179 /* A conditional trap falls through to the next insn. */
3180 gen_set_label(l1);
3181 break;
3182 }
3183 } else if (xop == 0x28) {
3184 rs1 = GET_FIELD(insn, 13, 17);
3185 switch(rs1) {
3186 case 0: /* rdy */
3187 #ifndef TARGET_SPARC64
3188 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3189 manual, rdy on the microSPARC
3190 II */
3191 case 0x0f: /* stbar in the SPARCv8 manual,
3192 rdy on the microSPARC II */
3193 case 0x10 ... 0x1f: /* implementation-dependent in the
3194 SPARCv8 manual, rdy on the
3195 microSPARC II */
3196 /* Read Asr17 */
3197 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3198 TCGv t = gen_dest_gpr(dc, rd);
3199 /* Read Asr17 for a Leon3 monoprocessor */
3200 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3201 gen_store_gpr(dc, rd, t);
3202 break;
3203 }
3204 #endif
3205 gen_store_gpr(dc, rd, cpu_y);
3206 break;
3207 #ifdef TARGET_SPARC64
3208 case 0x2: /* V9 rdccr */
3209 update_psr(dc);
3210 gen_helper_rdccr(cpu_dst, cpu_env);
3211 gen_store_gpr(dc, rd, cpu_dst);
3212 break;
3213 case 0x3: /* V9 rdasi */
3214 tcg_gen_movi_tl(cpu_dst, dc->asi);
3215 gen_store_gpr(dc, rd, cpu_dst);
3216 break;
3217 case 0x4: /* V9 rdtick */
3218 {
3219 TCGv_ptr r_tickptr;
3220 TCGv_i32 r_const;
3221
3222 r_tickptr = tcg_temp_new_ptr();
3223 r_const = tcg_constant_i32(dc->mem_idx);
3224 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3225 offsetof(CPUSPARCState, tick));
3226 if (translator_io_start(&dc->base)) {
3227 dc->base.is_jmp = DISAS_EXIT;
3228 }
3229 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3230 r_const);
3231 gen_store_gpr(dc, rd, cpu_dst);
3232 }
3233 break;
3234 case 0x5: /* V9 rdpc */
3235 {
3236 TCGv t = gen_dest_gpr(dc, rd);
3237 if (unlikely(AM_CHECK(dc))) {
3238 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3239 } else {
3240 tcg_gen_movi_tl(t, dc->pc);
3241 }
3242 gen_store_gpr(dc, rd, t);
3243 }
3244 break;
3245 case 0x6: /* V9 rdfprs */
3246 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3247 gen_store_gpr(dc, rd, cpu_dst);
3248 break;
3249 case 0xf: /* V9 membar */
3250 break; /* no effect */
3251 case 0x13: /* Graphics Status */
3252 if (gen_trap_ifnofpu(dc)) {
3253 goto jmp_insn;
3254 }
3255 gen_store_gpr(dc, rd, cpu_gsr);
3256 break;
3257 case 0x16: /* Softint */
3258 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3259 offsetof(CPUSPARCState, softint));
3260 gen_store_gpr(dc, rd, cpu_dst);
3261 break;
3262 case 0x17: /* Tick compare */
3263 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3264 break;
3265 case 0x18: /* System tick */
3266 {
3267 TCGv_ptr r_tickptr;
3268 TCGv_i32 r_const;
3269
3270 r_tickptr = tcg_temp_new_ptr();
3271 r_const = tcg_constant_i32(dc->mem_idx);
3272 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3273 offsetof(CPUSPARCState, stick));
3274 if (translator_io_start(&dc->base)) {
3275 dc->base.is_jmp = DISAS_EXIT;
3276 }
3277 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3278 r_const);
3279 gen_store_gpr(dc, rd, cpu_dst);
3280 }
3281 break;
3282 case 0x19: /* System tick compare */
3283 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3284 break;
3285 case 0x1a: /* UltraSPARC-T1 Strand status */
3286 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3287 * this ASR as impl. dep
3288 */
3289 CHECK_IU_FEATURE(dc, HYPV);
3290 {
3291 TCGv t = gen_dest_gpr(dc, rd);
3292 tcg_gen_movi_tl(t, 1UL);
3293 gen_store_gpr(dc, rd, t);
3294 }
3295 break;
3296 case 0x10: /* Performance Control */
3297 case 0x11: /* Performance Instrumentation Counter */
3298 case 0x12: /* Dispatch Control */
3299 case 0x14: /* Softint set, WO */
3300 case 0x15: /* Softint clear, WO */
3301 #endif
3302 default:
3303 goto illegal_insn;
3304 }
3305 #if !defined(CONFIG_USER_ONLY)
3306 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3307 #ifndef TARGET_SPARC64
3308 if (!supervisor(dc)) {
3309 goto priv_insn;
3310 }
3311 update_psr(dc);
3312 gen_helper_rdpsr(cpu_dst, cpu_env);
3313 #else
3314 CHECK_IU_FEATURE(dc, HYPV);
3315 if (!hypervisor(dc))
3316 goto priv_insn;
3317 rs1 = GET_FIELD(insn, 13, 17);
3318 switch (rs1) {
3319 case 0: // hpstate
3320 tcg_gen_ld_i64(cpu_dst, cpu_env,
3321 offsetof(CPUSPARCState, hpstate));
3322 break;
3323 case 1: // htstate
3324 // gen_op_rdhtstate();
3325 break;
3326 case 3: // hintp
3327 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3328 break;
3329 case 5: // htba
3330 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3331 break;
3332 case 6: // hver
3333 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3334 break;
3335 case 31: // hstick_cmpr
3336 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3337 break;
3338 default:
3339 goto illegal_insn;
3340 }
3341 #endif
3342 gen_store_gpr(dc, rd, cpu_dst);
3343 break;
3344 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3345 if (!supervisor(dc)) {
3346 goto priv_insn;
3347 }
3348 cpu_tmp0 = tcg_temp_new();
3349 #ifdef TARGET_SPARC64
3350 rs1 = GET_FIELD(insn, 13, 17);
3351 switch (rs1) {
3352 case 0: // tpc
3353 {
3354 TCGv_ptr r_tsptr;
3355
3356 r_tsptr = tcg_temp_new_ptr();
3357 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3358 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3359 offsetof(trap_state, tpc));
3360 }
3361 break;
3362 case 1: // tnpc
3363 {
3364 TCGv_ptr r_tsptr;
3365
3366 r_tsptr = tcg_temp_new_ptr();
3367 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3368 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3369 offsetof(trap_state, tnpc));
3370 }
3371 break;
3372 case 2: // tstate
3373 {
3374 TCGv_ptr r_tsptr;
3375
3376 r_tsptr = tcg_temp_new_ptr();
3377 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3378 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3379 offsetof(trap_state, tstate));
3380 }
3381 break;
3382 case 3: // tt
3383 {
3384 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3385
3386 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3387 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3388 offsetof(trap_state, tt));
3389 }
3390 break;
3391 case 4: // tick
3392 {
3393 TCGv_ptr r_tickptr;
3394 TCGv_i32 r_const;
3395
3396 r_tickptr = tcg_temp_new_ptr();
3397 r_const = tcg_constant_i32(dc->mem_idx);
3398 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3399 offsetof(CPUSPARCState, tick));
3400 if (translator_io_start(&dc->base)) {
3401 dc->base.is_jmp = DISAS_EXIT;
3402 }
3403 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3404 r_tickptr, r_const);
3405 }
3406 break;
3407 case 5: // tba
3408 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3409 break;
3410 case 6: // pstate
3411 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3412 offsetof(CPUSPARCState, pstate));
3413 break;
3414 case 7: // tl
3415 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3416 offsetof(CPUSPARCState, tl));
3417 break;
3418 case 8: // pil
3419 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3420 offsetof(CPUSPARCState, psrpil));
3421 break;
3422 case 9: // cwp
3423 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3424 break;
3425 case 10: // cansave
3426 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3427 offsetof(CPUSPARCState, cansave));
3428 break;
3429 case 11: // canrestore
3430 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3431 offsetof(CPUSPARCState, canrestore));
3432 break;
3433 case 12: // cleanwin
3434 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3435 offsetof(CPUSPARCState, cleanwin));
3436 break;
3437 case 13: // otherwin
3438 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3439 offsetof(CPUSPARCState, otherwin));
3440 break;
3441 case 14: // wstate
3442 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3443 offsetof(CPUSPARCState, wstate));
3444 break;
3445 case 16: // UA2005 gl
3446 CHECK_IU_FEATURE(dc, GL);
3447 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3448 offsetof(CPUSPARCState, gl));
3449 break;
3450 case 26: // UA2005 strand status
3451 CHECK_IU_FEATURE(dc, HYPV);
3452 if (!hypervisor(dc))
3453 goto priv_insn;
3454 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3455 break;
3456 case 31: // ver
3457 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3458 break;
3459 case 15: // fq
3460 default:
3461 goto illegal_insn;
3462 }
3463 #else
3464 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3465 #endif
3466 gen_store_gpr(dc, rd, cpu_tmp0);
3467 break;
3468 #endif
3469 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3470 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3471 #ifdef TARGET_SPARC64
3472 gen_helper_flushw(cpu_env);
3473 #else
3474 if (!supervisor(dc))
3475 goto priv_insn;
3476 gen_store_gpr(dc, rd, cpu_tbr);
3477 #endif
3478 break;
3479 #endif
3480 } else if (xop == 0x34) { /* FPU Operations */
3481 if (gen_trap_ifnofpu(dc)) {
3482 goto jmp_insn;
3483 }
3484 gen_op_clear_ieee_excp_and_FTT();
3485 rs1 = GET_FIELD(insn, 13, 17);
3486 rs2 = GET_FIELD(insn, 27, 31);
3487 xop = GET_FIELD(insn, 18, 26);
3488
3489 switch (xop) {
3490 case 0x1: /* fmovs */
3491 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3492 gen_store_fpr_F(dc, rd, cpu_src1_32);
3493 break;
3494 case 0x5: /* fnegs */
3495 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3496 break;
3497 case 0x9: /* fabss */
3498 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3499 break;
3500 case 0x29: /* fsqrts */
3501 CHECK_FPU_FEATURE(dc, FSQRT);
3502 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3503 break;
3504 case 0x2a: /* fsqrtd */
3505 CHECK_FPU_FEATURE(dc, FSQRT);
3506 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3507 break;
3508 case 0x2b: /* fsqrtq */
3509 CHECK_FPU_FEATURE(dc, FLOAT128);
3510 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3511 break;
3512 case 0x41: /* fadds */
3513 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3514 break;
3515 case 0x42: /* faddd */
3516 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3517 break;
3518 case 0x43: /* faddq */
3519 CHECK_FPU_FEATURE(dc, FLOAT128);
3520 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3521 break;
3522 case 0x45: /* fsubs */
3523 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3524 break;
3525 case 0x46: /* fsubd */
3526 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3527 break;
3528 case 0x47: /* fsubq */
3529 CHECK_FPU_FEATURE(dc, FLOAT128);
3530 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3531 break;
3532 case 0x49: /* fmuls */
3533 CHECK_FPU_FEATURE(dc, FMUL);
3534 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3535 break;
3536 case 0x4a: /* fmuld */
3537 CHECK_FPU_FEATURE(dc, FMUL);
3538 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3539 break;
3540 case 0x4b: /* fmulq */
3541 CHECK_FPU_FEATURE(dc, FLOAT128);
3542 CHECK_FPU_FEATURE(dc, FMUL);
3543 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3544 break;
3545 case 0x4d: /* fdivs */
3546 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3547 break;
3548 case 0x4e: /* fdivd */
3549 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3550 break;
3551 case 0x4f: /* fdivq */
3552 CHECK_FPU_FEATURE(dc, FLOAT128);
3553 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3554 break;
3555 case 0x69: /* fsmuld */
3556 CHECK_FPU_FEATURE(dc, FSMULD);
3557 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3558 break;
3559 case 0x6e: /* fdmulq */
3560 CHECK_FPU_FEATURE(dc, FLOAT128);
3561 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3562 break;
3563 case 0xc4: /* fitos */
3564 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3565 break;
3566 case 0xc6: /* fdtos */
3567 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3568 break;
3569 case 0xc7: /* fqtos */
3570 CHECK_FPU_FEATURE(dc, FLOAT128);
3571 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3572 break;
3573 case 0xc8: /* fitod */
3574 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3575 break;
3576 case 0xc9: /* fstod */
3577 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3578 break;
3579 case 0xcb: /* fqtod */
3580 CHECK_FPU_FEATURE(dc, FLOAT128);
3581 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3582 break;
3583 case 0xcc: /* fitoq */
3584 CHECK_FPU_FEATURE(dc, FLOAT128);
3585 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3586 break;
3587 case 0xcd: /* fstoq */
3588 CHECK_FPU_FEATURE(dc, FLOAT128);
3589 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3590 break;
3591 case 0xce: /* fdtoq */
3592 CHECK_FPU_FEATURE(dc, FLOAT128);
3593 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3594 break;
3595 case 0xd1: /* fstoi */
3596 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3597 break;
3598 case 0xd2: /* fdtoi */
3599 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3600 break;
3601 case 0xd3: /* fqtoi */
3602 CHECK_FPU_FEATURE(dc, FLOAT128);
3603 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3604 break;
3605 #ifdef TARGET_SPARC64
3606 case 0x2: /* V9 fmovd */
3607 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3608 gen_store_fpr_D(dc, rd, cpu_src1_64);
3609 break;
3610 case 0x3: /* V9 fmovq */
3611 CHECK_FPU_FEATURE(dc, FLOAT128);
3612 gen_move_Q(dc, rd, rs2);
3613 break;
3614 case 0x6: /* V9 fnegd */
3615 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3616 break;
3617 case 0x7: /* V9 fnegq */
3618 CHECK_FPU_FEATURE(dc, FLOAT128);
3619 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3620 break;
3621 case 0xa: /* V9 fabsd */
3622 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3623 break;
3624 case 0xb: /* V9 fabsq */
3625 CHECK_FPU_FEATURE(dc, FLOAT128);
3626 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3627 break;
3628 case 0x81: /* V9 fstox */
3629 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3630 break;
3631 case 0x82: /* V9 fdtox */
3632 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3633 break;
3634 case 0x83: /* V9 fqtox */
3635 CHECK_FPU_FEATURE(dc, FLOAT128);
3636 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3637 break;
3638 case 0x84: /* V9 fxtos */
3639 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3640 break;
3641 case 0x88: /* V9 fxtod */
3642 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3643 break;
3644 case 0x8c: /* V9 fxtoq */
3645 CHECK_FPU_FEATURE(dc, FLOAT128);
3646 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3647 break;
3648 #endif
3649 default:
3650 goto illegal_insn;
3651 }
3652 } else if (xop == 0x35) { /* FPU Operations */
3653 #ifdef TARGET_SPARC64
3654 int cond;
3655 #endif
3656 if (gen_trap_ifnofpu(dc)) {
3657 goto jmp_insn;
3658 }
3659 gen_op_clear_ieee_excp_and_FTT();
3660 rs1 = GET_FIELD(insn, 13, 17);
3661 rs2 = GET_FIELD(insn, 27, 31);
3662 xop = GET_FIELD(insn, 18, 26);
3663
3664 #ifdef TARGET_SPARC64
3665 #define FMOVR(sz) \
3666 do { \
3667 DisasCompare cmp; \
3668 cond = GET_FIELD_SP(insn, 10, 12); \
3669 cpu_src1 = get_src1(dc, insn); \
3670 gen_compare_reg(&cmp, cond, cpu_src1); \
3671 gen_fmov##sz(dc, &cmp, rd, rs2); \
3672 } while (0)
3673
3674 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3675 FMOVR(s);
3676 break;
3677 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3678 FMOVR(d);
3679 break;
3680 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3681 CHECK_FPU_FEATURE(dc, FLOAT128);
3682 FMOVR(q);
3683 break;
3684 }
3685 #undef FMOVR
3686 #endif
3687 switch (xop) {
3688 #ifdef TARGET_SPARC64
3689 #define FMOVCC(fcc, sz) \
3690 do { \
3691 DisasCompare cmp; \
3692 cond = GET_FIELD_SP(insn, 14, 17); \
3693 gen_fcompare(&cmp, fcc, cond); \
3694 gen_fmov##sz(dc, &cmp, rd, rs2); \
3695 } while (0)
3696
3697 case 0x001: /* V9 fmovscc %fcc0 */
3698 FMOVCC(0, s);
3699 break;
3700 case 0x002: /* V9 fmovdcc %fcc0 */
3701 FMOVCC(0, d);
3702 break;
3703 case 0x003: /* V9 fmovqcc %fcc0 */
3704 CHECK_FPU_FEATURE(dc, FLOAT128);
3705 FMOVCC(0, q);
3706 break;
3707 case 0x041: /* V9 fmovscc %fcc1 */
3708 FMOVCC(1, s);
3709 break;
3710 case 0x042: /* V9 fmovdcc %fcc1 */
3711 FMOVCC(1, d);
3712 break;
3713 case 0x043: /* V9 fmovqcc %fcc1 */
3714 CHECK_FPU_FEATURE(dc, FLOAT128);
3715 FMOVCC(1, q);
3716 break;
3717 case 0x081: /* V9 fmovscc %fcc2 */
3718 FMOVCC(2, s);
3719 break;
3720 case 0x082: /* V9 fmovdcc %fcc2 */
3721 FMOVCC(2, d);
3722 break;
3723 case 0x083: /* V9 fmovqcc %fcc2 */
3724 CHECK_FPU_FEATURE(dc, FLOAT128);
3725 FMOVCC(2, q);
3726 break;
3727 case 0x0c1: /* V9 fmovscc %fcc3 */
3728 FMOVCC(3, s);
3729 break;
3730 case 0x0c2: /* V9 fmovdcc %fcc3 */
3731 FMOVCC(3, d);
3732 break;
3733 case 0x0c3: /* V9 fmovqcc %fcc3 */
3734 CHECK_FPU_FEATURE(dc, FLOAT128);
3735 FMOVCC(3, q);
3736 break;
3737 #undef FMOVCC
3738 #define FMOVCC(xcc, sz) \
3739 do { \
3740 DisasCompare cmp; \
3741 cond = GET_FIELD_SP(insn, 14, 17); \
3742 gen_compare(&cmp, xcc, cond, dc); \
3743 gen_fmov##sz(dc, &cmp, rd, rs2); \
3744 } while (0)
3745
3746 case 0x101: /* V9 fmovscc %icc */
3747 FMOVCC(0, s);
3748 break;
3749 case 0x102: /* V9 fmovdcc %icc */
3750 FMOVCC(0, d);
3751 break;
3752 case 0x103: /* V9 fmovqcc %icc */
3753 CHECK_FPU_FEATURE(dc, FLOAT128);
3754 FMOVCC(0, q);
3755 break;
3756 case 0x181: /* V9 fmovscc %xcc */
3757 FMOVCC(1, s);
3758 break;
3759 case 0x182: /* V9 fmovdcc %xcc */
3760 FMOVCC(1, d);
3761 break;
3762 case 0x183: /* V9 fmovqcc %xcc */
3763 CHECK_FPU_FEATURE(dc, FLOAT128);
3764 FMOVCC(1, q);
3765 break;
3766 #undef FMOVCC
3767 #endif
3768 case 0x51: /* fcmps, V9 %fcc */
3769 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3770 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3771 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3772 break;
3773 case 0x52: /* fcmpd, V9 %fcc */
3774 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3775 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3776 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3777 break;
3778 case 0x53: /* fcmpq, V9 %fcc */
3779 CHECK_FPU_FEATURE(dc, FLOAT128);
3780 gen_op_load_fpr_QT0(QFPREG(rs1));
3781 gen_op_load_fpr_QT1(QFPREG(rs2));
3782 gen_op_fcmpq(rd & 3);
3783 break;
3784 case 0x55: /* fcmpes, V9 %fcc */
3785 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3786 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3787 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3788 break;
3789 case 0x56: /* fcmped, V9 %fcc */
3790 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3791 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3792 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3793 break;
3794 case 0x57: /* fcmpeq, V9 %fcc */
3795 CHECK_FPU_FEATURE(dc, FLOAT128);
3796 gen_op_load_fpr_QT0(QFPREG(rs1));
3797 gen_op_load_fpr_QT1(QFPREG(rs2));
3798 gen_op_fcmpeq(rd & 3);
3799 break;
3800 default:
3801 goto illegal_insn;
3802 }
3803 } else if (xop == 0x2) {
/* OR (xop 0x2) is special-cased: with %g0 as an operand it is the
 * canonical SPARC synthetic for mov/clr, so the zero-register cases
 * are peeled off to avoid emitting a useless TCG "or" op. */
3804 TCGv dst = gen_dest_gpr(dc, rd);
3805 rs1 = GET_FIELD(insn, 13, 17);
3806 if (rs1 == 0) {
3807 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3808 if (IS_IMM) { /* immediate */
3809 simm = GET_FIELDs(insn, 19, 31);
3810 tcg_gen_movi_tl(dst, simm);
3811 gen_store_gpr(dc, rd, dst);
3812 } else { /* register */
3813 rs2 = GET_FIELD(insn, 27, 31);
3814 if (rs2 == 0) {
/* or %g0, %g0, y -> clr y */
3815 tcg_gen_movi_tl(dst, 0);
3816 gen_store_gpr(dc, rd, dst);
3817 } else {
3818 cpu_src2 = gen_load_gpr(dc, rs2);
3819 gen_store_gpr(dc, rd, cpu_src2);
3820 }
3821 }
3822 } else {
3823 cpu_src1 = get_src1(dc, insn);
3824 if (IS_IMM) { /* immediate */
3825 simm = GET_FIELDs(insn, 19, 31);
3826 tcg_gen_ori_tl(dst, cpu_src1, simm);
3827 gen_store_gpr(dc, rd, dst);
3828 } else { /* register */
3829 rs2 = GET_FIELD(insn, 27, 31);
3830 if (rs2 == 0) {
3831 /* mov shortcut: or x, %g0, y -> mov x, y */
3832 gen_store_gpr(dc, rd, cpu_src1);
3833 } else {
3834 cpu_src2 = gen_load_gpr(dc, rs2);
3835 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3836 gen_store_gpr(dc, rd, dst);
3837 }
3838 }
3839 }
3840 #ifdef TARGET_SPARC64
/* V9 shifts: bit 12 of the instruction selects the 64-bit "x" form
 * (sllx/srlx/srax, 6-bit shift count masked with 0x3f) versus the
 * 32-bit form (5-bit count masked with 0x1f).  For the 32-bit srl/sra
 * forms the source is first truncated (zero- or sign-extended) to 32
 * bits so the shifted-in bits match the V8 semantics. */
3841 } else if (xop == 0x25) { /* sll, V9 sllx */
3842 cpu_src1 = get_src1(dc, insn);
3843 if (IS_IMM) { /* immediate */
3844 simm = GET_FIELDs(insn, 20, 31);
3845 if (insn & (1 << 12)) {
3846 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3847 } else {
/* 32-bit sll: only low 5 bits of count; no truncation of the
 * source is needed for a left shift. */
3848 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3849 }
3850 } else { /* register */
3851 rs2 = GET_FIELD(insn, 27, 31);
3852 cpu_src2 = gen_load_gpr(dc, rs2);
3853 cpu_tmp0 = tcg_temp_new();
3854 if (insn & (1 << 12)) {
3855 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3856 } else {
3857 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3858 }
3859 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3860 }
3861 gen_store_gpr(dc, rd, cpu_dst);
3862 } else if (xop == 0x26) { /* srl, V9 srlx */
3863 cpu_src1 = get_src1(dc, insn);
3864 if (IS_IMM) { /* immediate */
3865 simm = GET_FIELDs(insn, 20, 31);
3866 if (insn & (1 << 12)) {
3867 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3868 } else {
/* 32-bit srl: zero-extend the low word first so zeros are
 * shifted in from bit 31, not bit 63. */
3869 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3870 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3871 }
3872 } else { /* register */
3873 rs2 = GET_FIELD(insn, 27, 31);
3874 cpu_src2 = gen_load_gpr(dc, rs2);
3875 cpu_tmp0 = tcg_temp_new();
3876 if (insn & (1 << 12)) {
3877 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3878 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3879 } else {
3880 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3881 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3882 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3883 }
3884 }
3885 gen_store_gpr(dc, rd, cpu_dst);
3886 } else if (xop == 0x27) { /* sra, V9 srax */
3887 cpu_src1 = get_src1(dc, insn);
3888 if (IS_IMM) { /* immediate */
3889 simm = GET_FIELDs(insn, 20, 31);
3890 if (insn & (1 << 12)) {
3891 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3892 } else {
/* 32-bit sra: sign-extend from bit 31 first so the copies of
 * the (32-bit) sign bit are shifted in. */
3893 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3894 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3895 }
3896 } else { /* register */
3897 rs2 = GET_FIELD(insn, 27, 31);
3898 cpu_src2 = gen_load_gpr(dc, rs2);
3899 cpu_tmp0 = tcg_temp_new();
3900 if (insn & (1 << 12)) {
3901 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3902 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3903 } else {
3904 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3905 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3906 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3907 }
3908 }
3909 gen_store_gpr(dc, rd, cpu_dst);
3910 #endif
3911 } else if (xop < 0x36) {
3912 if (xop < 0x20) {
/* Basic ALU group.  Bit 4 of xop (0x10) selects the "cc"
 * (condition-code setting) variant of each opcode, so the switch
 * dispatches on xop & ~0x10 and each case tests the bit itself.
 * For logic ops the flags are computed lazily: only the result is
 * saved in cpu_cc_dst and cc_op is set to CC_OP_LOGIC; the actual
 * N/Z bits are derived later when the flags are read. */
3913 cpu_src1 = get_src1(dc, insn);
3914 cpu_src2 = get_src2(dc, insn);
3915 switch (xop & ~0x10) {
3916 case 0x0: /* add */
3917 if (xop & 0x10) {
3918 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3919 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3920 dc->cc_op = CC_OP_ADD;
3921 } else {
3922 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3923 }
3924 break;
3925 case 0x1: /* and */
3926 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3927 if (xop & 0x10) {
3928 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3929 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3930 dc->cc_op = CC_OP_LOGIC;
3931 }
3932 break;
3933 case 0x2: /* or */
3934 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3935 if (xop & 0x10) {
3936 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3937 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3938 dc->cc_op = CC_OP_LOGIC;
3939 }
3940 break;
3941 case 0x3: /* xor */
3942 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3943 if (xop & 0x10) {
3944 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3945 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3946 dc->cc_op = CC_OP_LOGIC;
3947 }
3948 break;
3949 case 0x4: /* sub */
3950 if (xop & 0x10) {
3951 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3952 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3953 dc->cc_op = CC_OP_SUB;
3954 } else {
3955 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3956 }
3957 break;
3958 case 0x5: /* andn */
3959 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3960 if (xop & 0x10) {
3961 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3962 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3963 dc->cc_op = CC_OP_LOGIC;
3964 }
3965 break;
3966 case 0x6: /* orn */
3967 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3968 if (xop & 0x10) {
3969 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3970 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3971 dc->cc_op = CC_OP_LOGIC;
3972 }
3973 break;
3974 case 0x7: /* xorn */
/* xnor rd, rs1, rs2 == eqv (not(a ^ b)) */
3975 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3976 if (xop & 0x10) {
3977 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3978 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3979 dc->cc_op = CC_OP_LOGIC;
3980 }
3981 break;
3982 case 0x8: /* addx, V9 addc */
/* add-with-carry; the helper also handles the cc variant,
 * selected by the flag argument. */
3983 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3984 (xop & 0x10));
3985 break;
3986 #ifdef TARGET_SPARC64
3987 case 0x9: /* V9 mulx */
3988 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3989 break;
3990 #endif
3991 case 0xa: /* umul */
3992 CHECK_IU_FEATURE(dc, MUL);
3993 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3994 if (xop & 0x10) {
3995 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3996 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3997 dc->cc_op = CC_OP_LOGIC;
3998 }
3999 break;
4000 case 0xb: /* smul */
4001 CHECK_IU_FEATURE(dc, MUL);
4002 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4003 if (xop & 0x10) {
4004 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4005 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4006 dc->cc_op = CC_OP_LOGIC;
4007 }
4008 break;
4009 case 0xc: /* subx, V9 subc */
4010 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4011 (xop & 0x10));
4012 break;
4013 #ifdef TARGET_SPARC64
4014 case 0xd: /* V9 udivx */
4015 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4016 break;
4017 #endif
4018 case 0xe: /* udiv */
4019 CHECK_IU_FEATURE(dc, DIV);
4020 if (xop & 0x10) {
/* NOTE(review): unlike add/sub above, no explicit store to
 * cpu_cc_op is emitted here — presumably the *_cc division
 * helper updates env state (incl. cc_op) itself; confirm
 * against the helper definitions. */
4021 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4022 cpu_src2);
4023 dc->cc_op = CC_OP_DIV;
4024 } else {
4025 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4026 cpu_src2);
4027 }
4028 break;
4029 case 0xf: /* sdiv */
4030 CHECK_IU_FEATURE(dc, DIV);
4031 if (xop & 0x10) {
4032 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4033 cpu_src2);
4034 dc->cc_op = CC_OP_DIV;
4035 } else {
4036 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4037 cpu_src2);
4038 }
4039 break;
4040 default:
4041 goto illegal_insn;
4042 }
/* Common result writeback for the whole ALU group. */
4043 gen_store_gpr(dc, rd, cpu_dst);
4044 } else {
4045 cpu_src1 = get_src1(dc, insn);
4046 cpu_src2 = get_src2(dc, insn);
4047 switch (xop) {
4048 case 0x20: /* taddcc */
4049 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4050 gen_store_gpr(dc, rd, cpu_dst);
4051 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4052 dc->cc_op = CC_OP_TADD;
4053 break;
4054 case 0x21: /* tsubcc */
4055 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4056 gen_store_gpr(dc, rd, cpu_dst);
4057 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4058 dc->cc_op = CC_OP_TSUB;
4059 break;
4060 case 0x22: /* taddcctv */
4061 gen_helper_taddcctv(cpu_dst, cpu_env,
4062 cpu_src1, cpu_src2);
4063 gen_store_gpr(dc, rd, cpu_dst);
4064 dc->cc_op = CC_OP_TADDTV;
4065 break;
4066 case 0x23: /* tsubcctv */
4067 gen_helper_tsubcctv(cpu_dst, cpu_env,
4068 cpu_src1, cpu_src2);
4069 gen_store_gpr(dc, rd, cpu_dst);
4070 dc->cc_op = CC_OP_TSUBTV;
4071 break;
4072 case 0x24: /* mulscc */
4073 update_psr(dc);
4074 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4075 gen_store_gpr(dc, rd, cpu_dst);
4076 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4077 dc->cc_op = CC_OP_ADD;
4078 break;
4079 #ifndef TARGET_SPARC64
4080 case 0x25: /* sll */
4081 if (IS_IMM) { /* immediate */
4082 simm = GET_FIELDs(insn, 20, 31);
4083 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4084 } else { /* register */
4085 cpu_tmp0 = tcg_temp_new();
4086 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4087 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4088 }
4089 gen_store_gpr(dc, rd, cpu_dst);
4090 break;
4091 case 0x26: /* srl */
4092 if (IS_IMM) { /* immediate */
4093 simm = GET_FIELDs(insn, 20, 31);
4094 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4095 } else { /* register */
4096 cpu_tmp0 = tcg_temp_new();
4097 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4098 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4099 }
4100 gen_store_gpr(dc, rd, cpu_dst);
4101 break;
4102 case 0x27: /* sra */
4103 if (IS_IMM) { /* immediate */
4104 simm = GET_FIELDs(insn, 20, 31);
4105 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4106 } else { /* register */
4107 cpu_tmp0 = tcg_temp_new();
4108 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4109 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4110 }
4111 gen_store_gpr(dc, rd, cpu_dst);
4112 break;
4113 #endif
4114 case 0x30:
4115 {
4116 cpu_tmp0 = tcg_temp_new();
4117 switch(rd) {
4118 case 0: /* wry */
4119 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4120 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4121 break;
4122 #ifndef TARGET_SPARC64
4123 case 0x01 ... 0x0f: /* undefined in the
4124 SPARCv8 manual, nop
4125 on the microSPARC
4126 II */
4127 case 0x10 ... 0x1f: /* implementation-dependent
4128 in the SPARCv8
4129 manual, nop on the
4130 microSPARC II */
4131 if ((rd == 0x13) && (dc->def->features &
4132 CPU_FEATURE_POWERDOWN)) {
4133 /* LEON3 power-down */
4134 save_state(dc);
4135 gen_helper_power_down(cpu_env);
4136 }
4137 break;
4138 #else
4139 case 0x2: /* V9 wrccr */
4140 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4141 gen_helper_wrccr(cpu_env, cpu_tmp0);
4142 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4143 dc->cc_op = CC_OP_FLAGS;
4144 break;
4145 case 0x3: /* V9 wrasi */
4146 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4147 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4148 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4149 offsetof(CPUSPARCState, asi));
4150 /* End TB to notice changed ASI. */
4151 save_state(dc);
4152 gen_op_next_insn();
4153 tcg_gen_exit_tb(NULL, 0);
4154 dc->base.is_jmp = DISAS_NORETURN;
4155 break;
4156 case 0x6: /* V9 wrfprs */
4157 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4158 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4159 dc->fprs_dirty = 0;
4160 save_state(dc);
4161 gen_op_next_insn();
4162 tcg_gen_exit_tb(NULL, 0);
4163 dc->base.is_jmp = DISAS_NORETURN;
4164 break;
4165 case 0xf: /* V9 sir, nop if user */
4166 #if !defined(CONFIG_USER_ONLY)
4167 if (supervisor(dc)) {
4168 ; // XXX
4169 }
4170 #endif
4171 break;
4172 case 0x13: /* Graphics Status */
4173 if (gen_trap_ifnofpu(dc)) {
4174 goto jmp_insn;
4175 }
4176 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4177 break;
4178 case 0x14: /* Softint set */
4179 if (!supervisor(dc))
4180 goto illegal_insn;
4181 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4182 gen_helper_set_softint(cpu_env, cpu_tmp0);
4183 break;
4184 case 0x15: /* Softint clear */
4185 if (!supervisor(dc))
4186 goto illegal_insn;
4187 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4188 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4189 break;
4190 case 0x16: /* Softint write */
4191 if (!supervisor(dc))
4192 goto illegal_insn;
4193 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4194 gen_helper_write_softint(cpu_env, cpu_tmp0);
4195 break;
4196 case 0x17: /* Tick compare */
4197 #if !defined(CONFIG_USER_ONLY)
4198 if (!supervisor(dc))
4199 goto illegal_insn;
4200 #endif
4201 {
4202 TCGv_ptr r_tickptr;
4203
4204 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4205 cpu_src2);
4206 r_tickptr = tcg_temp_new_ptr();
4207 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4208 offsetof(CPUSPARCState, tick));
4209 translator_io_start(&dc->base);
4210 gen_helper_tick_set_limit(r_tickptr,
4211 cpu_tick_cmpr);
4212 /* End TB to handle timer interrupt */
4213 dc->base.is_jmp = DISAS_EXIT;
4214 }
4215 break;
4216 case 0x18: /* System tick */
4217 #if !defined(CONFIG_USER_ONLY)
4218 if (!supervisor(dc))
4219 goto illegal_insn;
4220 #endif
4221 {
4222 TCGv_ptr r_tickptr;
4223
4224 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4225 cpu_src2);
4226 r_tickptr = tcg_temp_new_ptr();
4227 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4228 offsetof(CPUSPARCState, stick));
4229 translator_io_start(&dc->base);
4230 gen_helper_tick_set_count(r_tickptr,
4231 cpu_tmp0);
4232 /* End TB to handle timer interrupt */
4233 dc->base.is_jmp = DISAS_EXIT;
4234 }
4235 break;
4236 case 0x19: /* System tick compare */
4237 #if !defined(CONFIG_USER_ONLY)
4238 if (!supervisor(dc))
4239 goto illegal_insn;
4240 #endif
4241 {
4242 TCGv_ptr r_tickptr;
4243
4244 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4245 cpu_src2);
4246 r_tickptr = tcg_temp_new_ptr();
4247 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4248 offsetof(CPUSPARCState, stick));
4249 translator_io_start(&dc->base);
4250 gen_helper_tick_set_limit(r_tickptr,
4251 cpu_stick_cmpr);
4252 /* End TB to handle timer interrupt */
4253 dc->base.is_jmp = DISAS_EXIT;
4254 }
4255 break;
4256
4257 case 0x10: /* Performance Control */
4258 case 0x11: /* Performance Instrumentation
4259 Counter */
4260 case 0x12: /* Dispatch Control */
4261 #endif
4262 default:
4263 goto illegal_insn;
4264 }
4265 }
4266 break;
4267 #if !defined(CONFIG_USER_ONLY)
4268 case 0x31: /* wrpsr, V9 saved, restored */
4269 {
4270 if (!supervisor(dc))
4271 goto priv_insn;
4272 #ifdef TARGET_SPARC64
4273 switch (rd) {
4274 case 0:
4275 gen_helper_saved(cpu_env);
4276 break;
4277 case 1:
4278 gen_helper_restored(cpu_env);
4279 break;
4280 case 2: /* UA2005 allclean */
4281 case 3: /* UA2005 otherw */
4282 case 4: /* UA2005 normalw */
4283 case 5: /* UA2005 invalw */
4284 // XXX
4285 default:
4286 goto illegal_insn;
4287 }
4288 #else
4289 cpu_tmp0 = tcg_temp_new();
4290 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4291 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4292 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4293 dc->cc_op = CC_OP_FLAGS;
4294 save_state(dc);
4295 gen_op_next_insn();
4296 tcg_gen_exit_tb(NULL, 0);
4297 dc->base.is_jmp = DISAS_NORETURN;
4298 #endif
4299 }
4300 break;
4301 case 0x32: /* wrwim, V9 wrpr */
4302 {
4303 if (!supervisor(dc))
4304 goto priv_insn;
4305 cpu_tmp0 = tcg_temp_new();
4306 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4307 #ifdef TARGET_SPARC64
4308 switch (rd) {
4309 case 0: // tpc
4310 {
4311 TCGv_ptr r_tsptr;
4312
4313 r_tsptr = tcg_temp_new_ptr();
4314 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4315 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4316 offsetof(trap_state, tpc));
4317 }
4318 break;
4319 case 1: // tnpc
4320 {
4321 TCGv_ptr r_tsptr;
4322
4323 r_tsptr = tcg_temp_new_ptr();
4324 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4325 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4326 offsetof(trap_state, tnpc));
4327 }
4328 break;
4329 case 2: // tstate
4330 {
4331 TCGv_ptr r_tsptr;
4332
4333 r_tsptr = tcg_temp_new_ptr();
4334 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4335 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4336 offsetof(trap_state,
4337 tstate));
4338 }
4339 break;
4340 case 3: // tt
4341 {
4342 TCGv_ptr r_tsptr;
4343
4344 r_tsptr = tcg_temp_new_ptr();
4345 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4346 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4347 offsetof(trap_state, tt));
4348 }
4349 break;
4350 case 4: // tick
4351 {
4352 TCGv_ptr r_tickptr;
4353
4354 r_tickptr = tcg_temp_new_ptr();
4355 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4356 offsetof(CPUSPARCState, tick));
4357 translator_io_start(&dc->base);
4358 gen_helper_tick_set_count(r_tickptr,
4359 cpu_tmp0);
4360 /* End TB to handle timer interrupt */
4361 dc->base.is_jmp = DISAS_EXIT;
4362 }
4363 break;
4364 case 5: // tba
4365 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4366 break;
4367 case 6: // pstate
4368 save_state(dc);
4369 if (translator_io_start(&dc->base)) {
4370 dc->base.is_jmp = DISAS_EXIT;
4371 }
4372 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4373 dc->npc = DYNAMIC_PC;
4374 break;
4375 case 7: // tl
4376 save_state(dc);
4377 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4378 offsetof(CPUSPARCState, tl));
4379 dc->npc = DYNAMIC_PC;
4380 break;
4381 case 8: // pil
4382 if (translator_io_start(&dc->base)) {
4383 dc->base.is_jmp = DISAS_EXIT;
4384 }
4385 gen_helper_wrpil(cpu_env, cpu_tmp0);
4386 break;
4387 case 9: // cwp
4388 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4389 break;
4390 case 10: // cansave
4391 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4392 offsetof(CPUSPARCState,
4393 cansave));
4394 break;
4395 case 11: // canrestore
4396 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4397 offsetof(CPUSPARCState,
4398 canrestore));
4399 break;
4400 case 12: // cleanwin
4401 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4402 offsetof(CPUSPARCState,
4403 cleanwin));
4404 break;
4405 case 13: // otherwin
4406 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4407 offsetof(CPUSPARCState,
4408 otherwin));
4409 break;
4410 case 14: // wstate
4411 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4412 offsetof(CPUSPARCState,
4413 wstate));
4414 break;
4415 case 16: // UA2005 gl
4416 CHECK_IU_FEATURE(dc, GL);
4417 gen_helper_wrgl(cpu_env, cpu_tmp0);
4418 break;
4419 case 26: // UA2005 strand status
4420 CHECK_IU_FEATURE(dc, HYPV);
4421 if (!hypervisor(dc))
4422 goto priv_insn;
4423 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4424 break;
4425 default:
4426 goto illegal_insn;
4427 }
4428 #else
4429 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4430 if (dc->def->nwindows != 32) {
4431 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4432 (1 << dc->def->nwindows) - 1);
4433 }
4434 #endif
4435 }
4436 break;
4437 case 0x33: /* wrtbr, UA2005 wrhpr */
4438 {
4439 #ifndef TARGET_SPARC64
4440 if (!supervisor(dc))
4441 goto priv_insn;
4442 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4443 #else
4444 CHECK_IU_FEATURE(dc, HYPV);
4445 if (!hypervisor(dc))
4446 goto priv_insn;
4447 cpu_tmp0 = tcg_temp_new();
4448 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4449 switch (rd) {
4450 case 0: // hpstate
4451 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4452 offsetof(CPUSPARCState,
4453 hpstate));
4454 save_state(dc);
4455 gen_op_next_insn();
4456 tcg_gen_exit_tb(NULL, 0);
4457 dc->base.is_jmp = DISAS_NORETURN;
4458 break;
4459 case 1: // htstate
4460 // XXX gen_op_wrhtstate();
4461 break;
4462 case 3: // hintp
4463 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4464 break;
4465 case 5: // htba
4466 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4467 break;
4468 case 31: // hstick_cmpr
4469 {
4470 TCGv_ptr r_tickptr;
4471
4472 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4473 r_tickptr = tcg_temp_new_ptr();
4474 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4475 offsetof(CPUSPARCState, hstick));
4476 translator_io_start(&dc->base);
4477 gen_helper_tick_set_limit(r_tickptr,
4478 cpu_hstick_cmpr);
4479 /* End TB to handle timer interrupt */
4480 dc->base.is_jmp = DISAS_EXIT;
4481 }
4482 break;
4483 case 6: // hver readonly
4484 default:
4485 goto illegal_insn;
4486 }
4487 #endif
4488 }
4489 break;
4490 #endif
4491 #ifdef TARGET_SPARC64
4492 case 0x2c: /* V9 movcc */
4493 {
/* Conditional move on integer or FP condition codes.  Insn bit 18
 * selects integer ccs (cc field: 0 = %icc, 2 = %xcc, others are
 * illegal) versus FP ccs (cc field picks %fcc0..%fcc3). */
4494 int cc = GET_FIELD_SP(insn, 11, 12);
4495 int cond = GET_FIELD_SP(insn, 14, 17);
4496 DisasCompare cmp;
4497 TCGv dst;
4498
4499 if (insn & (1 << 18)) {
4500 if (cc == 0) {
4501 gen_compare(&cmp, 0, cond, dc);
4502 } else if (cc == 2) {
4503 gen_compare(&cmp, 1, cond, dc);
4504 } else {
4505 goto illegal_insn;
4506 }
4507 } else {
4508 gen_fcompare(&cmp, cc, cond);
4509 }
4510
4511 /* The get_src2 above loaded the normal 13-bit
4512 immediate field, not the 11-bit field we have
4513 in movcc. But it did handle the reg case. */
4514 if (IS_IMM) {
4515 simm = GET_FIELD_SPs(insn, 0, 10);
4516 tcg_gen_movi_tl(cpu_src2, simm);
4517 }
4518
/* Load the old rd value so movcond can keep it when the
 * condition is false (movcc only writes rd when taken). */
4519 dst = gen_load_gpr(dc, rd);
4520 tcg_gen_movcond_tl(cmp.cond, dst,
4521 cmp.c1, cmp.c2,
4522 cpu_src2, dst);
4523 gen_store_gpr(dc, rd, dst);
4524 break;
4525 }
4526 case 0x2d: /* V9 sdivx */
4527 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4528 gen_store_gpr(dc, rd, cpu_dst);
4529 break;
4530 case 0x2e: /* V9 popc */
/* Population count of rs2 (rs1 is unused by the architecture). */
4531 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4532 gen_store_gpr(dc, rd, cpu_dst);
4533 break;
4534 case 0x2f: /* V9 movr */
4535 {
/* Conditional move on the contents of rs1 compared against zero
 * (cond encodes the register test, e.g. movrz/movrnz). */
4536 int cond = GET_FIELD_SP(insn, 10, 12);
4537 DisasCompare cmp;
4538 TCGv dst;
4539
4540 gen_compare_reg(&cmp, cond, cpu_src1);
4541
4542 /* The get_src2 above loaded the normal 13-bit
4543 immediate field, not the 10-bit field we have
4544 in movr. But it did handle the reg case. */
4545 if (IS_IMM) {
4546 simm = GET_FIELD_SPs(insn, 0, 9);
4547 tcg_gen_movi_tl(cpu_src2, simm);
4548 }
4549
4550 dst = gen_load_gpr(dc, rd);
4551 tcg_gen_movcond_tl(cmp.cond, dst,
4552 cmp.c1, cmp.c2,
4553 cpu_src2, dst);
4554 gen_store_gpr(dc, rd, dst);
4555 break;
4556 }
4557 #endif
4558 default:
4559 goto illegal_insn;
4560 }
4561 }
4562 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4563 #ifdef TARGET_SPARC64
4564 int opf = GET_FIELD_SP(insn, 5, 13);
4565 rs1 = GET_FIELD(insn, 13, 17);
4566 rs2 = GET_FIELD(insn, 27, 31);
4567 if (gen_trap_ifnofpu(dc)) {
4568 goto jmp_insn;
4569 }
4570
4571 switch (opf) {
4572 case 0x000: /* VIS I edge8cc */
4573 CHECK_FPU_FEATURE(dc, VIS1);
4574 cpu_src1 = gen_load_gpr(dc, rs1);
4575 cpu_src2 = gen_load_gpr(dc, rs2);
4576 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4577 gen_store_gpr(dc, rd, cpu_dst);
4578 break;
4579 case 0x001: /* VIS II edge8n */
4580 CHECK_FPU_FEATURE(dc, VIS2);
4581 cpu_src1 = gen_load_gpr(dc, rs1);
4582 cpu_src2 = gen_load_gpr(dc, rs2);
4583 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4584 gen_store_gpr(dc, rd, cpu_dst);
4585 break;
4586 case 0x002: /* VIS I edge8lcc */
4587 CHECK_FPU_FEATURE(dc, VIS1);
4588 cpu_src1 = gen_load_gpr(dc, rs1);
4589 cpu_src2 = gen_load_gpr(dc, rs2);
4590 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4591 gen_store_gpr(dc, rd, cpu_dst);
4592 break;
4593 case 0x003: /* VIS II edge8ln */
4594 CHECK_FPU_FEATURE(dc, VIS2);
4595 cpu_src1 = gen_load_gpr(dc, rs1);
4596 cpu_src2 = gen_load_gpr(dc, rs2);
4597 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4598 gen_store_gpr(dc, rd, cpu_dst);
4599 break;
4600 case 0x004: /* VIS I edge16cc */
4601 CHECK_FPU_FEATURE(dc, VIS1);
4602 cpu_src1 = gen_load_gpr(dc, rs1);
4603 cpu_src2 = gen_load_gpr(dc, rs2);
4604 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4605 gen_store_gpr(dc, rd, cpu_dst);
4606 break;
4607 case 0x005: /* VIS II edge16n */
4608 CHECK_FPU_FEATURE(dc, VIS2);
4609 cpu_src1 = gen_load_gpr(dc, rs1);
4610 cpu_src2 = gen_load_gpr(dc, rs2);
4611 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4612 gen_store_gpr(dc, rd, cpu_dst);
4613 break;
4614 case 0x006: /* VIS I edge16lcc */
4615 CHECK_FPU_FEATURE(dc, VIS1);
4616 cpu_src1 = gen_load_gpr(dc, rs1);
4617 cpu_src2 = gen_load_gpr(dc, rs2);
4618 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4619 gen_store_gpr(dc, rd, cpu_dst);
4620 break;
4621 case 0x007: /* VIS II edge16ln */
4622 CHECK_FPU_FEATURE(dc, VIS2);
4623 cpu_src1 = gen_load_gpr(dc, rs1);
4624 cpu_src2 = gen_load_gpr(dc, rs2);
4625 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4626 gen_store_gpr(dc, rd, cpu_dst);
4627 break;
4628 case 0x008: /* VIS I edge32cc */
4629 CHECK_FPU_FEATURE(dc, VIS1);
4630 cpu_src1 = gen_load_gpr(dc, rs1);
4631 cpu_src2 = gen_load_gpr(dc, rs2);
4632 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4633 gen_store_gpr(dc, rd, cpu_dst);
4634 break;
4635 case 0x009: /* VIS II edge32n */
4636 CHECK_FPU_FEATURE(dc, VIS2);
4637 cpu_src1 = gen_load_gpr(dc, rs1);
4638 cpu_src2 = gen_load_gpr(dc, rs2);
4639 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4640 gen_store_gpr(dc, rd, cpu_dst);
4641 break;
4642 case 0x00a: /* VIS I edge32lcc */
4643 CHECK_FPU_FEATURE(dc, VIS1);
4644 cpu_src1 = gen_load_gpr(dc, rs1);
4645 cpu_src2 = gen_load_gpr(dc, rs2);
4646 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4647 gen_store_gpr(dc, rd, cpu_dst);
4648 break;
4649 case 0x00b: /* VIS II edge32ln */
4650 CHECK_FPU_FEATURE(dc, VIS2);
4651 cpu_src1 = gen_load_gpr(dc, rs1);
4652 cpu_src2 = gen_load_gpr(dc, rs2);
4653 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4654 gen_store_gpr(dc, rd, cpu_dst);
4655 break;
4656 case 0x010: /* VIS I array8 */
4657 CHECK_FPU_FEATURE(dc, VIS1);
4658 cpu_src1 = gen_load_gpr(dc, rs1);
4659 cpu_src2 = gen_load_gpr(dc, rs2);
4660 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4661 gen_store_gpr(dc, rd, cpu_dst);
4662 break;
4663 case 0x012: /* VIS I array16 */
4664 CHECK_FPU_FEATURE(dc, VIS1);
4665 cpu_src1 = gen_load_gpr(dc, rs1);
4666 cpu_src2 = gen_load_gpr(dc, rs2);
4667 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4668 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4669 gen_store_gpr(dc, rd, cpu_dst);
4670 break;
4671 case 0x014: /* VIS I array32 */
4672 CHECK_FPU_FEATURE(dc, VIS1);
4673 cpu_src1 = gen_load_gpr(dc, rs1);
4674 cpu_src2 = gen_load_gpr(dc, rs2);
4675 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4676 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4677 gen_store_gpr(dc, rd, cpu_dst);
4678 break;
4679 case 0x018: /* VIS I alignaddr */
4680 CHECK_FPU_FEATURE(dc, VIS1);
4681 cpu_src1 = gen_load_gpr(dc, rs1);
4682 cpu_src2 = gen_load_gpr(dc, rs2);
4683 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4684 gen_store_gpr(dc, rd, cpu_dst);
4685 break;
4686 case 0x01a: /* VIS I alignaddrl */
4687 CHECK_FPU_FEATURE(dc, VIS1);
4688 cpu_src1 = gen_load_gpr(dc, rs1);
4689 cpu_src2 = gen_load_gpr(dc, rs2);
4690 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4691 gen_store_gpr(dc, rd, cpu_dst);
4692 break;
4693 case 0x019: /* VIS II bmask */
4694 CHECK_FPU_FEATURE(dc, VIS2);
4695 cpu_src1 = gen_load_gpr(dc, rs1);
4696 cpu_src2 = gen_load_gpr(dc, rs2);
4697 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4698 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4699 gen_store_gpr(dc, rd, cpu_dst);
4700 break;
4701 case 0x020: /* VIS I fcmple16 */
4702 CHECK_FPU_FEATURE(dc, VIS1);
4703 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4704 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4705 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4706 gen_store_gpr(dc, rd, cpu_dst);
4707 break;
4708 case 0x022: /* VIS I fcmpne16 */
4709 CHECK_FPU_FEATURE(dc, VIS1);
4710 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4711 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4712 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4713 gen_store_gpr(dc, rd, cpu_dst);
4714 break;
4715 case 0x024: /* VIS I fcmple32 */
4716 CHECK_FPU_FEATURE(dc, VIS1);
4717 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4718 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4719 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4720 gen_store_gpr(dc, rd, cpu_dst);
4721 break;
4722 case 0x026: /* VIS I fcmpne32 */
4723 CHECK_FPU_FEATURE(dc, VIS1);
4724 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4725 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4726 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4727 gen_store_gpr(dc, rd, cpu_dst);
4728 break;
4729 case 0x028: /* VIS I fcmpgt16 */
4730 CHECK_FPU_FEATURE(dc, VIS1);
4731 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4732 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4733 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4734 gen_store_gpr(dc, rd, cpu_dst);
4735 break;
4736 case 0x02a: /* VIS I fcmpeq16 */
4737 CHECK_FPU_FEATURE(dc, VIS1);
4738 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4739 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4740 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4741 gen_store_gpr(dc, rd, cpu_dst);
4742 break;
4743 case 0x02c: /* VIS I fcmpgt32 */
4744 CHECK_FPU_FEATURE(dc, VIS1);
4745 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4746 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4747 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4748 gen_store_gpr(dc, rd, cpu_dst);
4749 break;
4750 case 0x02e: /* VIS I fcmpeq32 */
4751 CHECK_FPU_FEATURE(dc, VIS1);
4752 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4753 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4754 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4755 gen_store_gpr(dc, rd, cpu_dst);
4756 break;
4757 case 0x031: /* VIS I fmul8x16 */
4758 CHECK_FPU_FEATURE(dc, VIS1);
4759 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4760 break;
4761 case 0x033: /* VIS I fmul8x16au */
4762 CHECK_FPU_FEATURE(dc, VIS1);
4763 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4764 break;
4765 case 0x035: /* VIS I fmul8x16al */
4766 CHECK_FPU_FEATURE(dc, VIS1);
4767 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4768 break;
4769 case 0x036: /* VIS I fmul8sux16 */
4770 CHECK_FPU_FEATURE(dc, VIS1);
4771 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4772 break;
4773 case 0x037: /* VIS I fmul8ulx16 */
4774 CHECK_FPU_FEATURE(dc, VIS1);
4775 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4776 break;
4777 case 0x038: /* VIS I fmuld8sux16 */
4778 CHECK_FPU_FEATURE(dc, VIS1);
4779 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4780 break;
4781 case 0x039: /* VIS I fmuld8ulx16 */
4782 CHECK_FPU_FEATURE(dc, VIS1);
4783 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4784 break;
4785 case 0x03a: /* VIS I fpack32 */
4786 CHECK_FPU_FEATURE(dc, VIS1);
4787 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4788 break;
4789 case 0x03b: /* VIS I fpack16 */
4790 CHECK_FPU_FEATURE(dc, VIS1);
4791 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4792 cpu_dst_32 = gen_dest_fpr_F(dc);
4793 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4794 gen_store_fpr_F(dc, rd, cpu_dst_32);
4795 break;
4796 case 0x03d: /* VIS I fpackfix */
4797 CHECK_FPU_FEATURE(dc, VIS1);
4798 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4799 cpu_dst_32 = gen_dest_fpr_F(dc);
4800 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4801 gen_store_fpr_F(dc, rd, cpu_dst_32);
4802 break;
4803 case 0x03e: /* VIS I pdist */
4804 CHECK_FPU_FEATURE(dc, VIS1);
4805 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4806 break;
4807 case 0x048: /* VIS I faligndata */
4808 CHECK_FPU_FEATURE(dc, VIS1);
4809 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4810 break;
4811 case 0x04b: /* VIS I fpmerge */
4812 CHECK_FPU_FEATURE(dc, VIS1);
4813 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4814 break;
4815 case 0x04c: /* VIS II bshuffle */
4816 CHECK_FPU_FEATURE(dc, VIS2);
4817 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4818 break;
4819 case 0x04d: /* VIS I fexpand */
4820 CHECK_FPU_FEATURE(dc, VIS1);
4821 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4822 break;
4823 case 0x050: /* VIS I fpadd16 */
4824 CHECK_FPU_FEATURE(dc, VIS1);
4825 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4826 break;
4827 case 0x051: /* VIS I fpadd16s */
4828 CHECK_FPU_FEATURE(dc, VIS1);
4829 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4830 break;
4831 case 0x052: /* VIS I fpadd32 */
4832 CHECK_FPU_FEATURE(dc, VIS1);
4833 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4834 break;
4835 case 0x053: /* VIS I fpadd32s */
4836 CHECK_FPU_FEATURE(dc, VIS1);
4837 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4838 break;
4839 case 0x054: /* VIS I fpsub16 */
4840 CHECK_FPU_FEATURE(dc, VIS1);
4841 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4842 break;
4843 case 0x055: /* VIS I fpsub16s */
4844 CHECK_FPU_FEATURE(dc, VIS1);
4845 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4846 break;
4847 case 0x056: /* VIS I fpsub32 */
4848 CHECK_FPU_FEATURE(dc, VIS1);
4849 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4850 break;
4851 case 0x057: /* VIS I fpsub32s */
4852 CHECK_FPU_FEATURE(dc, VIS1);
4853 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4854 break;
4855 case 0x060: /* VIS I fzero */
4856 CHECK_FPU_FEATURE(dc, VIS1);
4857 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4858 tcg_gen_movi_i64(cpu_dst_64, 0);
4859 gen_store_fpr_D(dc, rd, cpu_dst_64);
4860 break;
4861 case 0x061: /* VIS I fzeros */
4862 CHECK_FPU_FEATURE(dc, VIS1);
4863 cpu_dst_32 = gen_dest_fpr_F(dc);
4864 tcg_gen_movi_i32(cpu_dst_32, 0);
4865 gen_store_fpr_F(dc, rd, cpu_dst_32);
4866 break;
4867 case 0x062: /* VIS I fnor */
4868 CHECK_FPU_FEATURE(dc, VIS1);
4869 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4870 break;
4871 case 0x063: /* VIS I fnors */
4872 CHECK_FPU_FEATURE(dc, VIS1);
4873 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4874 break;
4875 case 0x064: /* VIS I fandnot2 */
4876 CHECK_FPU_FEATURE(dc, VIS1);
4877 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4878 break;
4879 case 0x065: /* VIS I fandnot2s */
4880 CHECK_FPU_FEATURE(dc, VIS1);
4881 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4882 break;
4883 case 0x066: /* VIS I fnot2 */
4884 CHECK_FPU_FEATURE(dc, VIS1);
4885 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4886 break;
4887 case 0x067: /* VIS I fnot2s */
4888 CHECK_FPU_FEATURE(dc, VIS1);
4889 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4890 break;
4891 case 0x068: /* VIS I fandnot1 */
4892 CHECK_FPU_FEATURE(dc, VIS1);
4893 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4894 break;
4895 case 0x069: /* VIS I fandnot1s */
4896 CHECK_FPU_FEATURE(dc, VIS1);
4897 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4898 break;
4899 case 0x06a: /* VIS I fnot1 */
4900 CHECK_FPU_FEATURE(dc, VIS1);
4901 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4902 break;
4903 case 0x06b: /* VIS I fnot1s */
4904 CHECK_FPU_FEATURE(dc, VIS1);
4905 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4906 break;
4907 case 0x06c: /* VIS I fxor */
4908 CHECK_FPU_FEATURE(dc, VIS1);
4909 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4910 break;
4911 case 0x06d: /* VIS I fxors */
4912 CHECK_FPU_FEATURE(dc, VIS1);
4913 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4914 break;
4915 case 0x06e: /* VIS I fnand */
4916 CHECK_FPU_FEATURE(dc, VIS1);
4917 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4918 break;
4919 case 0x06f: /* VIS I fnands */
4920 CHECK_FPU_FEATURE(dc, VIS1);
4921 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4922 break;
4923 case 0x070: /* VIS I fand */
4924 CHECK_FPU_FEATURE(dc, VIS1);
4925 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4926 break;
4927 case 0x071: /* VIS I fands */
4928 CHECK_FPU_FEATURE(dc, VIS1);
4929 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4930 break;
4931 case 0x072: /* VIS I fxnor */
4932 CHECK_FPU_FEATURE(dc, VIS1);
4933 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4934 break;
4935 case 0x073: /* VIS I fxnors */
4936 CHECK_FPU_FEATURE(dc, VIS1);
4937 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4938 break;
4939 case 0x074: /* VIS I fsrc1 */
4940 CHECK_FPU_FEATURE(dc, VIS1);
4941 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4942 gen_store_fpr_D(dc, rd, cpu_src1_64);
4943 break;
4944 case 0x075: /* VIS I fsrc1s */
4945 CHECK_FPU_FEATURE(dc, VIS1);
4946 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4947 gen_store_fpr_F(dc, rd, cpu_src1_32);
4948 break;
4949 case 0x076: /* VIS I fornot2 */
4950 CHECK_FPU_FEATURE(dc, VIS1);
4951 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4952 break;
4953 case 0x077: /* VIS I fornot2s */
4954 CHECK_FPU_FEATURE(dc, VIS1);
4955 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4956 break;
4957 case 0x078: /* VIS I fsrc2 */
4958 CHECK_FPU_FEATURE(dc, VIS1);
4959 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4960 gen_store_fpr_D(dc, rd, cpu_src1_64);
4961 break;
4962 case 0x079: /* VIS I fsrc2s */
4963 CHECK_FPU_FEATURE(dc, VIS1);
4964 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4965 gen_store_fpr_F(dc, rd, cpu_src1_32);
4966 break;
4967 case 0x07a: /* VIS I fornot1 */
4968 CHECK_FPU_FEATURE(dc, VIS1);
4969 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4970 break;
4971 case 0x07b: /* VIS I fornot1s */
4972 CHECK_FPU_FEATURE(dc, VIS1);
4973 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4974 break;
4975 case 0x07c: /* VIS I for */
4976 CHECK_FPU_FEATURE(dc, VIS1);
4977 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4978 break;
4979 case 0x07d: /* VIS I fors */
4980 CHECK_FPU_FEATURE(dc, VIS1);
4981 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4982 break;
4983 case 0x07e: /* VIS I fone */
4984 CHECK_FPU_FEATURE(dc, VIS1);
4985 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4986 tcg_gen_movi_i64(cpu_dst_64, -1);
4987 gen_store_fpr_D(dc, rd, cpu_dst_64);
4988 break;
4989 case 0x07f: /* VIS I fones */
4990 CHECK_FPU_FEATURE(dc, VIS1);
4991 cpu_dst_32 = gen_dest_fpr_F(dc);
4992 tcg_gen_movi_i32(cpu_dst_32, -1);
4993 gen_store_fpr_F(dc, rd, cpu_dst_32);
4994 break;
4995 case 0x080: /* VIS I shutdown */
4996 case 0x081: /* VIS II siam */
4997 // XXX
4998 goto illegal_insn;
4999 default:
5000 goto illegal_insn;
5001 }
5002 #else
5003 goto ncp_insn;
5004 #endif
5005 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5006 #ifdef TARGET_SPARC64
5007 goto illegal_insn;
5008 #else
5009 goto ncp_insn;
5010 #endif
5011 #ifdef TARGET_SPARC64
5012 } else if (xop == 0x39) { /* V9 return */
5013 save_state(dc);
5014 cpu_src1 = get_src1(dc, insn);
5015 cpu_tmp0 = tcg_temp_new();
5016 if (IS_IMM) { /* immediate */
5017 simm = GET_FIELDs(insn, 19, 31);
5018 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5019 } else { /* register */
5020 rs2 = GET_FIELD(insn, 27, 31);
5021 if (rs2) {
5022 cpu_src2 = gen_load_gpr(dc, rs2);
5023 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5024 } else {
5025 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5026 }
5027 }
5028 gen_helper_restore(cpu_env);
5029 gen_mov_pc_npc(dc);
5030 gen_check_align(cpu_tmp0, 3);
5031 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5032 dc->npc = DYNAMIC_PC;
5033 goto jmp_insn;
5034 #endif
5035 } else {
5036 cpu_src1 = get_src1(dc, insn);
5037 cpu_tmp0 = tcg_temp_new();
5038 if (IS_IMM) { /* immediate */
5039 simm = GET_FIELDs(insn, 19, 31);
5040 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5041 } else { /* register */
5042 rs2 = GET_FIELD(insn, 27, 31);
5043 if (rs2) {
5044 cpu_src2 = gen_load_gpr(dc, rs2);
5045 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5046 } else {
5047 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5048 }
5049 }
5050 switch (xop) {
5051 case 0x38: /* jmpl */
5052 {
5053 TCGv t = gen_dest_gpr(dc, rd);
5054 tcg_gen_movi_tl(t, dc->pc);
5055 gen_store_gpr(dc, rd, t);
5056
5057 gen_mov_pc_npc(dc);
5058 gen_check_align(cpu_tmp0, 3);
5059 gen_address_mask(dc, cpu_tmp0);
5060 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5061 dc->npc = DYNAMIC_PC_LOOKUP;
5062 }
5063 goto jmp_insn;
5064 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5065 case 0x39: /* rett, V9 return */
5066 {
5067 if (!supervisor(dc))
5068 goto priv_insn;
5069 gen_mov_pc_npc(dc);
5070 gen_check_align(cpu_tmp0, 3);
5071 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5072 dc->npc = DYNAMIC_PC;
5073 gen_helper_rett(cpu_env);
5074 }
5075 goto jmp_insn;
5076 #endif
5077 case 0x3b: /* flush */
5078 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5079 goto unimp_flush;
5080 /* nop */
5081 break;
5082 case 0x3c: /* save */
5083 gen_helper_save(cpu_env);
5084 gen_store_gpr(dc, rd, cpu_tmp0);
5085 break;
5086 case 0x3d: /* restore */
5087 gen_helper_restore(cpu_env);
5088 gen_store_gpr(dc, rd, cpu_tmp0);
5089 break;
5090 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5091 case 0x3e: /* V9 done/retry */
5092 {
5093 switch (rd) {
5094 case 0:
5095 if (!supervisor(dc))
5096 goto priv_insn;
5097 dc->npc = DYNAMIC_PC;
5098 dc->pc = DYNAMIC_PC;
5099 translator_io_start(&dc->base);
5100 gen_helper_done(cpu_env);
5101 goto jmp_insn;
5102 case 1:
5103 if (!supervisor(dc))
5104 goto priv_insn;
5105 dc->npc = DYNAMIC_PC;
5106 dc->pc = DYNAMIC_PC;
5107 translator_io_start(&dc->base);
5108 gen_helper_retry(cpu_env);
5109 goto jmp_insn;
5110 default:
5111 goto illegal_insn;
5112 }
5113 }
5114 break;
5115 #endif
5116 default:
5117 goto illegal_insn;
5118 }
5119 }
5120 break;
5121 }
5122 break;
5123 case 3: /* load/store instructions */
5124 {
5125 unsigned int xop = GET_FIELD(insn, 7, 12);
5126 /* ??? gen_address_mask prevents us from using a source
5127 register directly. Always generate a temporary. */
5128 TCGv cpu_addr = tcg_temp_new();
5129
5130 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5131 if (xop == 0x3c || xop == 0x3e) {
5132 /* V9 casa/casxa : no offset */
5133 } else if (IS_IMM) { /* immediate */
5134 simm = GET_FIELDs(insn, 19, 31);
5135 if (simm != 0) {
5136 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5137 }
5138 } else { /* register */
5139 rs2 = GET_FIELD(insn, 27, 31);
5140 if (rs2 != 0) {
5141 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5142 }
5143 }
5144 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5145 (xop > 0x17 && xop <= 0x1d ) ||
5146 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5147 TCGv cpu_val = gen_dest_gpr(dc, rd);
5148
5149 switch (xop) {
5150 case 0x0: /* ld, V9 lduw, load unsigned word */
5151 gen_address_mask(dc, cpu_addr);
5152 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5153 dc->mem_idx, MO_TEUL | MO_ALIGN);
5154 break;
5155 case 0x1: /* ldub, load unsigned byte */
5156 gen_address_mask(dc, cpu_addr);
5157 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5158 dc->mem_idx, MO_UB);
5159 break;
5160 case 0x2: /* lduh, load unsigned halfword */
5161 gen_address_mask(dc, cpu_addr);
5162 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5163 dc->mem_idx, MO_TEUW | MO_ALIGN);
5164 break;
5165 case 0x3: /* ldd, load double word */
5166 if (rd & 1)
5167 goto illegal_insn;
5168 else {
5169 TCGv_i64 t64;
5170
5171 gen_address_mask(dc, cpu_addr);
5172 t64 = tcg_temp_new_i64();
5173 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5174 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5175 tcg_gen_trunc_i64_tl(cpu_val, t64);
5176 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5177 gen_store_gpr(dc, rd + 1, cpu_val);
5178 tcg_gen_shri_i64(t64, t64, 32);
5179 tcg_gen_trunc_i64_tl(cpu_val, t64);
5180 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5181 }
5182 break;
5183 case 0x9: /* ldsb, load signed byte */
5184 gen_address_mask(dc, cpu_addr);
5185 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
5186 break;
5187 case 0xa: /* ldsh, load signed halfword */
5188 gen_address_mask(dc, cpu_addr);
5189 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5190 dc->mem_idx, MO_TESW | MO_ALIGN);
5191 break;
5192 case 0xd: /* ldstub */
5193 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5194 break;
5195 case 0x0f:
5196 /* swap, swap register with memory. Also atomically */
5197 CHECK_IU_FEATURE(dc, SWAP);
5198 cpu_src1 = gen_load_gpr(dc, rd);
5199 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5200 dc->mem_idx, MO_TEUL);
5201 break;
5202 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5203 case 0x10: /* lda, V9 lduwa, load word alternate */
5204 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5205 break;
5206 case 0x11: /* lduba, load unsigned byte alternate */
5207 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5208 break;
5209 case 0x12: /* lduha, load unsigned halfword alternate */
5210 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5211 break;
5212 case 0x13: /* ldda, load double word alternate */
5213 if (rd & 1) {
5214 goto illegal_insn;
5215 }
5216 gen_ldda_asi(dc, cpu_addr, insn, rd);
5217 goto skip_move;
5218 case 0x19: /* ldsba, load signed byte alternate */
5219 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5220 break;
5221 case 0x1a: /* ldsha, load signed halfword alternate */
5222 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5223 break;
5224 case 0x1d: /* ldstuba -- XXX: should be atomically */
5225 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5226 break;
5227 case 0x1f: /* swapa, swap reg with alt. memory. Also
5228 atomically */
5229 CHECK_IU_FEATURE(dc, SWAP);
5230 cpu_src1 = gen_load_gpr(dc, rd);
5231 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5232 break;
5233
5234 #ifndef TARGET_SPARC64
5235 case 0x30: /* ldc */
5236 case 0x31: /* ldcsr */
5237 case 0x33: /* lddc */
5238 goto ncp_insn;
5239 #endif
5240 #endif
5241 #ifdef TARGET_SPARC64
5242 case 0x08: /* V9 ldsw */
5243 gen_address_mask(dc, cpu_addr);
5244 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5245 dc->mem_idx, MO_TESL | MO_ALIGN);
5246 break;
5247 case 0x0b: /* V9 ldx */
5248 gen_address_mask(dc, cpu_addr);
5249 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5250 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5251 break;
5252 case 0x18: /* V9 ldswa */
5253 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5254 break;
5255 case 0x1b: /* V9 ldxa */
5256 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5257 break;
5258 case 0x2d: /* V9 prefetch, no effect */
5259 goto skip_move;
5260 case 0x30: /* V9 ldfa */
5261 if (gen_trap_ifnofpu(dc)) {
5262 goto jmp_insn;
5263 }
5264 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5265 gen_update_fprs_dirty(dc, rd);
5266 goto skip_move;
5267 case 0x33: /* V9 lddfa */
5268 if (gen_trap_ifnofpu(dc)) {
5269 goto jmp_insn;
5270 }
5271 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5272 gen_update_fprs_dirty(dc, DFPREG(rd));
5273 goto skip_move;
5274 case 0x3d: /* V9 prefetcha, no effect */
5275 goto skip_move;
5276 case 0x32: /* V9 ldqfa */
5277 CHECK_FPU_FEATURE(dc, FLOAT128);
5278 if (gen_trap_ifnofpu(dc)) {
5279 goto jmp_insn;
5280 }
5281 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5282 gen_update_fprs_dirty(dc, QFPREG(rd));
5283 goto skip_move;
5284 #endif
5285 default:
5286 goto illegal_insn;
5287 }
5288 gen_store_gpr(dc, rd, cpu_val);
5289 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5290 skip_move: ;
5291 #endif
5292 } else if (xop >= 0x20 && xop < 0x24) {
5293 if (gen_trap_ifnofpu(dc)) {
5294 goto jmp_insn;
5295 }
5296 switch (xop) {
5297 case 0x20: /* ldf, load fpreg */
5298 gen_address_mask(dc, cpu_addr);
5299 cpu_dst_32 = gen_dest_fpr_F(dc);
5300 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5301 dc->mem_idx, MO_TEUL | MO_ALIGN);
5302 gen_store_fpr_F(dc, rd, cpu_dst_32);
5303 break;
5304 case 0x21: /* ldfsr, V9 ldxfsr */
5305 #ifdef TARGET_SPARC64
5306 gen_address_mask(dc, cpu_addr);
5307 if (rd == 1) {
5308 TCGv_i64 t64 = tcg_temp_new_i64();
5309 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5310 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5311 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5312 break;
5313 }
5314 #endif
5315 cpu_dst_32 = tcg_temp_new_i32();
5316 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5317 dc->mem_idx, MO_TEUL | MO_ALIGN);
5318 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5319 break;
5320 case 0x22: /* ldqf, load quad fpreg */
5321 CHECK_FPU_FEATURE(dc, FLOAT128);
5322 gen_address_mask(dc, cpu_addr);
5323 cpu_src1_64 = tcg_temp_new_i64();
5324 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5325 MO_TEUQ | MO_ALIGN_4);
5326 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5327 cpu_src2_64 = tcg_temp_new_i64();
5328 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5329 MO_TEUQ | MO_ALIGN_4);
5330 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5331 break;
5332 case 0x23: /* lddf, load double fpreg */
5333 gen_address_mask(dc, cpu_addr);
5334 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5335 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5336 MO_TEUQ | MO_ALIGN_4);
5337 gen_store_fpr_D(dc, rd, cpu_dst_64);
5338 break;
5339 default:
5340 goto illegal_insn;
5341 }
5342 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5343 xop == 0xe || xop == 0x1e) {
5344 TCGv cpu_val = gen_load_gpr(dc, rd);
5345
5346 switch (xop) {
5347 case 0x4: /* st, store word */
5348 gen_address_mask(dc, cpu_addr);
5349 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5350 dc->mem_idx, MO_TEUL | MO_ALIGN);
5351 break;
5352 case 0x5: /* stb, store byte */
5353 gen_address_mask(dc, cpu_addr);
5354 tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
5355 break;
5356 case 0x6: /* sth, store halfword */
5357 gen_address_mask(dc, cpu_addr);
5358 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5359 dc->mem_idx, MO_TEUW | MO_ALIGN);
5360 break;
5361 case 0x7: /* std, store double word */
5362 if (rd & 1)
5363 goto illegal_insn;
5364 else {
5365 TCGv_i64 t64;
5366 TCGv lo;
5367
5368 gen_address_mask(dc, cpu_addr);
5369 lo = gen_load_gpr(dc, rd + 1);
5370 t64 = tcg_temp_new_i64();
5371 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5372 tcg_gen_qemu_st_i64(t64, cpu_addr,
5373 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5374 }
5375 break;
5376 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5377 case 0x14: /* sta, V9 stwa, store word alternate */
5378 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5379 break;
5380 case 0x15: /* stba, store byte alternate */
5381 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5382 break;
5383 case 0x16: /* stha, store halfword alternate */
5384 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5385 break;
5386 case 0x17: /* stda, store double word alternate */
5387 if (rd & 1) {
5388 goto illegal_insn;
5389 }
5390 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5391 break;
5392 #endif
5393 #ifdef TARGET_SPARC64
5394 case 0x0e: /* V9 stx */
5395 gen_address_mask(dc, cpu_addr);
5396 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5397 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5398 break;
5399 case 0x1e: /* V9 stxa */
5400 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5401 break;
5402 #endif
5403 default:
5404 goto illegal_insn;
5405 }
5406 } else if (xop > 0x23 && xop < 0x28) {
5407 if (gen_trap_ifnofpu(dc)) {
5408 goto jmp_insn;
5409 }
5410 switch (xop) {
5411 case 0x24: /* stf, store fpreg */
5412 gen_address_mask(dc, cpu_addr);
5413 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5414 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5415 dc->mem_idx, MO_TEUL | MO_ALIGN);
5416 break;
5417 case 0x25: /* stfsr, V9 stxfsr */
5418 {
5419 #ifdef TARGET_SPARC64
5420 gen_address_mask(dc, cpu_addr);
5421 if (rd == 1) {
5422 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5423 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5424 break;
5425 }
5426 #endif
5427 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5428 dc->mem_idx, MO_TEUL | MO_ALIGN);
5429 }
5430 break;
5431 case 0x26:
5432 #ifdef TARGET_SPARC64
5433 /* V9 stqf, store quad fpreg */
5434 CHECK_FPU_FEATURE(dc, FLOAT128);
5435 gen_address_mask(dc, cpu_addr);
5436 /* ??? While stqf only requires 4-byte alignment, it is
5437 legal for the cpu to signal the unaligned exception.
5438 The OS trap handler is then required to fix it up.
5439 For qemu, this avoids having to probe the second page
5440 before performing the first write. */
5441 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5442 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5443 dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
5444 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5445 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5446 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5447 dc->mem_idx, MO_TEUQ);
5448 break;
5449 #else /* !TARGET_SPARC64 */
5450 /* stdfq, store floating point queue */
5451 #if defined(CONFIG_USER_ONLY)
5452 goto illegal_insn;
5453 #else
5454 if (!supervisor(dc))
5455 goto priv_insn;
5456 if (gen_trap_ifnofpu(dc)) {
5457 goto jmp_insn;
5458 }
5459 goto nfq_insn;
5460 #endif
5461 #endif
5462 case 0x27: /* stdf, store double fpreg */
5463 gen_address_mask(dc, cpu_addr);
5464 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5465 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5466 MO_TEUQ | MO_ALIGN_4);
5467 break;
5468 default:
5469 goto illegal_insn;
5470 }
5471 } else if (xop > 0x33 && xop < 0x3f) {
5472 switch (xop) {
5473 #ifdef TARGET_SPARC64
5474 case 0x34: /* V9 stfa */
5475 if (gen_trap_ifnofpu(dc)) {
5476 goto jmp_insn;
5477 }
5478 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5479 break;
5480 case 0x36: /* V9 stqfa */
5481 {
5482 CHECK_FPU_FEATURE(dc, FLOAT128);
5483 if (gen_trap_ifnofpu(dc)) {
5484 goto jmp_insn;
5485 }
5486 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5487 }
5488 break;
5489 case 0x37: /* V9 stdfa */
5490 if (gen_trap_ifnofpu(dc)) {
5491 goto jmp_insn;
5492 }
5493 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5494 break;
5495 case 0x3e: /* V9 casxa */
5496 rs2 = GET_FIELD(insn, 27, 31);
5497 cpu_src2 = gen_load_gpr(dc, rs2);
5498 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5499 break;
5500 #else
5501 case 0x34: /* stc */
5502 case 0x35: /* stcsr */
5503 case 0x36: /* stdcq */
5504 case 0x37: /* stdc */
5505 goto ncp_insn;
5506 #endif
5507 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5508 case 0x3c: /* V9 or LEON3 casa */
5509 #ifndef TARGET_SPARC64
5510 CHECK_IU_FEATURE(dc, CASA);
5511 #endif
5512 rs2 = GET_FIELD(insn, 27, 31);
5513 cpu_src2 = gen_load_gpr(dc, rs2);
5514 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5515 break;
5516 #endif
5517 default:
5518 goto illegal_insn;
5519 }
5520 } else {
5521 goto illegal_insn;
5522 }
5523 }
5524 break;
5525 }
5526 /* default case for non jump instructions */
5527 if (dc->npc & 3) {
5528 switch (dc->npc) {
5529 case DYNAMIC_PC:
5530 case DYNAMIC_PC_LOOKUP:
5531 dc->pc = dc->npc;
5532 gen_op_next_insn();
5533 break;
5534 case JUMP_PC:
5535 /* we can do a static jump */
5536 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5537 dc->base.is_jmp = DISAS_NORETURN;
5538 break;
5539 default:
5540 g_assert_not_reached();
5541 }
5542 } else {
5543 dc->pc = dc->npc;
5544 dc->npc = dc->npc + 4;
5545 }
5546 jmp_insn:
5547 return;
5548 illegal_insn:
5549 gen_exception(dc, TT_ILL_INSN);
5550 return;
5551 unimp_flush:
5552 gen_exception(dc, TT_UNIMP_FLUSH);
5553 return;
5554 #if !defined(CONFIG_USER_ONLY)
5555 priv_insn:
5556 gen_exception(dc, TT_PRIV_INSN);
5557 return;
5558 #endif
5559 nfpu_insn:
5560 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5561 return;
5562 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5563 nfq_insn:
5564 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5565 return;
5566 #endif
5567 #ifndef TARGET_SPARC64
5568 ncp_insn:
5569 gen_exception(dc, TT_NCP_INSN);
5570 return;
5571 #endif
5572 }
5573
5574 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5575 {
5576 DisasContext *dc = container_of(dcbase, DisasContext, base);
5577 CPUSPARCState *env = cs->env_ptr;
5578 int bound;
5579
5580 dc->pc = dc->base.pc_first;
5581 dc->npc = (target_ulong)dc->base.tb->cs_base;
5582 dc->cc_op = CC_OP_DYNAMIC;
5583 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5584 dc->def = &env->def;
5585 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5586 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5587 #ifndef CONFIG_USER_ONLY
5588 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5589 #endif
5590 #ifdef TARGET_SPARC64
5591 dc->fprs_dirty = 0;
5592 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5593 #ifndef CONFIG_USER_ONLY
5594 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5595 #endif
5596 #endif
5597 /*
5598 * if we reach a page boundary, we stop generation so that the
5599 * PC of a TT_TFAULT exception is always in the right page
5600 */
5601 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5602 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5603 }
5604
/* Per-TB start hook: nothing to do beyond init_disas_context.  */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5608
5609 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5610 {
5611 DisasContext *dc = container_of(dcbase, DisasContext, base);
5612 target_ulong npc = dc->npc;
5613
5614 if (npc & 3) {
5615 switch (npc) {
5616 case JUMP_PC:
5617 assert(dc->jump_pc[1] == dc->pc + 4);
5618 npc = dc->jump_pc[0] | JUMP_PC;
5619 break;
5620 case DYNAMIC_PC:
5621 case DYNAMIC_PC_LOOKUP:
5622 npc = DYNAMIC_PC;
5623 break;
5624 default:
5625 g_assert_not_reached();
5626 }
5627 }
5628 tcg_gen_insn_start(dc->pc, npc);
5629 }
5630
5631 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5632 {
5633 DisasContext *dc = container_of(dcbase, DisasContext, base);
5634 CPUSPARCState *env = cs->env_ptr;
5635 unsigned int insn;
5636
5637 insn = translator_ldl(env, &dc->base, dc->pc);
5638 dc->base.pc_next += 4;
5639 disas_sparc_insn(dc, insn);
5640
5641 if (dc->base.is_jmp == DISAS_NORETURN) {
5642 return;
5643 }
5644 if (dc->pc != dc->base.pc_next) {
5645 dc->base.is_jmp = DISAS_TOO_MANY;
5646 }
5647 }
5648
/*
 * Emit the epilogue for a translation block: chain directly to the
 * next TB when both PC and NPC are statically known, otherwise spill
 * the dynamic PC/NPC state and either look up the next TB in-line or
 * return to the main loop.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* Low bits set in dc->pc mark one of the dynamic-PC encodings. */
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                /* cpu_pc was already written by the insn; lookup is safe. */
                may_lookup = true;
                break;
            case DYNAMIC_PC:
                /* Must go back to the main loop (e.g. for exceptions). */
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static PC but dynamic NPC: store PC, then try a lookup. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            may_lookup = true;
        }

        save_npc(dc);
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}
5700
/* Log hook: disassemble the guest code covered by this TB.  */
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
5707
/* Callback table consumed by the generic translator_loop().  */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5716
/*
 * Entry point for TB translation: drive the generic translator loop
 * with the SPARC callback table.  dc is zero-initialized; all real
 * setup happens in sparc_tr_init_disas_context().
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5724
5725 void sparc_tcg_init(void)
5726 {
5727 static const char gregnames[32][4] = {
5728 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5729 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5730 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5731 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5732 };
5733 static const char fregnames[32][4] = {
5734 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5735 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5736 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5737 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5738 };
5739
5740 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5741 #ifdef TARGET_SPARC64
5742 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5743 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5744 #else
5745 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5746 #endif
5747 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5748 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5749 };
5750
5751 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5752 #ifdef TARGET_SPARC64
5753 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5754 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5755 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5756 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5757 "hstick_cmpr" },
5758 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5759 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5760 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5761 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5762 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5763 #endif
5764 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5765 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5766 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5767 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5768 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5769 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5770 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5771 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5772 #ifndef CONFIG_USER_ONLY
5773 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5774 #endif
5775 };
5776
5777 unsigned int i;
5778
5779 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5780 offsetof(CPUSPARCState, regwptr),
5781 "regwptr");
5782
5783 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5784 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5785 }
5786
5787 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5788 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5789 }
5790
5791 cpu_regs[0] = NULL;
5792 for (i = 1; i < 8; ++i) {
5793 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5794 offsetof(CPUSPARCState, gregs[i]),
5795 gregnames[i]);
5796 }
5797
5798 for (i = 8; i < 32; ++i) {
5799 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5800 (i - 8) * sizeof(target_ulong),
5801 gregnames[i]);
5802 }
5803
5804 for (i = 0; i < TARGET_DPREGS; i++) {
5805 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5806 offsetof(CPUSPARCState, fpr[i]),
5807 fregnames[i]);
5808 }
5809 }
5810
5811 void sparc_restore_state_to_opc(CPUState *cs,
5812 const TranslationBlock *tb,
5813 const uint64_t *data)
5814 {
5815 SPARCCPU *cpu = SPARC_CPU(cs);
5816 CPUSPARCState *env = &cpu->env;
5817 target_ulong pc = data[0];
5818 target_ulong npc = data[1];
5819
5820 env->pc = pc;
5821 if (npc == DYNAMIC_PC) {
5822 /* dynamic NPC: already stored */
5823 } else if (npc & JUMP_PC) {
5824 /* jump PC: use 'cond' and the jump targets of the translation */
5825 if (env->cond) {
5826 env->npc = npc & ~3;
5827 } else {
5828 env->npc = pc + 4;
5829 }
5830 } else {
5831 env->npc = npc;
5832 }
5833 }