/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC 1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC 2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP 3

#define DISAS_EXIT DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when npc == JUMP_PC */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;

// This macro uses non-native bit order: FROM and TO count from the MSB,
// i.e. bit 0 is bit 31
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
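
/* Worked example: GET_FIELD(insn, 3, 6) shifts right by 31 - 6 = 25 and
 * masks with (1 << 4) - 1, i.e. it selects machine bits 28..25, the cond
 * field of a Bicc instruction.  The same field in the manuals' numbering
 * is GET_FIELD_SP(insn, 25, 28).
 */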

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
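
/* Note: on SPARC64 the low bit of the 5-bit register field encodes bit 5
 * of the double/quad register number, per the V9 extended FP numbering,
 * so e.g. the encoding 1 names %f32.  The masks also force the even
 * (double) or multiple-of-four (quad) alignment of the register number.
 */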

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
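
/* E.g. sign_extend(0x1fff, 13) == -1: the value is shifted left so that
 * its top bit lands in the sign bit, then arithmetically shifted back.
 */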

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again. */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
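
/* The values 1 and 2 used here correspond to FPRS.DL and FPRS.DU, the
 * "dirty lower" (%f0-%f31) and "dirty upper" (%f32-%f63) halves of the
 * FP register file.  fprs_dirty caches which bits this TB has already
 * set, so each ori is emitted at most once per half per TB.
 */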

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
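
/* This implements the V9 PSTATE.AM address mask: when 32-bit addressing
 * is in effect (always, for the 32-bit ABI), the upper 32 bits of each
 * generated address are zeroed before use.
 */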

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}
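
/* %g0 reads as zero and ignores writes: gen_load_gpr materializes a
 * zero temporary for register 0, and gen_dest_gpr hands back a scratch
 * temporary whose final value gen_store_gpr will simply drop.
 */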

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
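
/* Both paths above set cpu_pc/cpu_npc before leaving the TB.  The
 * tcg_gen_goto_tb exit can later be patched to chain directly to the
 * next TB, while tcg_gen_lookup_and_goto_ptr falls back to a runtime
 * hash lookup of the target TB by CPU state.
 */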

// XXX suboptimal
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src) */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2) */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain ADD. */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place. */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain SUB. */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode. We discard the low part of the output.
               Ideally we'd combine this operation with the subtract that
               generated the borrow in the first place. */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry. */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

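/* MULScc: one step of the V8 iterative (shift-and-add) multiply.
 * Following the pseudo-code preserved in the comments below: the
 * multiplicand is added in only if the low bit of Y is set; Y is
 * shifted right with the low bit of rs1 entering at the top; and
 * rs1 is shifted right with N ^ V entering at the top before the
 * add sets the condition codes.
 */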
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
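
/* Per the UMUL/SMUL definition, Y receives the most significant 32 bits
 * of the 64-bit product; on a 64-bit target the full product also lands
 * in the destination register.
 */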

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
  FSR bit field FCC1 | FCC0 (two-bit FP condition code):
   0 =
   1 <
   2 >
   3 unordered
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc,
                               cpu_cond, tcg_constant_tl(0),
                               tcg_constant_tl(pc1), cpu_npc);
            dc->pc = npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}
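
/* While npc == JUMP_PC, the branch outcome is still pending: jump_pc[0]
 * holds the taken target, jump_pc[1] the fall-through address, and
 * cpu_cond the evaluated condition.  This movcond materializes the real
 * next PC once the deferred choice can no longer be kept symbolic.
 */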

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_check_align(TCGv addr, int mask)
{
    gen_helper_check_align(cpu_env, addr, tcg_constant_i32(mask));
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ, /* eq: Z */
        TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
        TCG_COND_LT, /* lt: N ^ V -> N */
        TCG_COND_EQ, /* leu: C | Z -> Z */
        TCG_COND_NEVER, /* ltu: C -> 0 */
        TCG_COND_LT, /* neg: N */
        TCG_COND_NEVER, /* vs: V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE, /* ne: !Z */
        TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE, /* ge: !(N ^ V) -> !N */
        TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE, /* pos: !N */
        TCG_COND_ALWAYS, /* vc: !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended. */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result. */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
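
/* Summary of the strategy above: when the flags producer is known
 * (CC_OP_LOGIC or CC_OP_SUB) the condition folds into a single TCG
 * comparison against the saved operands, avoiding any PSR computation;
 * otherwise the flags are computed via the helper and the condition is
 * evaluated bit by bit with the gen_op_eval_* routines.
 */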

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst. */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }
}

#ifdef TARGET_SPARC64
// Inverted logic: each entry holds the negation of the architectural
// rcond; gen_compare_reg inverts it back with tcg_invert_cond.
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}
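
/* E.g. rcond == 1 (BRZ, branch if register zero): the table holds
 * TCG_COND_NE, which tcg_invert_cond turns back into TCG_COND_EQ
 * against the constant 0.
 */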

static void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst. */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
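
/* The annul bit 'a' follows the SPARC branch rules: an annulled
 * branch-never (bn,a) skips its delay slot entirely, an annulled
 * branch-always (ba,a) jumps without executing the delay slot, and
 * for the conditional cases gen_branch_a executes the delay slot
 * only when the branch is taken.
 */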

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
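
/* Naming convention for the wrappers below: the letters after "fop_"
 * give the destination type followed by the source type(s), where F, D
 * and Q are single, double and quad (quads travel through the env
 * qt0/qt1 slots rather than as TCG values).  The "ne_" variants are
 * for operations that raise no IEEE exceptions and therefore skip the
 * check_ieee_exceptions call.
 */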

static void gen_fop_FF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static void gen_fop_DD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_FD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_constant_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
}
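
/* LDSTUB atomically loads the addressed byte and sets it to 0xff, i.e.
 * an exchange with the constant 0xff: the classic SPARC spinlock
 * primitive.
 */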

/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged. */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA. Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA: /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA: /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS: /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled, to bypass the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged. */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext. For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper. */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL: /* Bypass */
        case ASI_REAL_IO: /* Bypass, non-cacheable */
        case ASI_REAL_L: /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL: /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N: /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP: /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS: /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S: /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P: /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
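
/* Example decode (sparc32): for a supervisor word-sized "lda
 * [%o0] ASI_M_BYPASS, %o1", the result would be { GET_ASI_DIRECT, asi,
 * MMU_PHYS_IDX, MO_TEUL }, i.e. a plain load that bypasses translation.
 * Anything not special-cased above stays GET_ASI_HELPER and goes
 * through the ld_asi/st_asi helpers.
 */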

static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, MemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda. */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        break;
#else
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            return;
        }
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
#endif
        /* fall through */
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR. */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation. */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness. */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps. End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
                         TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                        int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop | MO_ALIGN);
        gen_store_gpr(dc, rd, oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_UB);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_ldstub(dc, dst, addr, da.mem_idx);
        break;
    default:
2314 /* ??? In theory, this should raise DAE_invalid_asi.
2315 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
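/* The fallback below emulates ldstub as a helper load followed by a
   helper store of 0xff, which is not atomic; in a parallel context
   we punt so the sequence reruns serialized under the exclusive
   lock. */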
2316 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
2317 gen_helper_exit_atomic(cpu_env);
2318 } else {
2319 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2320 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
2321 TCGv_i64 s64, t64;
2322
2323 save_state(dc);
2324 t64 = tcg_temp_new_i64();
2325 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2326
2327 s64 = tcg_constant_i64(0xff);
2328 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2329
2330 tcg_gen_trunc_i64_tl(dst, t64);
2331
2332 /* End the TB. */
2333 dc->npc = DYNAMIC_PC;
2334 }
2335 break;
2336 }
2337 }
2338 #endif
2339
2340 #ifdef TARGET_SPARC64
2341 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2342 int insn, int size, int rd)
2343 {
2344 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2345 TCGv_i32 d32;
2346 TCGv_i64 d64;
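/* Register numbering note: the FP file is backed by one TCGv_i64 per
   double-precision register, so double register rd maps to
   cpu_fpr[rd / 2]. */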
2347
2348 switch (da.type) {
2349 case GET_ASI_EXCP:
2350 break;
2351
2352 case GET_ASI_DIRECT:
2353 gen_address_mask(dc, addr);
2354 switch (size) {
2355 case 4:
2356 d32 = gen_dest_fpr_F(dc);
2357 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
2358 gen_store_fpr_F(dc, rd, d32);
2359 break;
2360 case 8:
2361 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2362 da.memop | MO_ALIGN_4);
2363 break;
2364 case 16:
2365 d64 = tcg_temp_new_i64();
2366 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2367 tcg_gen_addi_tl(addr, addr, 8);
2368 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2369 da.memop | MO_ALIGN_4);
2370 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2371 break;
2372 default:
2373 g_assert_not_reached();
2374 }
2375 break;
2376
2377 case GET_ASI_BLOCK:
2378 /* Valid for lddfa on aligned registers only. */
2379 if (size == 8 && (rd & 7) == 0) {
2380 MemOp memop;
2381 TCGv eight;
2382 int i;
2383
2384 gen_address_mask(dc, addr);
2385
2386 /* The first operation checks required alignment. */
2387 memop = da.memop | MO_ALIGN_64;
2388 eight = tcg_constant_tl(8);
2389 for (i = 0; ; ++i) {
2390 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2391 da.mem_idx, memop);
2392 if (i == 7) {
2393 break;
2394 }
2395 tcg_gen_add_tl(addr, addr, eight);
2396 memop = da.memop;
2397 }
2398 } else {
2399 gen_exception(dc, TT_ILL_INSN);
2400 }
2401 break;
2402
2403 case GET_ASI_SHORT:
2404 /* Valid for lddfa only. */
2405 if (size == 8) {
2406 gen_address_mask(dc, addr);
2407 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2408 da.memop | MO_ALIGN);
2409 } else {
2410 gen_exception(dc, TT_ILL_INSN);
2411 }
2412 break;
2413
2414 default:
2415 {
2416 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2417 TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
2418
2419 save_state(dc);
2420 /* According to the table in the UA2011 manual, the only
2421 other asis that are valid for ldfa/lddfa/ldqfa are
2422 the NO_FAULT asis. We still need a helper for these,
2423 but we can just use the integer asi helper for them. */
2424 switch (size) {
2425 case 4:
2426 d64 = tcg_temp_new_i64();
2427 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2428 d32 = gen_dest_fpr_F(dc);
2429 tcg_gen_extrl_i64_i32(d32, d64);
2430 gen_store_fpr_F(dc, rd, d32);
2431 break;
2432 case 8:
2433 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2434 break;
2435 case 16:
2436 d64 = tcg_temp_new_i64();
2437 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2438 tcg_gen_addi_tl(addr, addr, 8);
2439 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2440 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2441 break;
2442 default:
2443 g_assert_not_reached();
2444 }
2445 }
2446 break;
2447 }
2448 }
2449
2450 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2451 int insn, int size, int rd)
2452 {
2453 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2454 TCGv_i32 d32;
2455
2456 switch (da.type) {
2457 case GET_ASI_EXCP:
2458 break;
2459
2460 case GET_ASI_DIRECT:
2461 gen_address_mask(dc, addr);
2462 switch (size) {
2463 case 4:
2464 d32 = gen_load_fpr_F(dc, rd);
2465 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
2466 break;
2467 case 8:
2468 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2469 da.memop | MO_ALIGN_4);
2470 break;
2471 case 16:
2472 /* Only 4-byte alignment is required. However, it is legal for the
2473 cpu to signal the alignment fault, and the OS trap handler is
2474 required to fix it up. Requiring 16-byte alignment here avoids
2475 having to probe the second page before performing the first
2476 write. */
2477 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2478 da.memop | MO_ALIGN_16);
2479 tcg_gen_addi_tl(addr, addr, 8);
2480 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2481 break;
2482 default:
2483 g_assert_not_reached();
2484 }
2485 break;
2486
2487 case GET_ASI_BLOCK:
2488 /* Valid for stdfa on aligned registers only. */
2489 if (size == 8 && (rd & 7) == 0) {
2490 MemOp memop;
2491 TCGv eight;
2492 int i;
2493
2494 gen_address_mask(dc, addr);
2495
2496 /* The first operation checks required alignment. */
2497 memop = da.memop | MO_ALIGN_64;
2498 eight = tcg_constant_tl(8);
2499 for (i = 0; ; ++i) {
2500 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2501 da.mem_idx, memop);
2502 if (i == 7) {
2503 break;
2504 }
2505 tcg_gen_add_tl(addr, addr, eight);
2506 memop = da.memop;
2507 }
2508 } else {
2509 gen_exception(dc, TT_ILL_INSN);
2510 }
2511 break;
2512
2513 case GET_ASI_SHORT:
2514 /* Valid for stdfa only. */
2515 if (size == 8) {
2516 gen_address_mask(dc, addr);
2517 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2518 da.memop | MO_ALIGN);
2519 } else {
2520 gen_exception(dc, TT_ILL_INSN);
2521 }
2522 break;
2523
2524 default:
2525 /* According to the table in the UA2011 manual, the only
2526 other asis that are valid for stfa/stdfa/stqfa are
2527 the PST* asis, which aren't currently handled. */
2528 gen_exception(dc, TT_ILL_INSN);
2529 break;
2530 }
2531 }
2532
2533 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2534 {
2535 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2536 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2537 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2538
2539 switch (da.type) {
2540 case GET_ASI_EXCP:
2541 return;
2542
2543 case GET_ASI_DTWINX:
2544 gen_address_mask(dc, addr);
2545 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2546 tcg_gen_addi_tl(addr, addr, 8);
2547 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2548 break;
2549
2550 case GET_ASI_DIRECT:
2551 {
2552 TCGv_i64 tmp = tcg_temp_new_i64();
2553
2554 gen_address_mask(dc, addr);
2555 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
2556
2557 /* Note that LE ldda acts as if each 32-bit register
2558 result is byte swapped. Having just performed one
2559 64-bit bswap, we now need to swap the writebacks. */
2560 if ((da.memop & MO_BSWAP) == MO_TE) {
2561 tcg_gen_extr32_i64(lo, hi, tmp);
2562 } else {
2563 tcg_gen_extr32_i64(hi, lo, tmp);
2564 }
2565 }
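/* Example, assuming bytes 00..07 ascending at addr: an LE ldda loads
   tmp = 0x0706050403020100, and the swapped writeback stores
   0x03020100 to rd and 0x07060504 to rd + 1, i.e. each 32-bit half
   individually byte-swapped relative to the BE case. */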
2566 break;
2567
2568 default:
2569 /* ??? In theory we've handled all of the ASIs that are valid
2570 for ldda, and this should raise DAE_invalid_asi. However,
2571 real hardware allows others. This can be seen with e.g.
2572 FreeBSD 10.3 wrt ASI_IC_TAG. */
2573 {
2574 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2575 TCGv_i32 r_mop = tcg_constant_i32(da.memop);
2576 TCGv_i64 tmp = tcg_temp_new_i64();
2577
2578 save_state(dc);
2579 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2580
2581 /* See above. */
2582 if ((da.memop & MO_BSWAP) == MO_TE) {
2583 tcg_gen_extr32_i64(lo, hi, tmp);
2584 } else {
2585 tcg_gen_extr32_i64(hi, lo, tmp);
2586 }
2587 }
2588 break;
2589 }
2590
2591 gen_store_gpr(dc, rd, hi);
2592 gen_store_gpr(dc, rd + 1, lo);
2593 }
2594
2595 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2596 int insn, int rd)
2597 {
2598 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2599 TCGv lo = gen_load_gpr(dc, rd + 1);
2600
2601 switch (da.type) {
2602 case GET_ASI_EXCP:
2603 break;
2604
2605 case GET_ASI_DTWINX:
2606 gen_address_mask(dc, addr);
2607 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2608 tcg_gen_addi_tl(addr, addr, 8);
2609 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2610 break;
2611
2612 case GET_ASI_DIRECT:
2613 {
2614 TCGv_i64 t64 = tcg_temp_new_i64();
2615
2616 /* Note that LE stda acts as if each 32-bit register result is
2617 byte swapped. We will perform one 64-bit LE store, so now
2618 we must swap the order of the construction. */
2619 if ((da.memop & MO_BSWAP) == MO_TE) {
2620 tcg_gen_concat32_i64(t64, lo, hi);
2621 } else {
2622 tcg_gen_concat32_i64(t64, hi, lo);
2623 }
2624 gen_address_mask(dc, addr);
2625 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2626 }
2627 break;
2628
2629 default:
2630 /* ??? In theory we've handled all of the ASIs that are valid
2631 for stda, and this should raise DAE_invalid_asi. */
2632 {
2633 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2634 TCGv_i32 r_mop = tcg_constant_i32(da.memop);
2635 TCGv_i64 t64 = tcg_temp_new_i64();
2636
2637 /* See above. */
2638 if ((da.memop & MO_BSWAP) == MO_TE) {
2639 tcg_gen_concat32_i64(t64, lo, hi);
2640 } else {
2641 tcg_gen_concat32_i64(t64, hi, lo);
2642 }
2643
2644 save_state(dc);
2645 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2646 }
2647 break;
2648 }
2649 }
2650
2651 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2652 int insn, int rd)
2653 {
2654 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2655 TCGv oldv;
2656
2657 switch (da.type) {
2658 case GET_ASI_EXCP:
2659 return;
2660 case GET_ASI_DIRECT:
2661 oldv = tcg_temp_new();
2662 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2663 da.mem_idx, da.memop | MO_ALIGN);
2664 gen_store_gpr(dc, rd, oldv);
2665 break;
2666 default:
2667 /* ??? Should be DAE_invalid_asi. */
2668 gen_exception(dc, TT_DATA_ACCESS);
2669 break;
2670 }
2671 }
2672
2673 #elif !defined(CONFIG_USER_ONLY)
2674 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2675 {
2676 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2677 whereby "rd + 1" elicits "error: array subscript is above array".
2678 Since we have already asserted that rd is even, the semantics
2679 are unchanged. */
2680 TCGv lo = gen_dest_gpr(dc, rd | 1);
2681 TCGv hi = gen_dest_gpr(dc, rd);
2682 TCGv_i64 t64 = tcg_temp_new_i64();
2683 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2684
2685 switch (da.type) {
2686 case GET_ASI_EXCP:
2687 return;
2688 case GET_ASI_DIRECT:
2689 gen_address_mask(dc, addr);
2690 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2691 break;
2692 default:
2693 {
2694 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2695 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
2696
2697 save_state(dc);
2698 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2699 }
2700 break;
2701 }
2702
2703 tcg_gen_extr_i64_i32(lo, hi, t64);
2704 gen_store_gpr(dc, rd | 1, lo);
2705 gen_store_gpr(dc, rd, hi);
2706 }
2707
2708 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2709 int insn, int rd)
2710 {
2711 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2712 TCGv lo = gen_load_gpr(dc, rd + 1);
2713 TCGv_i64 t64 = tcg_temp_new_i64();
2714
2715 tcg_gen_concat_tl_i64(t64, lo, hi);
2716
2717 switch (da.type) {
2718 case GET_ASI_EXCP:
2719 break;
2720 case GET_ASI_DIRECT:
2721 gen_address_mask(dc, addr);
2722 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2723 break;
2724 case GET_ASI_BFILL:
2725 /* Store 32 bytes of T64 to ADDR. */
2726 /* ??? The original qemu code suggests 8-byte alignment, dropping
2727 the low bits, but the only place I can see this used is in the
2728 Linux kernel with 32 byte alignment, which would make more sense
2729 as a cacheline-style operation. */
2730 {
2731 TCGv d_addr = tcg_temp_new();
2732 TCGv eight = tcg_constant_tl(8);
2733 int i;
2734
2735 tcg_gen_andi_tl(d_addr, addr, -8);
2736 for (i = 0; i < 32; i += 8) {
2737 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2738 tcg_gen_add_tl(d_addr, d_addr, eight);
2739 }
2740 }
2741 break;
2742 default:
2743 {
2744 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2745 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
2746
2747 save_state(dc);
2748 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2749 }
2750 break;
2751 }
2752 }
2753 #endif
2754
2755 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2756 {
2757 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2758 return gen_load_gpr(dc, rs1);
2759 }
2760
2761 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2762 {
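/* GET_FIELDs(insn, 19, 31) extracts instruction bits 12:0 (FROM/TO
   count from the most-significant bit here) and sign-extends them:
   the standard simm13 immediate. */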
2763 if (IS_IMM) { /* immediate */
2764 target_long simm = GET_FIELDs(insn, 19, 31);
2765 TCGv t = tcg_temp_new();
2766 tcg_gen_movi_tl(t, simm);
2767 return t;
2768 } else { /* register */
2769 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2770 return gen_load_gpr(dc, rs2);
2771 }
2772 }
2773
2774 #ifdef TARGET_SPARC64
2775 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2776 {
2777 TCGv_i32 c32, zero, dst, s1, s2;
2778
2779 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2780 or fold the comparison down to 32 bits and use movcond_i32. Choose
2781 the latter. */
2782 c32 = tcg_temp_new_i32();
2783 if (cmp->is_bool) {
2784 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2785 } else {
2786 TCGv_i64 c64 = tcg_temp_new_i64();
2787 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2788 tcg_gen_extrl_i64_i32(c32, c64);
2789 }
2790
2791 s1 = gen_load_fpr_F(dc, rs);
2792 s2 = gen_load_fpr_F(dc, rd);
2793 dst = gen_dest_fpr_F(dc);
2794 zero = tcg_constant_i32(0);
2795
2796 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2797
2798 gen_store_fpr_F(dc, rd, dst);
2799 }
2800
2801 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2802 {
2803 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2804 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2805 gen_load_fpr_D(dc, rs),
2806 gen_load_fpr_D(dc, rd));
2807 gen_store_fpr_D(dc, rd, dst);
2808 }
2809
2810 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2811 {
2812 int qd = QFPREG(rd);
2813 int qs = QFPREG(rs);
2814
2815 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2816 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2817 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2818 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2819
2820 gen_update_fprs_dirty(dc, qd);
2821 }
2822
2823 #ifndef CONFIG_USER_ONLY
2824 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2825 {
2826 TCGv_i32 r_tl = tcg_temp_new_i32();
2827
2828 /* load env->tl into r_tl */
2829 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2830
2831 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK + 1 must be a power of 2 */
2832 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2833
2834 /* calculate offset to current trap state from env->ts, reuse r_tl */
2835 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2836 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2837
2838 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2839 {
2840 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2841 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2842 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2843 }
2844 }
2845 #endif
2846
2847 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2848 int width, bool cc, bool left)
2849 {
2850 TCGv lo1, lo2;
2851 uint64_t amask, tabl, tabr;
2852 int shift, imask, omask;
2853
2854 if (cc) {
2855 tcg_gen_mov_tl(cpu_cc_src, s1);
2856 tcg_gen_mov_tl(cpu_cc_src2, s2);
2857 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2858 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2859 dc->cc_op = CC_OP_SUB;
2860 }
2861
2862 /* Theory of operation: there are two tables, left and right (not to
2863 be confused with the left and right versions of the opcode). These
2864 are indexed by the low 3 bits of the inputs. To make things "easy",
2865 these tables are loaded into two constants, TABL and TABR below.
2866 The operation index = (input & imask) << shift calculates the index
2867 into the constant, while val = (table >> index) & omask calculates
2868 the value we're looking for. */
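/* Worked example for width 8, left: s1 & 7 == 1 gives index 8 and
   (0x80c0e0f0f8fcfeffULL >> 8) & 0xff == 0xfe for the left edge,
   while s2 & 7 == 5 gives (0xff7f3f1f0f070301ULL >> 40) & 0xff
   == 0x3f for the right. */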
2869 switch (width) {
2870 case 8:
2871 imask = 0x7;
2872 shift = 3;
2873 omask = 0xff;
2874 if (left) {
2875 tabl = 0x80c0e0f0f8fcfeffULL;
2876 tabr = 0xff7f3f1f0f070301ULL;
2877 } else {
2878 tabl = 0x0103070f1f3f7fffULL;
2879 tabr = 0xfffefcf8f0e0c080ULL;
2880 }
2881 break;
2882 case 16:
2883 imask = 0x6;
2884 shift = 1;
2885 omask = 0xf;
2886 if (left) {
2887 tabl = 0x8cef;
2888 tabr = 0xf731;
2889 } else {
2890 tabl = 0x137f;
2891 tabr = 0xfec8;
2892 }
2893 break;
2894 case 32:
2895 imask = 0x4;
2896 shift = 0;
2897 omask = 0x3;
2898 if (left) {
2899 tabl = (2 << 2) | 3;
2900 tabr = (3 << 2) | 1;
2901 } else {
2902 tabl = (1 << 2) | 3;
2903 tabr = (3 << 2) | 2;
2904 }
2905 break;
2906 default:
2907 abort();
2908 }
2909
2910 lo1 = tcg_temp_new();
2911 lo2 = tcg_temp_new();
2912 tcg_gen_andi_tl(lo1, s1, imask);
2913 tcg_gen_andi_tl(lo2, s2, imask);
2914 tcg_gen_shli_tl(lo1, lo1, shift);
2915 tcg_gen_shli_tl(lo2, lo2, shift);
2916
2917 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
2918 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
2919 tcg_gen_andi_tl(lo1, lo1, omask);
2920 tcg_gen_andi_tl(lo2, lo2, omask);
2921
2922 amask = -8;
2923 if (AM_CHECK(dc)) {
2924 amask &= 0xffffffffULL;
2925 }
2926 tcg_gen_andi_tl(s1, s1, amask);
2927 tcg_gen_andi_tl(s2, s2, amask);
2928
2929 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
2930 tcg_gen_and_tl(lo2, lo2, lo1);
2931 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
2932 }
2933
2934 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2935 {
2936 TCGv tmp = tcg_temp_new();
2937
2938 tcg_gen_add_tl(tmp, s1, s2);
2939 tcg_gen_andi_tl(dst, tmp, -8);
2940 if (left) {
2941 tcg_gen_neg_tl(tmp, tmp);
2942 }
2943 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2944 }
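/* In other words: dst = (s1 + s2) & ~7, and GSR.align (bits 2:0)
   receives the low three bits of the sum, negated for the "little"
   variant, for consumption by a later faligndata. */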
2945
2946 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2947 {
2948 TCGv t1, t2, shift;
2949
2950 t1 = tcg_temp_new();
2951 t2 = tcg_temp_new();
2952 shift = tcg_temp_new();
2953
2954 tcg_gen_andi_tl(shift, gsr, 7);
2955 tcg_gen_shli_tl(shift, shift, 3);
2956 tcg_gen_shl_tl(t1, s1, shift);
2957
2958 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2959 shift of (up to 63) followed by a constant shift of 1. */
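/* E.g. GSR.align == 3 gives shift == 24: t1 = s1 << 24 above, and
   t2 = (s2 >> (24 ^ 63)) >> 1 == s2 >> 40 == s2 >> (64 - 24),
   using x ^ 63 == 63 - x for any x in [0, 63]. */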
2960 tcg_gen_xori_tl(shift, shift, 63);
2961 tcg_gen_shr_tl(t2, s2, shift);
2962 tcg_gen_shri_tl(t2, t2, 1);
2963
2964 tcg_gen_or_tl(dst, t1, t2);
2965 }
2966 #endif
2967
2968 #define CHECK_IU_FEATURE(dc, FEATURE) \
2969 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2970 goto illegal_insn;
2971 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2972 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2973 goto nfpu_insn;
2974
2975 /* before an instruction, dc->pc must be static */
2976 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2977 {
2978 unsigned int opc, rs1, rs2, rd;
2979 TCGv cpu_src1, cpu_src2;
2980 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2981 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2982 target_long simm;
2983
2984 opc = GET_FIELD(insn, 0, 1);
2985 rd = GET_FIELD(insn, 2, 6);
2986
2987 switch (opc) {
2988 case 0: /* branches/sethi */
2989 {
2990 unsigned int xop = GET_FIELD(insn, 7, 9);
2991 int32_t target;
2992 switch (xop) {
2993 #ifdef TARGET_SPARC64
2994 case 0x1: /* V9 BPcc */
2995 {
2996 int cc;
2997
2998 target = GET_FIELD_SP(insn, 0, 18);
2999 target = sign_extend(target, 19);
3000 target <<= 2;
3001 cc = GET_FIELD_SP(insn, 20, 21);
3002 if (cc == 0)
3003 do_branch(dc, target, insn, 0);
3004 else if (cc == 2)
3005 do_branch(dc, target, insn, 1);
3006 else
3007 goto illegal_insn;
3008 goto jmp_insn;
3009 }
3010 case 0x3: /* V9 BPr */
3011 {
3012 target = GET_FIELD_SP(insn, 0, 13) |
3013 (GET_FIELD_SP(insn, 20, 21) << 14);
3014 target = sign_extend(target, 16);
3015 target <<= 2;
3016 cpu_src1 = get_src1(dc, insn);
3017 do_branch_reg(dc, target, insn, cpu_src1);
3018 goto jmp_insn;
3019 }
3020 case 0x5: /* V9 FBPcc */
3021 {
3022 int cc = GET_FIELD_SP(insn, 20, 21);
3023 if (gen_trap_ifnofpu(dc)) {
3024 goto jmp_insn;
3025 }
3026 target = GET_FIELD_SP(insn, 0, 18);
3027 target = sign_extend(target, 19);
3028 target <<= 2;
3029 do_fbranch(dc, target, insn, cc);
3030 goto jmp_insn;
3031 }
3032 #else
3033 case 0x7: /* CBN+x */
3034 {
3035 goto ncp_insn;
3036 }
3037 #endif
3038 case 0x2: /* BN+x */
3039 {
3040 target = GET_FIELD(insn, 10, 31);
3041 target = sign_extend(target, 22);
3042 target <<= 2;
3043 do_branch(dc, target, insn, 0);
3044 goto jmp_insn;
3045 }
3046 case 0x6: /* FBN+x */
3047 {
3048 if (gen_trap_ifnofpu(dc)) {
3049 goto jmp_insn;
3050 }
3051 target = GET_FIELD(insn, 10, 31);
3052 target = sign_extend(target, 22);
3053 target <<= 2;
3054 do_fbranch(dc, target, insn, 0);
3055 goto jmp_insn;
3056 }
3057 case 0x4: /* SETHI */
3058 /* Special-case %g0 because that's the canonical nop. */
3059 if (rd) {
3060 uint32_t value = GET_FIELD(insn, 10, 31);
3061 TCGv t = gen_dest_gpr(dc, rd);
3062 tcg_gen_movi_tl(t, value << 10);
3063 gen_store_gpr(dc, rd, t);
3064 }
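/* E.g. "sethi %hi(0x12345678), %g1" encodes the value 0x48d15 and
   stores 0x48d15 << 10 == 0x12345400: top 22 bits set, low 10
   bits clear. */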
3065 break;
3066 case 0x0: /* UNIMPL */
3067 default:
3068 goto illegal_insn;
3069 }
3070 break;
3071 }
3072 break;
3073 case 1: /*CALL*/
3074 {
3075 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3076 TCGv o7 = gen_dest_gpr(dc, 15);
3077
3078 tcg_gen_movi_tl(o7, dc->pc);
3079 gen_store_gpr(dc, 15, o7);
3080 target += dc->pc;
3081 gen_mov_pc_npc(dc);
3082 #ifdef TARGET_SPARC64
3083 if (unlikely(AM_CHECK(dc))) {
3084 target &= 0xffffffffULL;
3085 }
3086 #endif
3087 dc->npc = target;
3088 }
3089 goto jmp_insn;
3090 case 2: /* FPU & Logical Operations */
3091 {
3092 unsigned int xop = GET_FIELD(insn, 7, 12);
3093 TCGv cpu_dst = tcg_temp_new();
3094 TCGv cpu_tmp0;
3095
3096 if (xop == 0x3a) { /* generate trap */
3097 int cond = GET_FIELD(insn, 3, 6);
3098 TCGv_i32 trap;
3099 TCGLabel *l1 = NULL;
3100 int mask;
3101
3102 if (cond == 0) {
3103 /* Trap never. */
3104 break;
3105 }
3106
3107 save_state(dc);
3108
3109 if (cond != 8) {
3110 /* Conditional trap. */
3111 DisasCompare cmp;
3112 #ifdef TARGET_SPARC64
3113 /* V9 icc/xcc */
3114 int cc = GET_FIELD_SP(insn, 11, 12);
3115 if (cc == 0) {
3116 gen_compare(&cmp, 0, cond, dc);
3117 } else if (cc == 2) {
3118 gen_compare(&cmp, 1, cond, dc);
3119 } else {
3120 goto illegal_insn;
3121 }
3122 #else
3123 gen_compare(&cmp, 0, cond, dc);
3124 #endif
3125 l1 = gen_new_label();
3126 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3127 cmp.c1, cmp.c2, l1);
3128 }
3129
3130 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3131 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3132
3133 /* Don't use the normal temporaries, as they may well have
3134 gone out of scope with the branch above. While we're
3135 doing that we might as well pre-truncate to 32-bit. */
3136 trap = tcg_temp_new_i32();
3137
3138 rs1 = GET_FIELD_SP(insn, 14, 18);
3139 if (IS_IMM) {
3140 rs2 = GET_FIELD_SP(insn, 0, 7);
3141 if (rs1 == 0) {
3142 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3143 /* Signal that the trap value is fully constant. */
3144 mask = 0;
3145 } else {
3146 TCGv t1 = gen_load_gpr(dc, rs1);
3147 tcg_gen_trunc_tl_i32(trap, t1);
3148 tcg_gen_addi_i32(trap, trap, rs2);
3149 }
3150 } else {
3151 TCGv t1, t2;
3152 rs2 = GET_FIELD_SP(insn, 0, 4);
3153 t1 = gen_load_gpr(dc, rs1);
3154 t2 = gen_load_gpr(dc, rs2);
3155 tcg_gen_add_tl(t1, t1, t2);
3156 tcg_gen_trunc_tl_i32(trap, t1);
3157 }
3158 if (mask != 0) {
3159 tcg_gen_andi_i32(trap, trap, mask);
3160 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3161 }
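/* E.g. "ta 0x10" (rs1 = %g0, immediate form) raises exception
   number TT_TRAP + (0x10 & mask), i.e. software trap 0x10. */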
3162
3163 gen_helper_raise_exception(cpu_env, trap);
3164
3165 if (cond == 8) {
3166 /* An unconditional trap ends the TB. */
3167 dc->base.is_jmp = DISAS_NORETURN;
3168 goto jmp_insn;
3169 } else {
3170 /* A conditional trap falls through to the next insn. */
3171 gen_set_label(l1);
3172 break;
3173 }
3174 } else if (xop == 0x28) {
3175 rs1 = GET_FIELD(insn, 13, 17);
3176 switch(rs1) {
3177 case 0: /* rdy */
3178 #ifndef TARGET_SPARC64
3179 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3180 manual, rdy on the microSPARC
3181 II */
3182 case 0x0f: /* stbar in the SPARCv8 manual,
3183 rdy on the microSPARC II */
3184 case 0x10 ... 0x1f: /* implementation-dependent in the
3185 SPARCv8 manual, rdy on the
3186 microSPARC II */
3187 /* Read Asr17 */
3188 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3189 TCGv t = gen_dest_gpr(dc, rd);
3190 /* Read Asr17 for a Leon3 monoprocessor */
3191 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3192 gen_store_gpr(dc, rd, t);
3193 break;
3194 }
3195 #endif
3196 gen_store_gpr(dc, rd, cpu_y);
3197 break;
3198 #ifdef TARGET_SPARC64
3199 case 0x2: /* V9 rdccr */
3200 update_psr(dc);
3201 gen_helper_rdccr(cpu_dst, cpu_env);
3202 gen_store_gpr(dc, rd, cpu_dst);
3203 break;
3204 case 0x3: /* V9 rdasi */
3205 tcg_gen_movi_tl(cpu_dst, dc->asi);
3206 gen_store_gpr(dc, rd, cpu_dst);
3207 break;
3208 case 0x4: /* V9 rdtick */
3209 {
3210 TCGv_ptr r_tickptr;
3211 TCGv_i32 r_const;
3212
3213 r_tickptr = tcg_temp_new_ptr();
3214 r_const = tcg_constant_i32(dc->mem_idx);
3215 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3216 offsetof(CPUSPARCState, tick));
3217 if (translator_io_start(&dc->base)) {
3218 dc->base.is_jmp = DISAS_EXIT;
3219 }
3220 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3221 r_const);
3222 gen_store_gpr(dc, rd, cpu_dst);
3223 }
3224 break;
3225 case 0x5: /* V9 rdpc */
3226 {
3227 TCGv t = gen_dest_gpr(dc, rd);
3228 if (unlikely(AM_CHECK(dc))) {
3229 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3230 } else {
3231 tcg_gen_movi_tl(t, dc->pc);
3232 }
3233 gen_store_gpr(dc, rd, t);
3234 }
3235 break;
3236 case 0x6: /* V9 rdfprs */
3237 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3238 gen_store_gpr(dc, rd, cpu_dst);
3239 break;
3240 case 0xf: /* V9 membar */
3241 break; /* no effect */
3242 case 0x13: /* Graphics Status */
3243 if (gen_trap_ifnofpu(dc)) {
3244 goto jmp_insn;
3245 }
3246 gen_store_gpr(dc, rd, cpu_gsr);
3247 break;
3248 case 0x16: /* Softint */
3249 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3250 offsetof(CPUSPARCState, softint));
3251 gen_store_gpr(dc, rd, cpu_dst);
3252 break;
3253 case 0x17: /* Tick compare */
3254 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3255 break;
3256 case 0x18: /* System tick */
3257 {
3258 TCGv_ptr r_tickptr;
3259 TCGv_i32 r_const;
3260
3261 r_tickptr = tcg_temp_new_ptr();
3262 r_const = tcg_constant_i32(dc->mem_idx);
3263 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3264 offsetof(CPUSPARCState, stick));
3265 if (translator_io_start(&dc->base)) {
3266 dc->base.is_jmp = DISAS_EXIT;
3267 }
3268 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3269 r_const);
3270 gen_store_gpr(dc, rd, cpu_dst);
3271 }
3272 break;
3273 case 0x19: /* System tick compare */
3274 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3275 break;
3276 case 0x1a: /* UltraSPARC-T1 Strand status */
3277 /* XXX The HYPV check may not be enough: UA2005 & UA2007 describe
3278 * this ASR as implementation dependent.
3279 */
3280 CHECK_IU_FEATURE(dc, HYPV);
3281 {
3282 TCGv t = gen_dest_gpr(dc, rd);
3283 tcg_gen_movi_tl(t, 1UL);
3284 gen_store_gpr(dc, rd, t);
3285 }
3286 break;
3287 case 0x10: /* Performance Control */
3288 case 0x11: /* Performance Instrumentation Counter */
3289 case 0x12: /* Dispatch Control */
3290 case 0x14: /* Softint set, WO */
3291 case 0x15: /* Softint clear, WO */
3292 #endif
3293 default:
3294 goto illegal_insn;
3295 }
3296 #if !defined(CONFIG_USER_ONLY)
3297 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3298 #ifndef TARGET_SPARC64
3299 if (!supervisor(dc)) {
3300 goto priv_insn;
3301 }
3302 update_psr(dc);
3303 gen_helper_rdpsr(cpu_dst, cpu_env);
3304 #else
3305 CHECK_IU_FEATURE(dc, HYPV);
3306 if (!hypervisor(dc))
3307 goto priv_insn;
3308 rs1 = GET_FIELD(insn, 13, 17);
3309 switch (rs1) {
3310 case 0: // hpstate
3311 tcg_gen_ld_i64(cpu_dst, cpu_env,
3312 offsetof(CPUSPARCState, hpstate));
3313 break;
3314 case 1: // htstate
3315 // gen_op_rdhtstate();
3316 break;
3317 case 3: // hintp
3318 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3319 break;
3320 case 5: // htba
3321 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3322 break;
3323 case 6: // hver
3324 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3325 break;
3326 case 31: // hstick_cmpr
3327 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3328 break;
3329 default:
3330 goto illegal_insn;
3331 }
3332 #endif
3333 gen_store_gpr(dc, rd, cpu_dst);
3334 break;
3335 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3336 if (!supervisor(dc)) {
3337 goto priv_insn;
3338 }
3339 cpu_tmp0 = tcg_temp_new();
3340 #ifdef TARGET_SPARC64
3341 rs1 = GET_FIELD(insn, 13, 17);
3342 switch (rs1) {
3343 case 0: // tpc
3344 {
3345 TCGv_ptr r_tsptr;
3346
3347 r_tsptr = tcg_temp_new_ptr();
3348 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3349 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3350 offsetof(trap_state, tpc));
3351 }
3352 break;
3353 case 1: // tnpc
3354 {
3355 TCGv_ptr r_tsptr;
3356
3357 r_tsptr = tcg_temp_new_ptr();
3358 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3359 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3360 offsetof(trap_state, tnpc));
3361 }
3362 break;
3363 case 2: // tstate
3364 {
3365 TCGv_ptr r_tsptr;
3366
3367 r_tsptr = tcg_temp_new_ptr();
3368 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3369 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3370 offsetof(trap_state, tstate));
3371 }
3372 break;
3373 case 3: // tt
3374 {
3375 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3376
3377 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3378 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3379 offsetof(trap_state, tt));
3380 }
3381 break;
3382 case 4: // tick
3383 {
3384 TCGv_ptr r_tickptr;
3385 TCGv_i32 r_const;
3386
3387 r_tickptr = tcg_temp_new_ptr();
3388 r_const = tcg_constant_i32(dc->mem_idx);
3389 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3390 offsetof(CPUSPARCState, tick));
3391 if (translator_io_start(&dc->base)) {
3392 dc->base.is_jmp = DISAS_EXIT;
3393 }
3394 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3395 r_tickptr, r_const);
3396 }
3397 break;
3398 case 5: // tba
3399 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3400 break;
3401 case 6: // pstate
3402 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3403 offsetof(CPUSPARCState, pstate));
3404 break;
3405 case 7: // tl
3406 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3407 offsetof(CPUSPARCState, tl));
3408 break;
3409 case 8: // pil
3410 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3411 offsetof(CPUSPARCState, psrpil));
3412 break;
3413 case 9: // cwp
3414 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3415 break;
3416 case 10: // cansave
3417 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3418 offsetof(CPUSPARCState, cansave));
3419 break;
3420 case 11: // canrestore
3421 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3422 offsetof(CPUSPARCState, canrestore));
3423 break;
3424 case 12: // cleanwin
3425 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3426 offsetof(CPUSPARCState, cleanwin));
3427 break;
3428 case 13: // otherwin
3429 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3430 offsetof(CPUSPARCState, otherwin));
3431 break;
3432 case 14: // wstate
3433 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3434 offsetof(CPUSPARCState, wstate));
3435 break;
3436 case 16: // UA2005 gl
3437 CHECK_IU_FEATURE(dc, GL);
3438 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3439 offsetof(CPUSPARCState, gl));
3440 break;
3441 case 26: // UA2005 strand status
3442 CHECK_IU_FEATURE(dc, HYPV);
3443 if (!hypervisor(dc))
3444 goto priv_insn;
3445 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3446 break;
3447 case 31: // ver
3448 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3449 break;
3450 case 15: // fq
3451 default:
3452 goto illegal_insn;
3453 }
3454 #else
3455 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3456 #endif
3457 gen_store_gpr(dc, rd, cpu_tmp0);
3458 break;
3459 #endif
3460 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3461 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3462 #ifdef TARGET_SPARC64
3463 gen_helper_flushw(cpu_env);
3464 #else
3465 if (!supervisor(dc))
3466 goto priv_insn;
3467 gen_store_gpr(dc, rd, cpu_tbr);
3468 #endif
3469 break;
3470 #endif
3471 } else if (xop == 0x34) { /* FPU Operations */
3472 if (gen_trap_ifnofpu(dc)) {
3473 goto jmp_insn;
3474 }
3475 gen_op_clear_ieee_excp_and_FTT();
3476 rs1 = GET_FIELD(insn, 13, 17);
3477 rs2 = GET_FIELD(insn, 27, 31);
3478 xop = GET_FIELD(insn, 18, 26);
3479
3480 switch (xop) {
3481 case 0x1: /* fmovs */
3482 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3483 gen_store_fpr_F(dc, rd, cpu_src1_32);
3484 break;
3485 case 0x5: /* fnegs */
3486 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3487 break;
3488 case 0x9: /* fabss */
3489 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3490 break;
3491 case 0x29: /* fsqrts */
3492 CHECK_FPU_FEATURE(dc, FSQRT);
3493 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3494 break;
3495 case 0x2a: /* fsqrtd */
3496 CHECK_FPU_FEATURE(dc, FSQRT);
3497 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3498 break;
3499 case 0x2b: /* fsqrtq */
3500 CHECK_FPU_FEATURE(dc, FLOAT128);
3501 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3502 break;
3503 case 0x41: /* fadds */
3504 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3505 break;
3506 case 0x42: /* faddd */
3507 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3508 break;
3509 case 0x43: /* faddq */
3510 CHECK_FPU_FEATURE(dc, FLOAT128);
3511 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3512 break;
3513 case 0x45: /* fsubs */
3514 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3515 break;
3516 case 0x46: /* fsubd */
3517 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3518 break;
3519 case 0x47: /* fsubq */
3520 CHECK_FPU_FEATURE(dc, FLOAT128);
3521 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3522 break;
3523 case 0x49: /* fmuls */
3524 CHECK_FPU_FEATURE(dc, FMUL);
3525 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3526 break;
3527 case 0x4a: /* fmuld */
3528 CHECK_FPU_FEATURE(dc, FMUL);
3529 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3530 break;
3531 case 0x4b: /* fmulq */
3532 CHECK_FPU_FEATURE(dc, FLOAT128);
3533 CHECK_FPU_FEATURE(dc, FMUL);
3534 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3535 break;
3536 case 0x4d: /* fdivs */
3537 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3538 break;
3539 case 0x4e: /* fdivd */
3540 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3541 break;
3542 case 0x4f: /* fdivq */
3543 CHECK_FPU_FEATURE(dc, FLOAT128);
3544 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3545 break;
3546 case 0x69: /* fsmuld */
3547 CHECK_FPU_FEATURE(dc, FSMULD);
3548 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3549 break;
3550 case 0x6e: /* fdmulq */
3551 CHECK_FPU_FEATURE(dc, FLOAT128);
3552 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3553 break;
3554 case 0xc4: /* fitos */
3555 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3556 break;
3557 case 0xc6: /* fdtos */
3558 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3559 break;
3560 case 0xc7: /* fqtos */
3561 CHECK_FPU_FEATURE(dc, FLOAT128);
3562 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3563 break;
3564 case 0xc8: /* fitod */
3565 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3566 break;
3567 case 0xc9: /* fstod */
3568 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3569 break;
3570 case 0xcb: /* fqtod */
3571 CHECK_FPU_FEATURE(dc, FLOAT128);
3572 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3573 break;
3574 case 0xcc: /* fitoq */
3575 CHECK_FPU_FEATURE(dc, FLOAT128);
3576 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3577 break;
3578 case 0xcd: /* fstoq */
3579 CHECK_FPU_FEATURE(dc, FLOAT128);
3580 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3581 break;
3582 case 0xce: /* fdtoq */
3583 CHECK_FPU_FEATURE(dc, FLOAT128);
3584 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3585 break;
3586 case 0xd1: /* fstoi */
3587 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3588 break;
3589 case 0xd2: /* fdtoi */
3590 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3591 break;
3592 case 0xd3: /* fqtoi */
3593 CHECK_FPU_FEATURE(dc, FLOAT128);
3594 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3595 break;
3596 #ifdef TARGET_SPARC64
3597 case 0x2: /* V9 fmovd */
3598 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3599 gen_store_fpr_D(dc, rd, cpu_src1_64);
3600 break;
3601 case 0x3: /* V9 fmovq */
3602 CHECK_FPU_FEATURE(dc, FLOAT128);
3603 gen_move_Q(dc, rd, rs2);
3604 break;
3605 case 0x6: /* V9 fnegd */
3606 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3607 break;
3608 case 0x7: /* V9 fnegq */
3609 CHECK_FPU_FEATURE(dc, FLOAT128);
3610 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3611 break;
3612 case 0xa: /* V9 fabsd */
3613 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3614 break;
3615 case 0xb: /* V9 fabsq */
3616 CHECK_FPU_FEATURE(dc, FLOAT128);
3617 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3618 break;
3619 case 0x81: /* V9 fstox */
3620 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3621 break;
3622 case 0x82: /* V9 fdtox */
3623 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3624 break;
3625 case 0x83: /* V9 fqtox */
3626 CHECK_FPU_FEATURE(dc, FLOAT128);
3627 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3628 break;
3629 case 0x84: /* V9 fxtos */
3630 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3631 break;
3632 case 0x88: /* V9 fxtod */
3633 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3634 break;
3635 case 0x8c: /* V9 fxtoq */
3636 CHECK_FPU_FEATURE(dc, FLOAT128);
3637 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3638 break;
3639 #endif
3640 default:
3641 goto illegal_insn;
3642 }
3643 } else if (xop == 0x35) { /* FPU Operations */
3644 #ifdef TARGET_SPARC64
3645 int cond;
3646 #endif
3647 if (gen_trap_ifnofpu(dc)) {
3648 goto jmp_insn;
3649 }
3650 gen_op_clear_ieee_excp_and_FTT();
3651 rs1 = GET_FIELD(insn, 13, 17);
3652 rs2 = GET_FIELD(insn, 27, 31);
3653 xop = GET_FIELD(insn, 18, 26);
3654
3655 #ifdef TARGET_SPARC64
3656 #define FMOVR(sz) \
3657 do { \
3658 DisasCompare cmp; \
3659 cond = GET_FIELD_SP(insn, 10, 12); \
3660 cpu_src1 = get_src1(dc, insn); \
3661 gen_compare_reg(&cmp, cond, cpu_src1); \
3662 gen_fmov##sz(dc, &cmp, rd, rs2); \
3663 } while (0)
3664
3665 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3666 FMOVR(s);
3667 break;
3668 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3669 FMOVR(d);
3670 break;
3671 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3672 CHECK_FPU_FEATURE(dc, FLOAT128);
3673 FMOVR(q);
3674 break;
3675 }
3676 #undef FMOVR
3677 #endif
3678 switch (xop) {
3679 #ifdef TARGET_SPARC64
3680 #define FMOVCC(fcc, sz) \
3681 do { \
3682 DisasCompare cmp; \
3683 cond = GET_FIELD_SP(insn, 14, 17); \
3684 gen_fcompare(&cmp, fcc, cond); \
3685 gen_fmov##sz(dc, &cmp, rd, rs2); \
3686 } while (0)
3687
3688 case 0x001: /* V9 fmovscc %fcc0 */
3689 FMOVCC(0, s);
3690 break;
3691 case 0x002: /* V9 fmovdcc %fcc0 */
3692 FMOVCC(0, d);
3693 break;
3694 case 0x003: /* V9 fmovqcc %fcc0 */
3695 CHECK_FPU_FEATURE(dc, FLOAT128);
3696 FMOVCC(0, q);
3697 break;
3698 case 0x041: /* V9 fmovscc %fcc1 */
3699 FMOVCC(1, s);
3700 break;
3701 case 0x042: /* V9 fmovdcc %fcc1 */
3702 FMOVCC(1, d);
3703 break;
3704 case 0x043: /* V9 fmovqcc %fcc1 */
3705 CHECK_FPU_FEATURE(dc, FLOAT128);
3706 FMOVCC(1, q);
3707 break;
3708 case 0x081: /* V9 fmovscc %fcc2 */
3709 FMOVCC(2, s);
3710 break;
3711 case 0x082: /* V9 fmovdcc %fcc2 */
3712 FMOVCC(2, d);
3713 break;
3714 case 0x083: /* V9 fmovqcc %fcc2 */
3715 CHECK_FPU_FEATURE(dc, FLOAT128);
3716 FMOVCC(2, q);
3717 break;
3718 case 0x0c1: /* V9 fmovscc %fcc3 */
3719 FMOVCC(3, s);
3720 break;
3721 case 0x0c2: /* V9 fmovdcc %fcc3 */
3722 FMOVCC(3, d);
3723 break;
3724 case 0x0c3: /* V9 fmovqcc %fcc3 */
3725 CHECK_FPU_FEATURE(dc, FLOAT128);
3726 FMOVCC(3, q);
3727 break;
3728 #undef FMOVCC
3729 #define FMOVCC(xcc, sz) \
3730 do { \
3731 DisasCompare cmp; \
3732 cond = GET_FIELD_SP(insn, 14, 17); \
3733 gen_compare(&cmp, xcc, cond, dc); \
3734 gen_fmov##sz(dc, &cmp, rd, rs2); \
3735 } while (0)
3736
3737 case 0x101: /* V9 fmovscc %icc */
3738 FMOVCC(0, s);
3739 break;
3740 case 0x102: /* V9 fmovdcc %icc */
3741 FMOVCC(0, d);
3742 break;
3743 case 0x103: /* V9 fmovqcc %icc */
3744 CHECK_FPU_FEATURE(dc, FLOAT128);
3745 FMOVCC(0, q);
3746 break;
3747 case 0x181: /* V9 fmovscc %xcc */
3748 FMOVCC(1, s);
3749 break;
3750 case 0x182: /* V9 fmovdcc %xcc */
3751 FMOVCC(1, d);
3752 break;
3753 case 0x183: /* V9 fmovqcc %xcc */
3754 CHECK_FPU_FEATURE(dc, FLOAT128);
3755 FMOVCC(1, q);
3756 break;
3757 #undef FMOVCC
3758 #endif
3759 case 0x51: /* fcmps, V9 %fcc */
3760 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3761 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3762 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3763 break;
3764 case 0x52: /* fcmpd, V9 %fcc */
3765 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3766 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3767 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3768 break;
3769 case 0x53: /* fcmpq, V9 %fcc */
3770 CHECK_FPU_FEATURE(dc, FLOAT128);
3771 gen_op_load_fpr_QT0(QFPREG(rs1));
3772 gen_op_load_fpr_QT1(QFPREG(rs2));
3773 gen_op_fcmpq(rd & 3);
3774 break;
3775 case 0x55: /* fcmpes, V9 %fcc */
3776 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3777 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3778 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3779 break;
3780 case 0x56: /* fcmped, V9 %fcc */
3781 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3782 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3783 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3784 break;
3785 case 0x57: /* fcmpeq, V9 %fcc */
3786 CHECK_FPU_FEATURE(dc, FLOAT128);
3787 gen_op_load_fpr_QT0(QFPREG(rs1));
3788 gen_op_load_fpr_QT1(QFPREG(rs2));
3789 gen_op_fcmpeq(rd & 3);
3790 break;
3791 default:
3792 goto illegal_insn;
3793 }
3794 } else if (xop == 0x2) {
3795 TCGv dst = gen_dest_gpr(dc, rd);
3796 rs1 = GET_FIELD(insn, 13, 17);
3797 if (rs1 == 0) {
3798 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3799 if (IS_IMM) { /* immediate */
3800 simm = GET_FIELDs(insn, 19, 31);
3801 tcg_gen_movi_tl(dst, simm);
3802 gen_store_gpr(dc, rd, dst);
3803 } else { /* register */
3804 rs2 = GET_FIELD(insn, 27, 31);
3805 if (rs2 == 0) {
3806 tcg_gen_movi_tl(dst, 0);
3807 gen_store_gpr(dc, rd, dst);
3808 } else {
3809 cpu_src2 = gen_load_gpr(dc, rs2);
3810 gen_store_gpr(dc, rd, cpu_src2);
3811 }
3812 }
3813 } else {
3814 cpu_src1 = get_src1(dc, insn);
3815 if (IS_IMM) { /* immediate */
3816 simm = GET_FIELDs(insn, 19, 31);
3817 tcg_gen_ori_tl(dst, cpu_src1, simm);
3818 gen_store_gpr(dc, rd, dst);
3819 } else { /* register */
3820 rs2 = GET_FIELD(insn, 27, 31);
3821 if (rs2 == 0) {
3822 /* mov shortcut: or x, %g0, y -> mov x, y */
3823 gen_store_gpr(dc, rd, cpu_src1);
3824 } else {
3825 cpu_src2 = gen_load_gpr(dc, rs2);
3826 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3827 gen_store_gpr(dc, rd, dst);
3828 }
3829 }
3830 }
3831 #ifdef TARGET_SPARC64
3832 } else if (xop == 0x25) { /* sll, V9 sllx */
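/* Bit 12 of the instruction selects the 64-bit X form, which uses a
   6-bit shift count instead of the 5-bit count of the 32-bit form. */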
3833 cpu_src1 = get_src1(dc, insn);
3834 if (IS_IMM) { /* immediate */
3835 simm = GET_FIELDs(insn, 20, 31);
3836 if (insn & (1 << 12)) {
3837 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3838 } else {
3839 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3840 }
3841 } else { /* register */
3842 rs2 = GET_FIELD(insn, 27, 31);
3843 cpu_src2 = gen_load_gpr(dc, rs2);
3844 cpu_tmp0 = tcg_temp_new();
3845 if (insn & (1 << 12)) {
3846 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3847 } else {
3848 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3849 }
3850 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3851 }
3852 gen_store_gpr(dc, rd, cpu_dst);
3853 } else if (xop == 0x26) { /* srl, V9 srlx */
3854 cpu_src1 = get_src1(dc, insn);
3855 if (IS_IMM) { /* immediate */
3856 simm = GET_FIELDs(insn, 20, 31);
3857 if (insn & (1 << 12)) {
3858 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3859 } else {
3860 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3861 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3862 }
3863 } else { /* register */
3864 rs2 = GET_FIELD(insn, 27, 31);
3865 cpu_src2 = gen_load_gpr(dc, rs2);
3866 cpu_tmp0 = tcg_temp_new();
3867 if (insn & (1 << 12)) {
3868 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3869 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3870 } else {
3871 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3872 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3873 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3874 }
3875 }
3876 gen_store_gpr(dc, rd, cpu_dst);
3877 } else if (xop == 0x27) { /* sra, V9 srax */
3878 cpu_src1 = get_src1(dc, insn);
3879 if (IS_IMM) { /* immediate */
3880 simm = GET_FIELDs(insn, 20, 31);
3881 if (insn & (1 << 12)) {
3882 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3883 } else {
3884 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3885 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3886 }
3887 } else { /* register */
3888 rs2 = GET_FIELD(insn, 27, 31);
3889 cpu_src2 = gen_load_gpr(dc, rs2);
3890 cpu_tmp0 = tcg_temp_new();
3891 if (insn & (1 << 12)) {
3892 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3893 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3894 } else {
3895 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3896 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3897 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3898 }
3899 }
3900 gen_store_gpr(dc, rd, cpu_dst);
3901 #endif
3902 } else if (xop < 0x36) {
3903 if (xop < 0x20) {
3904 cpu_src1 = get_src1(dc, insn);
3905 cpu_src2 = get_src2(dc, insn);
3906 switch (xop & ~0x10) {
3907 case 0x0: /* add */
3908 if (xop & 0x10) {
3909 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3910 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3911 dc->cc_op = CC_OP_ADD;
3912 } else {
3913 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3914 }
3915 break;
3916 case 0x1: /* and */
3917 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3918 if (xop & 0x10) {
3919 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3920 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3921 dc->cc_op = CC_OP_LOGIC;
3922 }
3923 break;
3924 case 0x2: /* or */
3925 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3926 if (xop & 0x10) {
3927 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3928 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3929 dc->cc_op = CC_OP_LOGIC;
3930 }
3931 break;
3932 case 0x3: /* xor */
3933 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3934 if (xop & 0x10) {
3935 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3936 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3937 dc->cc_op = CC_OP_LOGIC;
3938 }
3939 break;
3940 case 0x4: /* sub */
3941 if (xop & 0x10) {
3942 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3943 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3944 dc->cc_op = CC_OP_SUB;
3945 } else {
3946 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3947 }
3948 break;
3949 case 0x5: /* andn */
3950 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3951 if (xop & 0x10) {
3952 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3953 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3954 dc->cc_op = CC_OP_LOGIC;
3955 }
3956 break;
3957 case 0x6: /* orn */
3958 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3959 if (xop & 0x10) {
3960 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3961 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3962 dc->cc_op = CC_OP_LOGIC;
3963 }
3964 break;
3965 case 0x7: /* xorn */
3966 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3967 if (xop & 0x10) {
3968 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3969 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3970 dc->cc_op = CC_OP_LOGIC;
3971 }
3972 break;
3973 case 0x8: /* addx, V9 addc */
3974 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3975 (xop & 0x10));
3976 break;
3977 #ifdef TARGET_SPARC64
3978 case 0x9: /* V9 mulx */
3979 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3980 break;
3981 #endif
3982 case 0xa: /* umul */
3983 CHECK_IU_FEATURE(dc, MUL);
3984 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3985 if (xop & 0x10) {
3986 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3987 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3988 dc->cc_op = CC_OP_LOGIC;
3989 }
3990 break;
3991 case 0xb: /* smul */
3992 CHECK_IU_FEATURE(dc, MUL);
3993 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3994 if (xop & 0x10) {
3995 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3996 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3997 dc->cc_op = CC_OP_LOGIC;
3998 }
3999 break;
4000 case 0xc: /* subx, V9 subc */
4001 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4002 (xop & 0x10));
4003 break;
4004 #ifdef TARGET_SPARC64
4005 case 0xd: /* V9 udivx */
4006 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4007 break;
4008 #endif
4009 case 0xe: /* udiv */
4010 CHECK_IU_FEATURE(dc, DIV);
4011 if (xop & 0x10) {
4012 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4013 cpu_src2);
4014 dc->cc_op = CC_OP_DIV;
4015 } else {
4016 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4017 cpu_src2);
4018 }
4019 break;
4020 case 0xf: /* sdiv */
4021 CHECK_IU_FEATURE(dc, DIV);
4022 if (xop & 0x10) {
4023 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4024 cpu_src2);
4025 dc->cc_op = CC_OP_DIV;
4026 } else {
4027 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4028 cpu_src2);
4029 }
4030 break;
4031 default:
4032 goto illegal_insn;
4033 }
4034 gen_store_gpr(dc, rd, cpu_dst);
4035 } else {
4036 cpu_src1 = get_src1(dc, insn);
4037 cpu_src2 = get_src2(dc, insn);
4038 switch (xop) {
4039 case 0x20: /* taddcc */
4040 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4041 gen_store_gpr(dc, rd, cpu_dst);
4042 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4043 dc->cc_op = CC_OP_TADD;
4044 break;
4045 case 0x21: /* tsubcc */
4046 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4047 gen_store_gpr(dc, rd, cpu_dst);
4048 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4049 dc->cc_op = CC_OP_TSUB;
4050 break;
4051 case 0x22: /* taddcctv */
4052 gen_helper_taddcctv(cpu_dst, cpu_env,
4053 cpu_src1, cpu_src2);
4054 gen_store_gpr(dc, rd, cpu_dst);
4055 dc->cc_op = CC_OP_TADDTV;
4056 break;
4057 case 0x23: /* tsubcctv */
4058 gen_helper_tsubcctv(cpu_dst, cpu_env,
4059 cpu_src1, cpu_src2);
4060 gen_store_gpr(dc, rd, cpu_dst);
4061 dc->cc_op = CC_OP_TSUBTV;
4062 break;
4063 case 0x24: /* mulscc */
4064 update_psr(dc);
4065 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4066 gen_store_gpr(dc, rd, cpu_dst);
4067 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4068 dc->cc_op = CC_OP_ADD;
4069 break;
4070 #ifndef TARGET_SPARC64
4071 case 0x25: /* sll */
4072 if (IS_IMM) { /* immediate */
4073 simm = GET_FIELDs(insn, 20, 31);
4074 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4075 } else { /* register */
4076 cpu_tmp0 = tcg_temp_new();
4077 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4078 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4079 }
4080 gen_store_gpr(dc, rd, cpu_dst);
4081 break;
4082 case 0x26: /* srl */
4083 if (IS_IMM) { /* immediate */
4084 simm = GET_FIELDs(insn, 20, 31);
4085 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4086 } else { /* register */
4087 cpu_tmp0 = tcg_temp_new();
4088 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4089 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4090 }
4091 gen_store_gpr(dc, rd, cpu_dst);
4092 break;
4093 case 0x27: /* sra */
4094 if (IS_IMM) { /* immediate */
4095 simm = GET_FIELDs(insn, 20, 31);
4096 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4097 } else { /* register */
4098 cpu_tmp0 = tcg_temp_new();
4099 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4100 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4101 }
4102 gen_store_gpr(dc, rd, cpu_dst);
4103 break;
4104 #endif
4105 case 0x30:
4106 {
4107 cpu_tmp0 = tcg_temp_new();
4108 switch(rd) {
4109 case 0: /* wry */
4110 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4111 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4112 break;
4113 #ifndef TARGET_SPARC64
4114 case 0x01 ... 0x0f: /* undefined in the
4115 SPARCv8 manual, nop
4116 on the microSPARC
4117 II */
4118 case 0x10 ... 0x1f: /* implementation-dependent
4119 in the SPARCv8
4120 manual, nop on the
4121 microSPARC II */
4122 if ((rd == 0x13) && (dc->def->features &
4123 CPU_FEATURE_POWERDOWN)) {
4124 /* LEON3 power-down */
4125 save_state(dc);
4126 gen_helper_power_down(cpu_env);
4127 }
4128 break;
4129 #else
4130 case 0x2: /* V9 wrccr */
4131 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4132 gen_helper_wrccr(cpu_env, cpu_tmp0);
4133 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4134 dc->cc_op = CC_OP_FLAGS;
4135 break;
4136 case 0x3: /* V9 wrasi */
4137 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4138 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4139 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4140 offsetof(CPUSPARCState, asi));
4141 /*
4142 * End TB to notice changed ASI.
4143 * TODO: Could notice src1 = %g0 and IS_IMM,
4144 * update DisasContext and not exit the TB.
4145 */
4146 save_state(dc);
4147 gen_op_next_insn();
4148 tcg_gen_lookup_and_goto_ptr();
4149 dc->base.is_jmp = DISAS_NORETURN;
4150 break;
4151 case 0x6: /* V9 wrfprs */
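/* FPRS.FEF gates FPU access and is part of the state this TB was
   translated under, so after rewriting FPRS we must end the TB and
   retranslate. */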
4152 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4153 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4154 dc->fprs_dirty = 0;
4155 save_state(dc);
4156 gen_op_next_insn();
4157 tcg_gen_exit_tb(NULL, 0);
4158 dc->base.is_jmp = DISAS_NORETURN;
4159 break;
4160 case 0xf: /* V9 sir, nop if user */
4161 #if !defined(CONFIG_USER_ONLY)
4162 if (supervisor(dc)) {
4163 ; // XXX
4164 }
4165 #endif
4166 break;
4167 case 0x13: /* Graphics Status */
4168 if (gen_trap_ifnofpu(dc)) {
4169 goto jmp_insn;
4170 }
4171 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4172 break;
4173 case 0x14: /* Softint set */
4174 if (!supervisor(dc))
4175 goto illegal_insn;
4176 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4177 gen_helper_set_softint(cpu_env, cpu_tmp0);
4178 break;
4179 case 0x15: /* Softint clear */
4180 if (!supervisor(dc))
4181 goto illegal_insn;
4182 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4183 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4184 break;
4185 case 0x16: /* Softint write */
4186 if (!supervisor(dc))
4187 goto illegal_insn;
4188 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4189 gen_helper_write_softint(cpu_env, cpu_tmp0);
4190 break;
4191 case 0x17: /* Tick compare */
4192 #if !defined(CONFIG_USER_ONLY)
4193 if (!supervisor(dc))
4194 goto illegal_insn;
4195 #endif
4196 {
4197 TCGv_ptr r_tickptr;
4198
4199 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4200 cpu_src2);
4201 r_tickptr = tcg_temp_new_ptr();
4202 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4203 offsetof(CPUSPARCState, tick));
4204 translator_io_start(&dc->base);
4205 gen_helper_tick_set_limit(r_tickptr,
4206 cpu_tick_cmpr);
4207 /* End TB to handle timer interrupt */
4208 dc->base.is_jmp = DISAS_EXIT;
4209 }
4210 break;
4211 case 0x18: /* System tick */
4212 #if !defined(CONFIG_USER_ONLY)
4213 if (!supervisor(dc))
4214 goto illegal_insn;
4215 #endif
4216 {
4217 TCGv_ptr r_tickptr;
4218
4219 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4220 cpu_src2);
4221 r_tickptr = tcg_temp_new_ptr();
4222 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4223 offsetof(CPUSPARCState, stick));
4224 translator_io_start(&dc->base);
4225 gen_helper_tick_set_count(r_tickptr,
4226 cpu_tmp0);
4227 /* End TB to handle timer interrupt */
4228 dc->base.is_jmp = DISAS_EXIT;
4229 }
4230 break;
4231 case 0x19: /* System tick compare */
4232 #if !defined(CONFIG_USER_ONLY)
4233 if (!supervisor(dc))
4234 goto illegal_insn;
4235 #endif
4236 {
4237 TCGv_ptr r_tickptr;
4238
4239 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4240 cpu_src2);
4241 r_tickptr = tcg_temp_new_ptr();
4242 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4243 offsetof(CPUSPARCState, stick));
4244 translator_io_start(&dc->base);
4245 gen_helper_tick_set_limit(r_tickptr,
4246 cpu_stick_cmpr);
4247 /* End TB to handle timer interrupt */
4248 dc->base.is_jmp = DISAS_EXIT;
4249 }
4250 break;
4251
4252 case 0x10: /* Performance Control */
4253 case 0x11: /* Performance Instrumentation
4254 Counter */
4255 case 0x12: /* Dispatch Control */
4256 #endif
4257 default:
4258 goto illegal_insn;
4259 }
4260 }
4261 break;
4262 #if !defined(CONFIG_USER_ONLY)
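/* On pre-V9 CPUs wrpsr may change CWP, PIL and the condition codes,
   so the helper call below is followed by ending the TB; the V9
   encoding of this opcode is 'saved'/'restored' instead. */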
4263 case 0x31: /* wrpsr, V9 saved, restored */
4264 {
4265 if (!supervisor(dc))
4266 goto priv_insn;
4267 #ifdef TARGET_SPARC64
4268 switch (rd) {
4269 case 0:
4270 gen_helper_saved(cpu_env);
4271 break;
4272 case 1:
4273 gen_helper_restored(cpu_env);
4274 break;
4275 case 2: /* UA2005 allclean */
4276 case 3: /* UA2005 otherw */
4277 case 4: /* UA2005 normalw */
4278 case 5: /* UA2005 invalw */
4279 // XXX
4280 default:
4281 goto illegal_insn;
4282 }
4283 #else
4284 cpu_tmp0 = tcg_temp_new();
4285 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4286 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4287 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4288 dc->cc_op = CC_OP_FLAGS;
4289 save_state(dc);
4290 gen_op_next_insn();
4291 tcg_gen_exit_tb(NULL, 0);
4292 dc->base.is_jmp = DISAS_NORETURN;
4293 #endif
4294 }
4295 break;
4296 case 0x32: /* wrwim, V9 wrpr */
4297 {
4298 if (!supervisor(dc))
4299 goto priv_insn;
4300 cpu_tmp0 = tcg_temp_new();
4301 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4302 #ifdef TARGET_SPARC64
4303 switch (rd) {
4304 case 0: // tpc
4305 {
4306 TCGv_ptr r_tsptr;
4307
4308 r_tsptr = tcg_temp_new_ptr();
4309 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4310 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4311 offsetof(trap_state, tpc));
4312 }
4313 break;
4314 case 1: // tnpc
4315 {
4316 TCGv_ptr r_tsptr;
4317
4318 r_tsptr = tcg_temp_new_ptr();
4319 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4320 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4321 offsetof(trap_state, tnpc));
4322 }
4323 break;
4324 case 2: // tstate
4325 {
4326 TCGv_ptr r_tsptr;
4327
4328 r_tsptr = tcg_temp_new_ptr();
4329 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4330 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4331 offsetof(trap_state,
4332 tstate));
4333 }
4334 break;
4335 case 3: // tt
4336 {
4337 TCGv_ptr r_tsptr;
4338
4339 r_tsptr = tcg_temp_new_ptr();
4340 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4341 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4342 offsetof(trap_state, tt));
4343 }
4344 break;
4345 case 4: // tick
4346 {
4347 TCGv_ptr r_tickptr;
4348
4349 r_tickptr = tcg_temp_new_ptr();
4350 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4351 offsetof(CPUSPARCState, tick));
4352 translator_io_start(&dc->base);
4353 gen_helper_tick_set_count(r_tickptr,
4354 cpu_tmp0);
4355 /* End TB to handle timer interrupt */
4356 dc->base.is_jmp = DISAS_EXIT;
4357 }
4358 break;
4359 case 5: // tba
4360 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4361 break;
4362 case 6: // pstate
4363 save_state(dc);
4364 if (translator_io_start(&dc->base)) {
4365 dc->base.is_jmp = DISAS_EXIT;
4366 }
4367 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4368 dc->npc = DYNAMIC_PC;
4369 break;
4370 case 7: // tl
4371 save_state(dc);
4372 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4373 offsetof(CPUSPARCState, tl));
4374 dc->npc = DYNAMIC_PC;
4375 break;
4376 case 8: // pil
4377 if (translator_io_start(&dc->base)) {
4378 dc->base.is_jmp = DISAS_EXIT;
4379 }
4380 gen_helper_wrpil(cpu_env, cpu_tmp0);
4381 break;
4382 case 9: // cwp
4383 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4384 break;
4385 case 10: // cansave
4386 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4387 offsetof(CPUSPARCState,
4388 cansave));
4389 break;
4390 case 11: // canrestore
4391 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4392 offsetof(CPUSPARCState,
4393 canrestore));
4394 break;
4395 case 12: // cleanwin
4396 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4397 offsetof(CPUSPARCState,
4398 cleanwin));
4399 break;
4400 case 13: // otherwin
4401 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4402 offsetof(CPUSPARCState,
4403 otherwin));
4404 break;
4405 case 14: // wstate
4406 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4407 offsetof(CPUSPARCState,
4408 wstate));
4409 break;
4410 case 16: // UA2005 gl
4411 CHECK_IU_FEATURE(dc, GL);
4412 gen_helper_wrgl(cpu_env, cpu_tmp0);
4413 break;
4414 case 26: // UA2005 strand status
4415 CHECK_IU_FEATURE(dc, HYPV);
4416 if (!hypervisor(dc))
4417 goto priv_insn;
4418 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4419 break;
4420 default:
4421 goto illegal_insn;
4422 }
4423 #else
4424 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4425 if (dc->def->nwindows != 32) {
4426 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4427 (1 << dc->def->nwindows) - 1);
4428 }
4429 #endif
4430 }
4431 break;
4432 case 0x33: /* wrtbr, UA2005 wrhpr */
4433 {
4434 #ifndef TARGET_SPARC64
4435 if (!supervisor(dc))
4436 goto priv_insn;
4437 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4438 #else
4439 CHECK_IU_FEATURE(dc, HYPV);
4440 if (!hypervisor(dc))
4441 goto priv_insn;
4442 cpu_tmp0 = tcg_temp_new();
4443 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4444 switch (rd) {
4445 case 0: // hpstate
4446 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4447 offsetof(CPUSPARCState,
4448 hpstate));
4449 save_state(dc);
4450 gen_op_next_insn();
4451 tcg_gen_exit_tb(NULL, 0);
4452 dc->base.is_jmp = DISAS_NORETURN;
4453 break;
4454 case 1: // htstate
4455 // XXX gen_op_wrhtstate();
4456 break;
4457 case 3: // hintp
4458 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4459 break;
4460 case 5: // htba
4461 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4462 break;
4463 case 31: // hstick_cmpr
4464 {
4465 TCGv_ptr r_tickptr;
4466
4467 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4468 r_tickptr = tcg_temp_new_ptr();
4469 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4470 offsetof(CPUSPARCState, hstick));
4471 translator_io_start(&dc->base);
4472 gen_helper_tick_set_limit(r_tickptr,
4473 cpu_hstick_cmpr);
4474 /* End TB to handle timer interrupt */
4475 dc->base.is_jmp = DISAS_EXIT;
4476 }
4477 break;
4478 case 6: // hver readonly
4479 default:
4480 goto illegal_insn;
4481 }
4482 #endif
4483 }
4484 break;
4485 #endif
4486 #ifdef TARGET_SPARC64
4487 case 0x2c: /* V9 movcc */
4488 {
4489 int cc = GET_FIELD_SP(insn, 11, 12);
4490 int cond = GET_FIELD_SP(insn, 14, 17);
4491 DisasCompare cmp;
4492 TCGv dst;
4493
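/* Bit 18 (cc2) selects the integer condition codes; cc1:cc0 then
   pick icc (0) or xcc (2). With cc2 clear, the field selects one
   of the four fcc sets instead. */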
4494 if (insn & (1 << 18)) {
4495 if (cc == 0) {
4496 gen_compare(&cmp, 0, cond, dc);
4497 } else if (cc == 2) {
4498 gen_compare(&cmp, 1, cond, dc);
4499 } else {
4500 goto illegal_insn;
4501 }
4502 } else {
4503 gen_fcompare(&cmp, cc, cond);
4504 }
4505
4506 /* The get_src2 above loaded the normal 13-bit
4507 immediate field, not the 11-bit field we have
4508 in movcc. But it did handle the reg case. */
4509 if (IS_IMM) {
4510 simm = GET_FIELD_SPs(insn, 0, 10);
4511 tcg_gen_movi_tl(cpu_src2, simm);
4512 }
4513
4514 dst = gen_load_gpr(dc, rd);
4515 tcg_gen_movcond_tl(cmp.cond, dst,
4516 cmp.c1, cmp.c2,
4517 cpu_src2, dst);
4518 gen_store_gpr(dc, rd, dst);
4519 break;
4520 }
4521 case 0x2d: /* V9 sdivx */
4522 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4523 gen_store_gpr(dc, rd, cpu_dst);
4524 break;
4525 case 0x2e: /* V9 popc */
4526 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4527 gen_store_gpr(dc, rd, cpu_dst);
4528 break;
4529 case 0x2f: /* V9 movr */
4530 {
4531 int cond = GET_FIELD_SP(insn, 10, 12);
4532 DisasCompare cmp;
4533 TCGv dst;
4534
4535 gen_compare_reg(&cmp, cond, cpu_src1);
4536
4537 /* The get_src2 above loaded the normal 13-bit
4538 immediate field, not the 10-bit field we have
4539 in movr. But it did handle the reg case. */
4540 if (IS_IMM) {
4541 simm = GET_FIELD_SPs(insn, 0, 9);
4542 tcg_gen_movi_tl(cpu_src2, simm);
4543 }
4544
4545 dst = gen_load_gpr(dc, rd);
4546 tcg_gen_movcond_tl(cmp.cond, dst,
4547 cmp.c1, cmp.c2,
4548 cpu_src2, dst);
4549 gen_store_gpr(dc, rd, dst);
4550 break;
4551 }
4552 #endif
4553 default:
4554 goto illegal_insn;
4555 }
4556 }
4557 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4558 #ifdef TARGET_SPARC64
4559 int opf = GET_FIELD_SP(insn, 5, 13);
4560 rs1 = GET_FIELD(insn, 13, 17);
4561 rs2 = GET_FIELD(insn, 27, 31);
4562 if (gen_trap_ifnofpu(dc)) {
4563 goto jmp_insn;
4564 }
4565
4566 switch (opf) {
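/* The edge instructions below compute partial-store masks for
   block boundaries; per the case labels, the last two gen_edge
   arguments select the cc-setting and little-endian ('l')
   variants respectively. */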
4567 case 0x000: /* VIS I edge8cc */
4568 CHECK_FPU_FEATURE(dc, VIS1);
4569 cpu_src1 = gen_load_gpr(dc, rs1);
4570 cpu_src2 = gen_load_gpr(dc, rs2);
4571 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4572 gen_store_gpr(dc, rd, cpu_dst);
4573 break;
4574 case 0x001: /* VIS II edge8n */
4575 CHECK_FPU_FEATURE(dc, VIS2);
4576 cpu_src1 = gen_load_gpr(dc, rs1);
4577 cpu_src2 = gen_load_gpr(dc, rs2);
4578 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4579 gen_store_gpr(dc, rd, cpu_dst);
4580 break;
4581 case 0x002: /* VIS I edge8lcc */
4582 CHECK_FPU_FEATURE(dc, VIS1);
4583 cpu_src1 = gen_load_gpr(dc, rs1);
4584 cpu_src2 = gen_load_gpr(dc, rs2);
4585 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4586 gen_store_gpr(dc, rd, cpu_dst);
4587 break;
4588 case 0x003: /* VIS II edge8ln */
4589 CHECK_FPU_FEATURE(dc, VIS2);
4590 cpu_src1 = gen_load_gpr(dc, rs1);
4591 cpu_src2 = gen_load_gpr(dc, rs2);
4592 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4593 gen_store_gpr(dc, rd, cpu_dst);
4594 break;
4595 case 0x004: /* VIS I edge16cc */
4596 CHECK_FPU_FEATURE(dc, VIS1);
4597 cpu_src1 = gen_load_gpr(dc, rs1);
4598 cpu_src2 = gen_load_gpr(dc, rs2);
4599 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4600 gen_store_gpr(dc, rd, cpu_dst);
4601 break;
4602 case 0x005: /* VIS II edge16n */
4603 CHECK_FPU_FEATURE(dc, VIS2);
4604 cpu_src1 = gen_load_gpr(dc, rs1);
4605 cpu_src2 = gen_load_gpr(dc, rs2);
4606 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4607 gen_store_gpr(dc, rd, cpu_dst);
4608 break;
4609 case 0x006: /* VIS I edge16lcc */
4610 CHECK_FPU_FEATURE(dc, VIS1);
4611 cpu_src1 = gen_load_gpr(dc, rs1);
4612 cpu_src2 = gen_load_gpr(dc, rs2);
4613 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4614 gen_store_gpr(dc, rd, cpu_dst);
4615 break;
4616 case 0x007: /* VIS II edge16ln */
4617 CHECK_FPU_FEATURE(dc, VIS2);
4618 cpu_src1 = gen_load_gpr(dc, rs1);
4619 cpu_src2 = gen_load_gpr(dc, rs2);
4620 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4621 gen_store_gpr(dc, rd, cpu_dst);
4622 break;
4623 case 0x008: /* VIS I edge32cc */
4624 CHECK_FPU_FEATURE(dc, VIS1);
4625 cpu_src1 = gen_load_gpr(dc, rs1);
4626 cpu_src2 = gen_load_gpr(dc, rs2);
4627 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4628 gen_store_gpr(dc, rd, cpu_dst);
4629 break;
4630 case 0x009: /* VIS II edge32n */
4631 CHECK_FPU_FEATURE(dc, VIS2);
4632 cpu_src1 = gen_load_gpr(dc, rs1);
4633 cpu_src2 = gen_load_gpr(dc, rs2);
4634 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4635 gen_store_gpr(dc, rd, cpu_dst);
4636 break;
4637 case 0x00a: /* VIS I edge32lcc */
4638 CHECK_FPU_FEATURE(dc, VIS1);
4639 cpu_src1 = gen_load_gpr(dc, rs1);
4640 cpu_src2 = gen_load_gpr(dc, rs2);
4641 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4642 gen_store_gpr(dc, rd, cpu_dst);
4643 break;
4644 case 0x00b: /* VIS II edge32ln */
4645 CHECK_FPU_FEATURE(dc, VIS2);
4646 cpu_src1 = gen_load_gpr(dc, rs1);
4647 cpu_src2 = gen_load_gpr(dc, rs2);
4648 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4649 gen_store_gpr(dc, rd, cpu_dst);
4650 break;
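/* array8/16/32 convert 3-D coordinates into a blocked byte
   address; the 16- and 32-bit forms reuse the array8 helper and
   scale the result by the element size. */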
4651 case 0x010: /* VIS I array8 */
4652 CHECK_FPU_FEATURE(dc, VIS1);
4653 cpu_src1 = gen_load_gpr(dc, rs1);
4654 cpu_src2 = gen_load_gpr(dc, rs2);
4655 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4656 gen_store_gpr(dc, rd, cpu_dst);
4657 break;
4658 case 0x012: /* VIS I array16 */
4659 CHECK_FPU_FEATURE(dc, VIS1);
4660 cpu_src1 = gen_load_gpr(dc, rs1);
4661 cpu_src2 = gen_load_gpr(dc, rs2);
4662 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4663 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4664 gen_store_gpr(dc, rd, cpu_dst);
4665 break;
4666 case 0x014: /* VIS I array32 */
4667 CHECK_FPU_FEATURE(dc, VIS1);
4668 cpu_src1 = gen_load_gpr(dc, rs1);
4669 cpu_src2 = gen_load_gpr(dc, rs2);
4670 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4671 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4672 gen_store_gpr(dc, rd, cpu_dst);
4673 break;
4674 case 0x018: /* VIS I alignaddr */
4675 CHECK_FPU_FEATURE(dc, VIS1);
4676 cpu_src1 = gen_load_gpr(dc, rs1);
4677 cpu_src2 = gen_load_gpr(dc, rs2);
4678 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4679 gen_store_gpr(dc, rd, cpu_dst);
4680 break;
4681 case 0x01a: /* VIS I alignaddrl */
4682 CHECK_FPU_FEATURE(dc, VIS1);
4683 cpu_src1 = gen_load_gpr(dc, rs1);
4684 cpu_src2 = gen_load_gpr(dc, rs2);
4685 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4686 gen_store_gpr(dc, rd, cpu_dst);
4687 break;
4688 case 0x019: /* VIS II bmask */
4689 CHECK_FPU_FEATURE(dc, VIS2);
4690 cpu_src1 = gen_load_gpr(dc, rs1);
4691 cpu_src2 = gen_load_gpr(dc, rs2);
4692 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4693 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4694 gen_store_gpr(dc, rd, cpu_dst);
4695 break;
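/* The VIS fcmp* ops compare packed elements and deposit one result
   bit per element into an integer destination register. */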
4696 case 0x020: /* VIS I fcmple16 */
4697 CHECK_FPU_FEATURE(dc, VIS1);
4698 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4699 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4700 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4701 gen_store_gpr(dc, rd, cpu_dst);
4702 break;
4703 case 0x022: /* VIS I fcmpne16 */
4704 CHECK_FPU_FEATURE(dc, VIS1);
4705 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4706 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4707 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4708 gen_store_gpr(dc, rd, cpu_dst);
4709 break;
4710 case 0x024: /* VIS I fcmple32 */
4711 CHECK_FPU_FEATURE(dc, VIS1);
4712 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4713 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4714 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4715 gen_store_gpr(dc, rd, cpu_dst);
4716 break;
4717 case 0x026: /* VIS I fcmpne32 */
4718 CHECK_FPU_FEATURE(dc, VIS1);
4719 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4720 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4721 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4722 gen_store_gpr(dc, rd, cpu_dst);
4723 break;
4724 case 0x028: /* VIS I fcmpgt16 */
4725 CHECK_FPU_FEATURE(dc, VIS1);
4726 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4727 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4728 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4729 gen_store_gpr(dc, rd, cpu_dst);
4730 break;
4731 case 0x02a: /* VIS I fcmpeq16 */
4732 CHECK_FPU_FEATURE(dc, VIS1);
4733 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4734 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4735 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4736 gen_store_gpr(dc, rd, cpu_dst);
4737 break;
4738 case 0x02c: /* VIS I fcmpgt32 */
4739 CHECK_FPU_FEATURE(dc, VIS1);
4740 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4741 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4742 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4743 gen_store_gpr(dc, rd, cpu_dst);
4744 break;
4745 case 0x02e: /* VIS I fcmpeq32 */
4746 CHECK_FPU_FEATURE(dc, VIS1);
4747 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4748 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4749 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4750 gen_store_gpr(dc, rd, cpu_dst);
4751 break;
4752 case 0x031: /* VIS I fmul8x16 */
4753 CHECK_FPU_FEATURE(dc, VIS1);
4754 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4755 break;
4756 case 0x033: /* VIS I fmul8x16au */
4757 CHECK_FPU_FEATURE(dc, VIS1);
4758 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4759 break;
4760 case 0x035: /* VIS I fmul8x16al */
4761 CHECK_FPU_FEATURE(dc, VIS1);
4762 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4763 break;
4764 case 0x036: /* VIS I fmul8sux16 */
4765 CHECK_FPU_FEATURE(dc, VIS1);
4766 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4767 break;
4768 case 0x037: /* VIS I fmul8ulx16 */
4769 CHECK_FPU_FEATURE(dc, VIS1);
4770 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4771 break;
4772 case 0x038: /* VIS I fmuld8sux16 */
4773 CHECK_FPU_FEATURE(dc, VIS1);
4774 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4775 break;
4776 case 0x039: /* VIS I fmuld8ulx16 */
4777 CHECK_FPU_FEATURE(dc, VIS1);
4778 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4779 break;
4780 case 0x03a: /* VIS I fpack32 */
4781 CHECK_FPU_FEATURE(dc, VIS1);
4782 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4783 break;
4784 case 0x03b: /* VIS I fpack16 */
4785 CHECK_FPU_FEATURE(dc, VIS1);
4786 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4787 cpu_dst_32 = gen_dest_fpr_F(dc);
4788 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4789 gen_store_fpr_F(dc, rd, cpu_dst_32);
4790 break;
4791 case 0x03d: /* VIS I fpackfix */
4792 CHECK_FPU_FEATURE(dc, VIS1);
4793 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4794 cpu_dst_32 = gen_dest_fpr_F(dc);
4795 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4796 gen_store_fpr_F(dc, rd, cpu_dst_32);
4797 break;
4798 case 0x03e: /* VIS I pdist */
4799 CHECK_FPU_FEATURE(dc, VIS1);
4800 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4801 break;
4802 case 0x048: /* VIS I faligndata */
4803 CHECK_FPU_FEATURE(dc, VIS1);
4804 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4805 break;
4806 case 0x04b: /* VIS I fpmerge */
4807 CHECK_FPU_FEATURE(dc, VIS1);
4808 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4809 break;
4810 case 0x04c: /* VIS II bshuffle */
4811 CHECK_FPU_FEATURE(dc, VIS2);
4812 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4813 break;
4814 case 0x04d: /* VIS I fexpand */
4815 CHECK_FPU_FEATURE(dc, VIS1);
4816 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4817 break;
4818 case 0x050: /* VIS I fpadd16 */
4819 CHECK_FPU_FEATURE(dc, VIS1);
4820 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4821 break;
4822 case 0x051: /* VIS I fpadd16s */
4823 CHECK_FPU_FEATURE(dc, VIS1);
4824 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4825 break;
4826 case 0x052: /* VIS I fpadd32 */
4827 CHECK_FPU_FEATURE(dc, VIS1);
4828 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4829 break;
4830 case 0x053: /* VIS I fpadd32s */
4831 CHECK_FPU_FEATURE(dc, VIS1);
4832 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4833 break;
4834 case 0x054: /* VIS I fpsub16 */
4835 CHECK_FPU_FEATURE(dc, VIS1);
4836 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4837 break;
4838 case 0x055: /* VIS I fpsub16s */
4839 CHECK_FPU_FEATURE(dc, VIS1);
4840 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4841 break;
4842 case 0x056: /* VIS I fpsub32 */
4843 CHECK_FPU_FEATURE(dc, VIS1);
4844 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4845 break;
4846 case 0x057: /* VIS I fpsub32s */
4847 CHECK_FPU_FEATURE(dc, VIS1);
4848 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4849 break;
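/* opf 0x060-0x07f: the VIS logical operations map directly onto
   TCG ops; the trailing-'s' forms are the 32-bit single variants. */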
4850 case 0x060: /* VIS I fzero */
4851 CHECK_FPU_FEATURE(dc, VIS1);
4852 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4853 tcg_gen_movi_i64(cpu_dst_64, 0);
4854 gen_store_fpr_D(dc, rd, cpu_dst_64);
4855 break;
4856 case 0x061: /* VIS I fzeros */
4857 CHECK_FPU_FEATURE(dc, VIS1);
4858 cpu_dst_32 = gen_dest_fpr_F(dc);
4859 tcg_gen_movi_i32(cpu_dst_32, 0);
4860 gen_store_fpr_F(dc, rd, cpu_dst_32);
4861 break;
4862 case 0x062: /* VIS I fnor */
4863 CHECK_FPU_FEATURE(dc, VIS1);
4864 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4865 break;
4866 case 0x063: /* VIS I fnors */
4867 CHECK_FPU_FEATURE(dc, VIS1);
4868 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4869 break;
4870 case 0x064: /* VIS I fandnot2 */
4871 CHECK_FPU_FEATURE(dc, VIS1);
4872 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4873 break;
4874 case 0x065: /* VIS I fandnot2s */
4875 CHECK_FPU_FEATURE(dc, VIS1);
4876 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4877 break;
4878 case 0x066: /* VIS I fnot2 */
4879 CHECK_FPU_FEATURE(dc, VIS1);
4880 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4881 break;
4882 case 0x067: /* VIS I fnot2s */
4883 CHECK_FPU_FEATURE(dc, VIS1);
4884 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4885 break;
4886 case 0x068: /* VIS I fandnot1 */
4887 CHECK_FPU_FEATURE(dc, VIS1);
4888 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4889 break;
4890 case 0x069: /* VIS I fandnot1s */
4891 CHECK_FPU_FEATURE(dc, VIS1);
4892 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4893 break;
4894 case 0x06a: /* VIS I fnot1 */
4895 CHECK_FPU_FEATURE(dc, VIS1);
4896 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4897 break;
4898 case 0x06b: /* VIS I fnot1s */
4899 CHECK_FPU_FEATURE(dc, VIS1);
4900 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4901 break;
4902 case 0x06c: /* VIS I fxor */
4903 CHECK_FPU_FEATURE(dc, VIS1);
4904 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4905 break;
4906 case 0x06d: /* VIS I fxors */
4907 CHECK_FPU_FEATURE(dc, VIS1);
4908 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4909 break;
4910 case 0x06e: /* VIS I fnand */
4911 CHECK_FPU_FEATURE(dc, VIS1);
4912 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4913 break;
4914 case 0x06f: /* VIS I fnands */
4915 CHECK_FPU_FEATURE(dc, VIS1);
4916 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4917 break;
4918 case 0x070: /* VIS I fand */
4919 CHECK_FPU_FEATURE(dc, VIS1);
4920 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4921 break;
4922 case 0x071: /* VIS I fands */
4923 CHECK_FPU_FEATURE(dc, VIS1);
4924 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4925 break;
4926 case 0x072: /* VIS I fxnor */
4927 CHECK_FPU_FEATURE(dc, VIS1);
4928 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4929 break;
4930 case 0x073: /* VIS I fxnors */
4931 CHECK_FPU_FEATURE(dc, VIS1);
4932 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4933 break;
4934 case 0x074: /* VIS I fsrc1 */
4935 CHECK_FPU_FEATURE(dc, VIS1);
4936 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4937 gen_store_fpr_D(dc, rd, cpu_src1_64);
4938 break;
4939 case 0x075: /* VIS I fsrc1s */
4940 CHECK_FPU_FEATURE(dc, VIS1);
4941 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4942 gen_store_fpr_F(dc, rd, cpu_src1_32);
4943 break;
4944 case 0x076: /* VIS I fornot2 */
4945 CHECK_FPU_FEATURE(dc, VIS1);
4946 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4947 break;
4948 case 0x077: /* VIS I fornot2s */
4949 CHECK_FPU_FEATURE(dc, VIS1);
4950 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4951 break;
4952 case 0x078: /* VIS I fsrc2 */
4953 CHECK_FPU_FEATURE(dc, VIS1);
4954 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4955 gen_store_fpr_D(dc, rd, cpu_src1_64);
4956 break;
4957 case 0x079: /* VIS I fsrc2s */
4958 CHECK_FPU_FEATURE(dc, VIS1);
4959 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4960 gen_store_fpr_F(dc, rd, cpu_src1_32);
4961 break;
4962 case 0x07a: /* VIS I fornot1 */
4963 CHECK_FPU_FEATURE(dc, VIS1);
4964 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4965 break;
4966 case 0x07b: /* VIS I fornot1s */
4967 CHECK_FPU_FEATURE(dc, VIS1);
4968 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4969 break;
4970 case 0x07c: /* VIS I for */
4971 CHECK_FPU_FEATURE(dc, VIS1);
4972 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4973 break;
4974 case 0x07d: /* VIS I fors */
4975 CHECK_FPU_FEATURE(dc, VIS1);
4976 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4977 break;
4978 case 0x07e: /* VIS I fone */
4979 CHECK_FPU_FEATURE(dc, VIS1);
4980 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4981 tcg_gen_movi_i64(cpu_dst_64, -1);
4982 gen_store_fpr_D(dc, rd, cpu_dst_64);
4983 break;
4984 case 0x07f: /* VIS I fones */
4985 CHECK_FPU_FEATURE(dc, VIS1);
4986 cpu_dst_32 = gen_dest_fpr_F(dc);
4987 tcg_gen_movi_i32(cpu_dst_32, -1);
4988 gen_store_fpr_F(dc, rd, cpu_dst_32);
4989 break;
4990 case 0x080: /* VIS I shutdown */
4991 case 0x081: /* VIS II siam */
4992 // XXX
4993 goto illegal_insn;
4994 default:
4995 goto illegal_insn;
4996 }
4997 #else
4998 goto ncp_insn;
4999 #endif
5000 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5001 #ifdef TARGET_SPARC64
5002 goto illegal_insn;
5003 #else
5004 goto ncp_insn;
5005 #endif
5006 #ifdef TARGET_SPARC64
5007 } else if (xop == 0x39) { /* V9 return */
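/* V9 'return' restores the caller's register window and then
   branches to rs1 + rs2/imm, computed into cpu_tmp0 below. */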
5008 save_state(dc);
5009 cpu_src1 = get_src1(dc, insn);
5010 cpu_tmp0 = tcg_temp_new();
5011 if (IS_IMM) { /* immediate */
5012 simm = GET_FIELDs(insn, 19, 31);
5013 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5014 } else { /* register */
5015 rs2 = GET_FIELD(insn, 27, 31);
5016 if (rs2) {
5017 cpu_src2 = gen_load_gpr(dc, rs2);
5018 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5019 } else {
5020 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5021 }
5022 }
5023 gen_helper_restore(cpu_env);
5024 gen_mov_pc_npc(dc);
5025 gen_check_align(cpu_tmp0, 3);
5026 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5027 dc->npc = DYNAMIC_PC_LOOKUP;
5028 goto jmp_insn;
5029 #endif
5030 } else {
5031 cpu_src1 = get_src1(dc, insn);
5032 cpu_tmp0 = tcg_temp_new();
5033 if (IS_IMM) { /* immediate */
5034 simm = GET_FIELDs(insn, 19, 31);
5035 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5036 } else { /* register */
5037 rs2 = GET_FIELD(insn, 27, 31);
5038 if (rs2) {
5039 cpu_src2 = gen_load_gpr(dc, rs2);
5040 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5041 } else {
5042 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5043 }
5044 }
5045 switch (xop) {
5046 case 0x38: /* jmpl */
5047 {
5048 TCGv t = gen_dest_gpr(dc, rd);
5049 tcg_gen_movi_tl(t, dc->pc);
5050 gen_store_gpr(dc, rd, t);
5051
5052 gen_mov_pc_npc(dc);
5053 gen_check_align(cpu_tmp0, 3);
5054 gen_address_mask(dc, cpu_tmp0);
5055 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5056 dc->npc = DYNAMIC_PC_LOOKUP;
5057 }
5058 goto jmp_insn;
5059 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5060 case 0x39: /* rett, V9 return */
5061 {
5062 if (!supervisor(dc))
5063 goto priv_insn;
5064 gen_mov_pc_npc(dc);
5065 gen_check_align(cpu_tmp0, 3);
5066 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5067 dc->npc = DYNAMIC_PC;
5068 gen_helper_rett(cpu_env);
5069 }
5070 goto jmp_insn;
5071 #endif
5072 case 0x3b: /* flush */
5073 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5074 goto unimp_flush;
5075 /* nop */
5076 break;
5077 case 0x3c: /* save */
5078 gen_helper_save(cpu_env);
5079 gen_store_gpr(dc, rd, cpu_tmp0);
5080 break;
5081 case 0x3d: /* restore */
5082 gen_helper_restore(cpu_env);
5083 gen_store_gpr(dc, rd, cpu_tmp0);
5084 break;
5085 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5086 case 0x3e: /* V9 done/retry */
5087 {
5088 switch (rd) {
5089 case 0:
5090 if (!supervisor(dc))
5091 goto priv_insn;
5092 dc->npc = DYNAMIC_PC;
5093 dc->pc = DYNAMIC_PC;
5094 translator_io_start(&dc->base);
5095 gen_helper_done(cpu_env);
5096 goto jmp_insn;
5097 case 1:
5098 if (!supervisor(dc))
5099 goto priv_insn;
5100 dc->npc = DYNAMIC_PC;
5101 dc->pc = DYNAMIC_PC;
5102 translator_io_start(&dc->base);
5103 gen_helper_retry(cpu_env);
5104 goto jmp_insn;
5105 default:
5106 goto illegal_insn;
5107 }
5108 }
5109 break;
5110 #endif
5111 default:
5112 goto illegal_insn;
5113 }
5114 }
5115 break;
5116 }
5117 break;
5118 case 3: /* load/store instructions */
5119 {
5120 unsigned int xop = GET_FIELD(insn, 7, 12);
5121 /* ??? gen_address_mask prevents us from using a source
5122 register directly. Always generate a temporary. */
5123 TCGv cpu_addr = tcg_temp_new();
5124
5125 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5126 if (xop == 0x3c || xop == 0x3e) {
5127 /* V9 casa/casxa : no offset */
5128 } else if (IS_IMM) { /* immediate */
5129 simm = GET_FIELDs(insn, 19, 31);
5130 if (simm != 0) {
5131 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5132 }
5133 } else { /* register */
5134 rs2 = GET_FIELD(insn, 27, 31);
5135 if (rs2 != 0) {
5136 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5137 }
5138 }
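/* The xop ranges below pick out the integer loads together with the
   ldstub/swap atomics and the alternate-space load forms; FP loads,
   integer stores, FP stores and the cas/ASI stores follow in the
   later else-if arms. */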
5139 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5140 (xop > 0x17 && xop <= 0x1d ) ||
5141 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5142 TCGv cpu_val = gen_dest_gpr(dc, rd);
5143
5144 switch (xop) {
5145 case 0x0: /* ld, V9 lduw, load unsigned word */
5146 gen_address_mask(dc, cpu_addr);
5147 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5148 dc->mem_idx, MO_TEUL | MO_ALIGN);
5149 break;
5150 case 0x1: /* ldub, load unsigned byte */
5151 gen_address_mask(dc, cpu_addr);
5152 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5153 dc->mem_idx, MO_UB);
5154 break;
5155 case 0x2: /* lduh, load unsigned halfword */
5156 gen_address_mask(dc, cpu_addr);
5157 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5158 dc->mem_idx, MO_TEUW | MO_ALIGN);
5159 break;
5160 case 0x3: /* ldd, load double word */
5161 if (rd & 1)
5162 goto illegal_insn;
5163 else {
5164 TCGv_i64 t64;
5165
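/* ldd pairs registers: rd (even) receives the most-significant
   word, i.e. the one at the lower address, while rd + 1 gets the
   least-significant word; rd itself is stored by the common
   gen_store_gpr after the switch. */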
5166 gen_address_mask(dc, cpu_addr);
5167 t64 = tcg_temp_new_i64();
5168 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5169 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5170 tcg_gen_trunc_i64_tl(cpu_val, t64);
5171 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5172 gen_store_gpr(dc, rd + 1, cpu_val);
5173 tcg_gen_shri_i64(t64, t64, 32);
5174 tcg_gen_trunc_i64_tl(cpu_val, t64);
5175 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5176 }
5177 break;
5178 case 0x9: /* ldsb, load signed byte */
5179 gen_address_mask(dc, cpu_addr);
5180 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
5181 break;
5182 case 0xa: /* ldsh, load signed halfword */
5183 gen_address_mask(dc, cpu_addr);
5184 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5185 dc->mem_idx, MO_TESW | MO_ALIGN);
5186 break;
5187 case 0xd: /* ldstub */
5188 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5189 break;
5190 case 0x0f:
5191 /* swap: atomically exchange a register with memory */
5192 CHECK_IU_FEATURE(dc, SWAP);
5193 cpu_src1 = gen_load_gpr(dc, rd);
5194 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5195 dc->mem_idx, MO_TEUL);
5196 break;
5197 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5198 case 0x10: /* lda, V9 lduwa, load word alternate */
5199 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5200 break;
5201 case 0x11: /* lduba, load unsigned byte alternate */
5202 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5203 break;
5204 case 0x12: /* lduha, load unsigned halfword alternate */
5205 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5206 break;
5207 case 0x13: /* ldda, load double word alternate */
5208 if (rd & 1) {
5209 goto illegal_insn;
5210 }
5211 gen_ldda_asi(dc, cpu_addr, insn, rd);
5212 goto skip_move;
5213 case 0x19: /* ldsba, load signed byte alternate */
5214 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5215 break;
5216 case 0x1a: /* ldsha, load signed halfword alternate */
5217 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5218 break;
5219 case 0x1d: /* ldstuba -- XXX: should be atomic */
5220 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5221 break;
5222 case 0x1f: /* swapa: atomically exchange a register with
5223 alternate-space memory */
5224 CHECK_IU_FEATURE(dc, SWAP);
5225 cpu_src1 = gen_load_gpr(dc, rd);
5226 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5227 break;
5228
5229 #ifndef TARGET_SPARC64
5230 case 0x30: /* ldc */
5231 case 0x31: /* ldcsr */
5232 case 0x33: /* lddc */
5233 goto ncp_insn;
5234 #endif
5235 #endif
5236 #ifdef TARGET_SPARC64
5237 case 0x08: /* V9 ldsw */
5238 gen_address_mask(dc, cpu_addr);
5239 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5240 dc->mem_idx, MO_TESL | MO_ALIGN);
5241 break;
5242 case 0x0b: /* V9 ldx */
5243 gen_address_mask(dc, cpu_addr);
5244 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5245 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5246 break;
5247 case 0x18: /* V9 ldswa */
5248 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5249 break;
5250 case 0x1b: /* V9 ldxa */
5251 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5252 break;
5253 case 0x2d: /* V9 prefetch, no effect */
5254 goto skip_move;
5255 case 0x30: /* V9 ldfa */
5256 if (gen_trap_ifnofpu(dc)) {
5257 goto jmp_insn;
5258 }
5259 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5260 gen_update_fprs_dirty(dc, rd);
5261 goto skip_move;
5262 case 0x33: /* V9 lddfa */
5263 if (gen_trap_ifnofpu(dc)) {
5264 goto jmp_insn;
5265 }
5266 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5267 gen_update_fprs_dirty(dc, DFPREG(rd));
5268 goto skip_move;
5269 case 0x3d: /* V9 prefetcha, no effect */
5270 goto skip_move;
5271 case 0x32: /* V9 ldqfa */
5272 CHECK_FPU_FEATURE(dc, FLOAT128);
5273 if (gen_trap_ifnofpu(dc)) {
5274 goto jmp_insn;
5275 }
5276 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5277 gen_update_fprs_dirty(dc, QFPREG(rd));
5278 goto skip_move;
5279 #endif
5280 default:
5281 goto illegal_insn;
5282 }
5283 gen_store_gpr(dc, rd, cpu_val);
5284 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5285 skip_move: ;
5286 #endif
5287 } else if (xop >= 0x20 && xop < 0x24) {
5288 if (gen_trap_ifnofpu(dc)) {
5289 goto jmp_insn;
5290 }
5291 switch (xop) {
5292 case 0x20: /* ldf, load fpreg */
5293 gen_address_mask(dc, cpu_addr);
5294 cpu_dst_32 = gen_dest_fpr_F(dc);
5295 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5296 dc->mem_idx, MO_TEUL | MO_ALIGN);
5297 gen_store_fpr_F(dc, rd, cpu_dst_32);
5298 break;
5299 case 0x21: /* ldfsr, V9 ldxfsr */
5300 #ifdef TARGET_SPARC64
5301 gen_address_mask(dc, cpu_addr);
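/* rd == 1 encodes the 64-bit V9 ldxfsr; other rd values take the
   32-bit ldfsr path below. */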
5302 if (rd == 1) {
5303 TCGv_i64 t64 = tcg_temp_new_i64();
5304 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5305 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5306 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5307 break;
5308 }
5309 #endif
5310 cpu_dst_32 = tcg_temp_new_i32();
5311 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5312 dc->mem_idx, MO_TEUL | MO_ALIGN);
5313 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5314 break;
5315 case 0x22: /* ldqf, load quad fpreg */
5316 CHECK_FPU_FEATURE(dc, FLOAT128);
5317 gen_address_mask(dc, cpu_addr);
5318 cpu_src1_64 = tcg_temp_new_i64();
5319 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5320 MO_TEUQ | MO_ALIGN_4);
5321 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5322 cpu_src2_64 = tcg_temp_new_i64();
5323 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5324 MO_TEUQ | MO_ALIGN_4);
5325 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5326 break;
5327 case 0x23: /* lddf, load double fpreg */
5328 gen_address_mask(dc, cpu_addr);
5329 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5330 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5331 MO_TEUQ | MO_ALIGN_4);
5332 gen_store_fpr_D(dc, rd, cpu_dst_64);
5333 break;
5334 default:
5335 goto illegal_insn;
5336 }
5337 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5338 xop == 0xe || xop == 0x1e) {
5339 TCGv cpu_val = gen_load_gpr(dc, rd);
5340
5341 switch (xop) {
5342 case 0x4: /* st, store word */
5343 gen_address_mask(dc, cpu_addr);
5344 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5345 dc->mem_idx, MO_TEUL | MO_ALIGN);
5346 break;
5347 case 0x5: /* stb, store byte */
5348 gen_address_mask(dc, cpu_addr);
5349 tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
5350 break;
5351 case 0x6: /* sth, store halfword */
5352 gen_address_mask(dc, cpu_addr);
5353 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5354 dc->mem_idx, MO_TEUW | MO_ALIGN);
5355 break;
5356 case 0x7: /* std, store double word */
5357 if (rd & 1)
5358 goto illegal_insn;
5359 else {
5360 TCGv_i64 t64;
5361 TCGv lo;
5362
5363 gen_address_mask(dc, cpu_addr);
5364 lo = gen_load_gpr(dc, rd + 1);
5365 t64 = tcg_temp_new_i64();
5366 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5367 tcg_gen_qemu_st_i64(t64, cpu_addr,
5368 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5369 }
5370 break;
5371 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5372 case 0x14: /* sta, V9 stwa, store word alternate */
5373 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5374 break;
5375 case 0x15: /* stba, store byte alternate */
5376 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5377 break;
5378 case 0x16: /* stha, store halfword alternate */
5379 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5380 break;
5381 case 0x17: /* stda, store double word alternate */
5382 if (rd & 1) {
5383 goto illegal_insn;
5384 }
5385 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5386 break;
5387 #endif
5388 #ifdef TARGET_SPARC64
5389 case 0x0e: /* V9 stx */
5390 gen_address_mask(dc, cpu_addr);
5391 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5392 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5393 break;
5394 case 0x1e: /* V9 stxa */
5395 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5396 break;
5397 #endif
5398 default:
5399 goto illegal_insn;
5400 }
5401 } else if (xop > 0x23 && xop < 0x28) {
5402 if (gen_trap_ifnofpu(dc)) {
5403 goto jmp_insn;
5404 }
5405 switch (xop) {
5406 case 0x24: /* stf, store fpreg */
5407 gen_address_mask(dc, cpu_addr);
5408 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5409 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5410 dc->mem_idx, MO_TEUL | MO_ALIGN);
5411 break;
5412 case 0x25: /* stfsr, V9 stxfsr */
5413 {
5414 #ifdef TARGET_SPARC64
5415 gen_address_mask(dc, cpu_addr);
5416 if (rd == 1) {
5417 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5418 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5419 break;
5420 }
5421 #endif
5422 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5423 dc->mem_idx, MO_TEUL | MO_ALIGN);
5424 }
5425 break;
5426 case 0x26:
5427 #ifdef TARGET_SPARC64
5428 /* V9 stqf, store quad fpreg */
5429 CHECK_FPU_FEATURE(dc, FLOAT128);
5430 gen_address_mask(dc, cpu_addr);
5431 /* ??? While stqf only requires 4-byte alignment, it is
5432 legal for the cpu to signal the unaligned exception.
5433 The OS trap handler is then required to fix it up.
5434 For qemu, this avoids having to probe the second page
5435 before performing the first write. */
5436 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5437 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5438 dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
5439 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5440 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5441 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5442 dc->mem_idx, MO_TEUQ);
5443 break;
5444 #else /* !TARGET_SPARC64 */
5445 /* stdfq, store floating point queue */
5446 #if defined(CONFIG_USER_ONLY)
5447 goto illegal_insn;
5448 #else
5449 if (!supervisor(dc))
5450 goto priv_insn;
5451 if (gen_trap_ifnofpu(dc)) {
5452 goto jmp_insn;
5453 }
5454 goto nfq_insn;
5455 #endif
5456 #endif
5457 case 0x27: /* stdf, store double fpreg */
5458 gen_address_mask(dc, cpu_addr);
5459 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5460 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5461 MO_TEUQ | MO_ALIGN_4);
5462 break;
5463 default:
5464 goto illegal_insn;
5465 }
5466 } else if (xop > 0x33 && xop < 0x3f) {
5467 switch (xop) {
5468 #ifdef TARGET_SPARC64
5469 case 0x34: /* V9 stfa */
5470 if (gen_trap_ifnofpu(dc)) {
5471 goto jmp_insn;
5472 }
5473 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5474 break;
5475 case 0x36: /* V9 stqfa */
5476 {
5477 CHECK_FPU_FEATURE(dc, FLOAT128);
5478 if (gen_trap_ifnofpu(dc)) {
5479 goto jmp_insn;
5480 }
5481 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5482 }
5483 break;
5484 case 0x37: /* V9 stdfa */
5485 if (gen_trap_ifnofpu(dc)) {
5486 goto jmp_insn;
5487 }
5488 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5489 break;
5490 case 0x3e: /* V9 casxa */
5491 rs2 = GET_FIELD(insn, 27, 31);
5492 cpu_src2 = gen_load_gpr(dc, rs2);
5493 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5494 break;
5495 #else
5496 case 0x34: /* stc */
5497 case 0x35: /* stcsr */
5498 case 0x36: /* stdcq */
5499 case 0x37: /* stdc */
5500 goto ncp_insn;
5501 #endif
5502 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5503 case 0x3c: /* V9 or LEON3 casa */
5504 #ifndef TARGET_SPARC64
5505 CHECK_IU_FEATURE(dc, CASA);
5506 #endif
5507 rs2 = GET_FIELD(insn, 27, 31);
5508 cpu_src2 = gen_load_gpr(dc, rs2);
5509 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5510 break;
5511 #endif
5512 default:
5513 goto illegal_insn;
5514 }
5515 } else {
5516 goto illegal_insn;
5517 }
5518 }
5519 break;
5520 }
5521 /* default case for non-jump instructions */
5522 if (dc->npc & 3) {
5523 switch (dc->npc) {
5524 case DYNAMIC_PC:
5525 case DYNAMIC_PC_LOOKUP:
5526 dc->pc = dc->npc;
5527 gen_op_next_insn();
5528 break;
5529 case JUMP_PC:
5530 /* we can do a static jump */
5531 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5532 dc->base.is_jmp = DISAS_NORETURN;
5533 break;
5534 default:
5535 g_assert_not_reached();
5536 }
5537 } else {
5538 dc->pc = dc->npc;
5539 dc->npc = dc->npc + 4;
5540 }
5541 jmp_insn:
5542 return;
5543 illegal_insn:
5544 gen_exception(dc, TT_ILL_INSN);
5545 return;
5546 unimp_flush:
5547 gen_exception(dc, TT_UNIMP_FLUSH);
5548 return;
5549 #if !defined(CONFIG_USER_ONLY)
5550 priv_insn:
5551 gen_exception(dc, TT_PRIV_INSN);
5552 return;
5553 #endif
5554 nfpu_insn:
5555 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5556 return;
5557 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5558 nfq_insn:
5559 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5560 return;
5561 #endif
5562 #ifndef TARGET_SPARC64
5563 ncp_insn:
5564 gen_exception(dc, TT_NCP_INSN);
5565 return;
5566 #endif
5567 }
5568
5569 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5570 {
5571 DisasContext *dc = container_of(dcbase, DisasContext, base);
5572 CPUSPARCState *env = cs->env_ptr;
5573 int bound;
5574
5575 dc->pc = dc->base.pc_first;
5576 dc->npc = (target_ulong)dc->base.tb->cs_base;
5577 dc->cc_op = CC_OP_DYNAMIC;
5578 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5579 dc->def = &env->def;
5580 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5581 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5582 #ifndef CONFIG_USER_ONLY
5583 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5584 #endif
5585 #ifdef TARGET_SPARC64
5586 dc->fprs_dirty = 0;
5587 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5588 #ifndef CONFIG_USER_ONLY
5589 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5590 #endif
5591 #endif
5592 /*
5593 * if we reach a page boundary, we stop generation so that the
5594 * PC of a TT_TFAULT exception is always in the right page
5595 */
5596 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5597 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5598 }
5599
5600 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5601 {
5602 }
5603
5604 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5605 {
5606 DisasContext *dc = container_of(dcbase, DisasContext, base);
5607 target_ulong npc = dc->npc;
5608
5609 if (npc & 3) {
5610 switch (npc) {
5611 case JUMP_PC:
5612 assert(dc->jump_pc[1] == dc->pc + 4);
5613 npc = dc->jump_pc[0] | JUMP_PC;
5614 break;
5615 case DYNAMIC_PC:
5616 case DYNAMIC_PC_LOOKUP:
5617 npc = DYNAMIC_PC;
5618 break;
5619 default:
5620 g_assert_not_reached();
5621 }
5622 }
5623 tcg_gen_insn_start(dc->pc, npc);
5624 }
5625
5626 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5627 {
5628 DisasContext *dc = container_of(dcbase, DisasContext, base);
5629 CPUSPARCState *env = cs->env_ptr;
5630 unsigned int insn;
5631
5632 insn = translator_ldl(env, &dc->base, dc->pc);
5633 dc->base.pc_next += 4;
5634 disas_sparc_insn(dc, insn);
5635
5636 if (dc->base.is_jmp == DISAS_NORETURN) {
5637 return;
5638 }
5639 if (dc->pc != dc->base.pc_next) {
5640 dc->base.is_jmp = DISAS_TOO_MANY;
5641 }
5642 }
5643
5644 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5645 {
5646 DisasContext *dc = container_of(dcbase, DisasContext, base);
5647 bool may_lookup;
5648
5649 switch (dc->base.is_jmp) {
5650 case DISAS_NEXT:
5651 case DISAS_TOO_MANY:
5652 if (((dc->pc | dc->npc) & 3) == 0) {
5653 /* static PC and NPC: we can use direct chaining */
5654 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5655 break;
5656 }
5657
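/* A misaligned dc->pc is one of the DYNAMIC_PC markers: real SPARC
   PCs are 4-byte aligned, so the low bits distinguish the two cases. */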
5658 if (dc->pc & 3) {
5659 switch (dc->pc) {
5660 case DYNAMIC_PC_LOOKUP:
5661 may_lookup = true;
5662 break;
5663 case DYNAMIC_PC:
5664 may_lookup = false;
5665 break;
5666 default:
5667 g_assert_not_reached();
5668 }
5669 } else {
5670 tcg_gen_movi_tl(cpu_pc, dc->pc);
5671 may_lookup = true;
5672 }
5673
5674 save_npc(dc);
5675 if (may_lookup) {
5676 tcg_gen_lookup_and_goto_ptr();
5677 } else {
5678 tcg_gen_exit_tb(NULL, 0);
5679 }
5680 break;
5681
5682 case DISAS_NORETURN:
5683 break;
5684
5685 case DISAS_EXIT:
5686 /* Exit TB */
5687 save_state(dc);
5688 tcg_gen_exit_tb(NULL, 0);
5689 break;
5690
5691 default:
5692 g_assert_not_reached();
5693 }
5694 }
5695
5696 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5697 CPUState *cpu, FILE *logfile)
5698 {
5699 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5700 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5701 }
5702
5703 static const TranslatorOps sparc_tr_ops = {
5704 .init_disas_context = sparc_tr_init_disas_context,
5705 .tb_start = sparc_tr_tb_start,
5706 .insn_start = sparc_tr_insn_start,
5707 .translate_insn = sparc_tr_translate_insn,
5708 .tb_stop = sparc_tr_tb_stop,
5709 .disas_log = sparc_tr_disas_log,
5710 };
5711
5712 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5713 target_ulong pc, void *host_pc)
5714 {
5715 DisasContext dc = {};
5716
5717 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5718 }
5719
5720 void sparc_tcg_init(void)
5721 {
5722 static const char gregnames[32][4] = {
5723 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5724 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5725 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5726 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5727 };
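/* The FP file is modeled as TARGET_DPREGS 64-bit values, so only the
   even-numbered single-precision names are materialized here. */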
5728 static const char fregnames[32][4] = {
5729 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5730 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5731 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5732 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5733 };
5734
5735 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5736 #ifdef TARGET_SPARC64
5737 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5738 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5739 #else
5740 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5741 #endif
5742 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5743 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5744 };
5745
5746 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5747 #ifdef TARGET_SPARC64
5748 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5749 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5750 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5751 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5752 "hstick_cmpr" },
5753 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5754 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5755 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5756 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5757 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5758 #endif
5759 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5760 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5761 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5762 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5763 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5764 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5765 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5766 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5767 #ifndef CONFIG_USER_ONLY
5768 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5769 #endif
5770 };
5771
5772 unsigned int i;
5773
5774 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5775 offsetof(CPUSPARCState, regwptr),
5776 "regwptr");
5777
5778 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5779 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5780 }
5781
5782 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5783 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5784 }
5785
5786 cpu_regs[0] = NULL;
5787 for (i = 1; i < 8; ++i) {
5788 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5789 offsetof(CPUSPARCState, gregs[i]),
5790 gregnames[i]);
5791 }
5792
5793 for (i = 8; i < 32; ++i) {
5794 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5795 (i - 8) * sizeof(target_ulong),
5796 gregnames[i]);
5797 }
5798
5799 for (i = 0; i < TARGET_DPREGS; i++) {
5800 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5801 offsetof(CPUSPARCState, fpr[i]),
5802 fregnames[i]);
5803 }
5804 }
5805
5806 void sparc_restore_state_to_opc(CPUState *cs,
5807 const TranslationBlock *tb,
5808 const uint64_t *data)
5809 {
5810 SPARCCPU *cpu = SPARC_CPU(cs);
5811 CPUSPARCState *env = &cpu->env;
5812 target_ulong pc = data[0];
5813 target_ulong npc = data[1];
5814
5815 env->pc = pc;
5816 if (npc == DYNAMIC_PC) {
5817 /* dynamic NPC: already stored */
5818 } else if (npc & JUMP_PC) {
5819 /* jump PC: use 'cond' and the jump targets of the translation */
5820 if (env->cond) {
5821 env->npc = npc & ~3;
5822 } else {
5823 env->npc = pc + 4;
5824 }
5825 } else {
5826 env->npc = npc;
5827 }
5828 }