1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28
29 #include "exec/helper-gen.h"
30
31 #include "exec/translator.h"
32 #include "exec/log.h"
33 #include "asi.h"
34
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef HELPER_H
38
39 #ifdef TARGET_SPARC64
40 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_flushw(E) qemu_build_not_reached()
46 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
47 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
48 # define gen_helper_restored(E) qemu_build_not_reached()
49 # define gen_helper_saved(E) qemu_build_not_reached()
50 # define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
51 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
52 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
53 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
54 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
55 # define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
56 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
57 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
58 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
59 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
60 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
61 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
62 # define MAXTL_MASK 0
63 #endif
64
65 /* Dynamic PC, must exit to main loop. */
66 #define DYNAMIC_PC 1
67 /* Dynamic PC, one of two values according to jump_pc[T2]. */
68 #define JUMP_PC 2
69 /* Dynamic PC, may lookup next TB. */
70 #define DYNAMIC_PC_LOOKUP 3
71
72 #define DISAS_EXIT DISAS_TARGET_0
73
74 /* global register indexes */
75 static TCGv_ptr cpu_regwptr;
76 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
77 static TCGv_i32 cpu_cc_op;
78 static TCGv_i32 cpu_psr;
79 static TCGv cpu_fsr, cpu_pc, cpu_npc;
80 static TCGv cpu_regs[32];
81 static TCGv cpu_y;
82 static TCGv cpu_tbr;
83 static TCGv cpu_cond;
84 #ifdef TARGET_SPARC64
85 static TCGv_i32 cpu_xcc, cpu_fprs;
86 static TCGv cpu_gsr;
87 #else
88 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
89 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
90 #endif
91 /* Floating point registers */
92 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
93
94 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
95 #ifdef TARGET_SPARC64
96 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
97 # define env64_field_offsetof(X) env_field_offsetof(X)
98 #else
99 # define env32_field_offsetof(X) env_field_offsetof(X)
100 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
101 #endif
102
103 typedef struct DisasDelayException {
104 struct DisasDelayException *next;
105 TCGLabel *lab;
106 TCGv_i32 excp;
107 /* Saved state at parent insn. */
108 target_ulong pc;
109 target_ulong npc;
110 } DisasDelayException;
111
112 typedef struct DisasContext {
113 DisasContextBase base;
114 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
115 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
116 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
117 int mem_idx;
118 bool fpu_enabled;
119 bool address_mask_32bit;
120 #ifndef CONFIG_USER_ONLY
121 bool supervisor;
122 #ifdef TARGET_SPARC64
123 bool hypervisor;
124 #endif
125 #endif
126
127 uint32_t cc_op; /* current CC operation */
128 sparc_def_t *def;
129 #ifdef TARGET_SPARC64
130 int fprs_dirty;
131 int asi;
132 #endif
133 DisasDelayException *delay_excp_list;
134 } DisasContext;
135
136 typedef struct {
137 TCGCond cond;
138 bool is_bool;
139 TCGv c1, c2;
140 } DisasCompare;
141
142 // This macro uses non-native bit order
143 #define GET_FIELD(X, FROM, TO) \
144 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
145
146 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
147 #define GET_FIELD_SP(X, FROM, TO) \
148 GET_FIELD(X, 31 - (TO), 31 - (FROM))
149
150 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
151 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
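/*
 * Example: GET_FIELD(insn, 19, 26) expands to
 * (insn >> (31 - 26)) & ((1 << 8) - 1), i.e. the 8-bit field at
 * conventional (little-endian) bit positions [12:5].  This is how
 * get_asi() below extracts the ASI field from a memory instruction.
 */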
152
153 #ifdef TARGET_SPARC64
154 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
155 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
156 #else
157 #define DFPREG(r) (r & 0x1e)
158 #define QFPREG(r) (r & 0x1c)
159 #endif
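/*
 * Example: on TARGET_SPARC64 the low instruction bit selects the upper
 * register bank, so DFPREG(1) = (1 << 5) | 0 = 32 (%f32 in the usual
 * %f naming) and DFPREG(3) = 32 | 2 = 34, while DFPREG(2) remains 2.
 * On sparc32 the low bit is simply masked off.
 */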
160
161 #define UA2005_HTRAP_MASK 0xff
162 #define V8_TRAP_MASK 0x7f
163
164 static int sign_extend(int x, int len)
165 {
166 len = 32 - len;
167 return (x << len) >> len;
168 }
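/*
 * Example: for a 13-bit immediate field, sign_extend(0x1fff, 13)
 * shifts the value up by 19 bits and arithmetic-shifts it back,
 * yielding -1; sign_extend(0xfff, 13) stays 0xfff since bit 12 is
 * clear.
 */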
169
170 #define IS_IMM (insn & (1<<13))
171
172 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
173 {
174 #if defined(TARGET_SPARC64)
175 int bit = (rd < 32) ? 1 : 2;
176 /* If we know we've already set this bit within the TB,
177 we can avoid setting it again. */
178 if (!(dc->fprs_dirty & bit)) {
179 dc->fprs_dirty |= bit;
180 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
181 }
182 #endif
183 }
184
185 /* floating point registers moves */
186 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
187 {
188 TCGv_i32 ret = tcg_temp_new_i32();
189 if (src & 1) {
190 tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
191 } else {
192 tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
193 }
194 return ret;
195 }
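/*
 * The 32 single-precision registers are packed two per TCGv_i64 in
 * cpu_fpr[], with the even-numbered register in the high half: a load
 * of %f1 (src & 1) takes the low 32 bits of cpu_fpr[0], a load of %f0
 * the high 32 bits, matching the deposit offsets in gen_store_fpr_F()
 * below.
 */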
196
197 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
198 {
199 TCGv_i64 t = tcg_temp_new_i64();
200
201 tcg_gen_extu_i32_i64(t, v);
202 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
203 (dst & 1 ? 0 : 32), 32);
204 gen_update_fprs_dirty(dc, dst);
205 }
206
207 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
208 {
209 return tcg_temp_new_i32();
210 }
211
212 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
213 {
214 src = DFPREG(src);
215 return cpu_fpr[src / 2];
216 }
217
218 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
219 {
220 dst = DFPREG(dst);
221 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
222 gen_update_fprs_dirty(dc, dst);
223 }
224
225 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
226 {
227 return cpu_fpr[DFPREG(dst) / 2];
228 }
229
230 static void gen_op_load_fpr_QT0(unsigned int src)
231 {
232 tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
233 offsetof(CPU_QuadU, ll.upper));
234 tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
235 offsetof(CPU_QuadU, ll.lower));
236 }
237
238 static void gen_op_load_fpr_QT1(unsigned int src)
239 {
240 tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
241 offsetof(CPU_QuadU, ll.upper));
242 tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
243 offsetof(CPU_QuadU, ll.lower));
244 }
245
246 static void gen_op_store_QT0_fpr(unsigned int dst)
247 {
248 tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
249 offsetof(CPU_QuadU, ll.upper));
250 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
251 offsetof(CPU_QuadU, ll.lower));
252 }
253
254 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
255 TCGv_i64 v1, TCGv_i64 v2)
256 {
257 dst = QFPREG(dst);
258
259 tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
260 tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
261 gen_update_fprs_dirty(dc, dst);
262 }
263
264 #ifdef TARGET_SPARC64
265 static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
266 {
267 src = QFPREG(src);
268 return cpu_fpr[src / 2];
269 }
270
271 static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
272 {
273 src = QFPREG(src);
274 return cpu_fpr[src / 2 + 1];
275 }
276
277 static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
278 {
279 rd = QFPREG(rd);
280 rs = QFPREG(rs);
281
282 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
283 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
284 gen_update_fprs_dirty(dc, rd);
285 }
286 #endif
287
288 /* moves */
289 #ifdef CONFIG_USER_ONLY
290 #define supervisor(dc) 0
291 #define hypervisor(dc) 0
292 #else
293 #ifdef TARGET_SPARC64
294 #define hypervisor(dc) (dc->hypervisor)
295 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
296 #else
297 #define supervisor(dc) (dc->supervisor)
298 #define hypervisor(dc) 0
299 #endif
300 #endif
301
302 #if !defined(TARGET_SPARC64)
303 # define AM_CHECK(dc) false
304 #elif defined(TARGET_ABI32)
305 # define AM_CHECK(dc) true
306 #elif defined(CONFIG_USER_ONLY)
307 # define AM_CHECK(dc) false
308 #else
309 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
310 #endif
311
312 static void gen_address_mask(DisasContext *dc, TCGv addr)
313 {
314 if (AM_CHECK(dc)) {
315 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
316 }
317 }
318
319 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
320 {
321 return AM_CHECK(dc) ? (uint32_t)addr : addr;
322 }
323
324 static TCGv gen_load_gpr(DisasContext *dc, int reg)
325 {
326 if (reg > 0) {
327 assert(reg < 32);
328 return cpu_regs[reg];
329 } else {
330 TCGv t = tcg_temp_new();
331 tcg_gen_movi_tl(t, 0);
332 return t;
333 }
334 }
335
336 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
337 {
338 if (reg > 0) {
339 assert(reg < 32);
340 tcg_gen_mov_tl(cpu_regs[reg], v);
341 }
342 }
343
344 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
345 {
346 if (reg > 0) {
347 assert(reg < 32);
348 return cpu_regs[reg];
349 } else {
350 return tcg_temp_new();
351 }
352 }
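/*
 * %g0 reads as zero and ignores writes: gen_load_gpr() hands back a
 * fresh zeroed temporary for reg 0, gen_store_gpr() silently drops the
 * store, and gen_dest_gpr() returns a scratch temporary whose value is
 * never written back.
 */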
353
354 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
355 {
356 return translator_use_goto_tb(&s->base, pc) &&
357 translator_use_goto_tb(&s->base, npc);
358 }
359
360 static void gen_goto_tb(DisasContext *s, int tb_num,
361 target_ulong pc, target_ulong npc)
362 {
363 if (use_goto_tb(s, pc, npc)) {
364 /* jump to same page: we can use a direct jump */
365 tcg_gen_goto_tb(tb_num);
366 tcg_gen_movi_tl(cpu_pc, pc);
367 tcg_gen_movi_tl(cpu_npc, npc);
368 tcg_gen_exit_tb(s->base.tb, tb_num);
369 } else {
370 /* jump to another page: we can use an indirect jump */
371 tcg_gen_movi_tl(cpu_pc, pc);
372 tcg_gen_movi_tl(cpu_npc, npc);
373 tcg_gen_lookup_and_goto_ptr();
374 }
375 }
376
377 // XXX suboptimal
378 static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
379 {
380 tcg_gen_extu_i32_tl(reg, src);
381 tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
382 }
383
384 static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
385 {
386 tcg_gen_extu_i32_tl(reg, src);
387 tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
388 }
389
390 static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
391 {
392 tcg_gen_extu_i32_tl(reg, src);
393 tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
394 }
395
396 static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
397 {
398 tcg_gen_extu_i32_tl(reg, src);
399 tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
400 }
401
402 static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
403 {
404 tcg_gen_mov_tl(cpu_cc_src, src1);
405 tcg_gen_mov_tl(cpu_cc_src2, src2);
406 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
407 tcg_gen_mov_tl(dst, cpu_cc_dst);
408 }
409
410 static TCGv_i32 gen_add32_carry32(void)
411 {
412 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
413
414 /* Carry is computed from a previous add: (dst < src) */
415 #if TARGET_LONG_BITS == 64
416 cc_src1_32 = tcg_temp_new_i32();
417 cc_src2_32 = tcg_temp_new_i32();
418 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
419 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
420 #else
421 cc_src1_32 = cpu_cc_dst;
422 cc_src2_32 = cpu_cc_src;
423 #endif
424
425 carry_32 = tcg_temp_new_i32();
426 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
427
428 return carry_32;
429 }
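/*
 * The (dst < src) trick: after a 32-bit dst = src1 + src2, unsigned
 * overflow occurred iff the result wrapped below either operand.
 * Example: 0xffffffff + 1 = 0 and 0 < 1, so carry = 1; without a wrap
 * the sum is always >= each operand.
 */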
430
431 static TCGv_i32 gen_sub32_carry32(void)
432 {
433 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
434
435 /* Carry is computed from a previous borrow: (src1 < src2) */
436 #if TARGET_LONG_BITS == 64
437 cc_src1_32 = tcg_temp_new_i32();
438 cc_src2_32 = tcg_temp_new_i32();
439 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
440 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
441 #else
442 cc_src1_32 = cpu_cc_src;
443 cc_src2_32 = cpu_cc_src2;
444 #endif
445
446 carry_32 = tcg_temp_new_i32();
447 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
448
449 return carry_32;
450 }
451
452 static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
453 TCGv_i32 carry_32, bool update_cc)
454 {
455 tcg_gen_add_tl(dst, src1, src2);
456
457 #ifdef TARGET_SPARC64
458 TCGv carry = tcg_temp_new();
459 tcg_gen_extu_i32_tl(carry, carry_32);
460 tcg_gen_add_tl(dst, dst, carry);
461 #else
462 tcg_gen_add_i32(dst, dst, carry_32);
463 #endif
464
465 if (update_cc) {
466 tcg_debug_assert(dst == cpu_cc_dst);
467 tcg_gen_mov_tl(cpu_cc_src, src1);
468 tcg_gen_mov_tl(cpu_cc_src2, src2);
469 }
470 }
471
472 static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
473 {
474 TCGv discard;
475
476 if (TARGET_LONG_BITS == 64) {
477 gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
478 return;
479 }
480
481 /*
482 * We can re-use the host's hardware carry generation by using
483 * an ADD2 opcode. We discard the low part of the output.
484 * Ideally we'd combine this operation with the add that
485 * generated the carry in the first place.
486 */
487 discard = tcg_temp_new();
488 tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
489
490 if (update_cc) {
491 tcg_debug_assert(dst == cpu_cc_dst);
492 tcg_gen_mov_tl(cpu_cc_src, src1);
493 tcg_gen_mov_tl(cpu_cc_src2, src2);
494 }
495 }
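/*
 * Operand mapping for the ADD2 trick above: the double-word add
 * computes (src1:cpu_cc_src) + (src2:cpu_cc_src2), so its low half
 * re-performs the flag-setting add cpu_cc_src + cpu_cc_src2 and the
 * hardware carry from it propagates into the high half, leaving
 * src1 + src2 + C in dst; the recomputed low word is discarded.
 */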
496
497 static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
498 {
499 gen_op_addc_int_add(dst, src1, src2, false);
500 }
501
502 static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
503 {
504 gen_op_addc_int_add(dst, src1, src2, true);
505 }
506
507 static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
508 {
509 gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
510 }
511
512 static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
513 {
514 gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
515 }
516
517 static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
518 bool update_cc)
519 {
520 TCGv_i32 carry_32 = tcg_temp_new_i32();
521 gen_helper_compute_C_icc(carry_32, tcg_env);
522 gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
523 }
524
525 static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
526 {
527 gen_op_addc_int_generic(dst, src1, src2, false);
528 }
529
530 static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
531 {
532 gen_op_addc_int_generic(dst, src1, src2, true);
533 }
534
535 static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
536 {
537 tcg_gen_mov_tl(cpu_cc_src, src1);
538 tcg_gen_mov_tl(cpu_cc_src2, src2);
539 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
540 tcg_gen_mov_tl(dst, cpu_cc_dst);
541 }
542
543 static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
544 TCGv_i32 carry_32, bool update_cc)
545 {
546 TCGv carry;
547
548 #if TARGET_LONG_BITS == 64
549 carry = tcg_temp_new();
550 tcg_gen_extu_i32_i64(carry, carry_32);
551 #else
552 carry = carry_32;
553 #endif
554
555 tcg_gen_sub_tl(dst, src1, src2);
556 tcg_gen_sub_tl(dst, dst, carry);
557
558 if (update_cc) {
559 tcg_debug_assert(dst == cpu_cc_dst);
560 tcg_gen_mov_tl(cpu_cc_src, src1);
561 tcg_gen_mov_tl(cpu_cc_src2, src2);
562 }
563 }
564
565 static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
566 {
567 gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
568 }
569
570 static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
571 {
572 gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
573 }
574
575 static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
576 {
577 TCGv discard;
578
579 if (TARGET_LONG_BITS == 64) {
580 gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
581 return;
582 }
583
584 /*
585 * We can re-use the host's hardware carry generation by using
586 * a SUB2 opcode. We discard the low part of the output.
587 */
588 discard = tcg_temp_new();
589 tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
590
591 if (update_cc) {
592 tcg_debug_assert(dst == cpu_cc_dst);
593 tcg_gen_mov_tl(cpu_cc_src, src1);
594 tcg_gen_mov_tl(cpu_cc_src2, src2);
595 }
596 }
597
598 static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
599 {
600 gen_op_subc_int_sub(dst, src1, src2, false);
601 }
602
603 static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
604 {
605 gen_op_subc_int_sub(dst, src1, src2, true);
606 }
607
608 static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
609 bool update_cc)
610 {
611 TCGv_i32 carry_32 = tcg_temp_new_i32();
612
613 gen_helper_compute_C_icc(carry_32, tcg_env);
614 gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
615 }
616
617 static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
618 {
619 gen_op_subc_int_generic(dst, src1, src2, false);
620 }
621
622 static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
623 {
624 gen_op_subc_int_generic(dst, src1, src2, true);
625 }
626
627 static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
628 {
629 TCGv r_temp, zero, t0;
630
631 r_temp = tcg_temp_new();
632 t0 = tcg_temp_new();
633
634 /* old op:
635 if (!(env->y & 1))
636 T1 = 0;
637 */
638 zero = tcg_constant_tl(0);
639 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
640 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
641 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
642 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
643 zero, cpu_cc_src2);
644
645 // b2 = T0 & 1;
646 // env->y = (b2 << 31) | (env->y >> 1);
647 tcg_gen_extract_tl(t0, cpu_y, 1, 31);
648 tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);
649
650 // b1 = N ^ V;
651 gen_mov_reg_N(t0, cpu_psr);
652 gen_mov_reg_V(r_temp, cpu_psr);
653 tcg_gen_xor_tl(t0, t0, r_temp);
654
655 // T0 = (b1 << 31) | (T0 >> 1);
656 // src1 = T0;
657 tcg_gen_shli_tl(t0, t0, 31);
658 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
659 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
660
661 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
662
663 tcg_gen_mov_tl(dst, cpu_cc_dst);
664 }
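/*
 * MULScc is one step of the V8 iterative multiply: the low bit of %y
 * decides whether the multiplicand (src2) is added, %y shifts right
 * taking the low bit of src1, and src1 shifts right with N ^ V
 * entering at the top before the add.  Thirty-two such steps, with
 * the multiplier loaded into %y beforehand, leave the 64-bit product
 * split across the accumulating register and %y.
 */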
665
666 static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
667 {
668 #if TARGET_LONG_BITS == 32
669 if (sign_ext) {
670 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
671 } else {
672 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
673 }
674 #else
675 TCGv t0 = tcg_temp_new_i64();
676 TCGv t1 = tcg_temp_new_i64();
677
678 if (sign_ext) {
679 tcg_gen_ext32s_i64(t0, src1);
680 tcg_gen_ext32s_i64(t1, src2);
681 } else {
682 tcg_gen_ext32u_i64(t0, src1);
683 tcg_gen_ext32u_i64(t1, src2);
684 }
685
686 tcg_gen_mul_i64(dst, t0, t1);
687 tcg_gen_shri_i64(cpu_y, dst, 32);
688 #endif
689 }
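/*
 * UMUL and SMUL produce the full 64-bit product: the destination gets
 * the low 32 bits (the whole value on sparc64) and the high 32 bits
 * always land in %y.  Example: UMUL 0x80000000 * 2 leaves %y = 1 and
 * the low word = 0.
 */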
690
691 static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
692 {
693 /* zero-extend truncated operands before multiplication */
694 gen_op_multiply(dst, src1, src2, 0);
695 }
696
697 static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
698 {
699 /* sign-extend truncated operands before multiplication */
700 gen_op_multiply(dst, src1, src2, 1);
701 }
702
703 static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
704 {
705 gen_helper_udivx(dst, tcg_env, src1, src2);
706 }
707
708 static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
709 {
710 gen_helper_sdivx(dst, tcg_env, src1, src2);
711 }
712
713 static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
714 {
715 gen_helper_udiv(dst, tcg_env, src1, src2);
716 }
717
718 static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
719 {
720 gen_helper_sdiv(dst, tcg_env, src1, src2);
721 }
722
723 static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
724 {
725 gen_helper_udiv_cc(dst, tcg_env, src1, src2);
726 }
727
728 static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
729 {
730 gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
731 }
732
733 static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
734 {
735 gen_helper_taddcctv(dst, tcg_env, src1, src2);
736 }
737
738 static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
739 {
740 gen_helper_tsubcctv(dst, tcg_env, src1, src2);
741 }
742
743 // 1
744 static void gen_op_eval_ba(TCGv dst)
745 {
746 tcg_gen_movi_tl(dst, 1);
747 }
748
749 // Z
750 static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
751 {
752 gen_mov_reg_Z(dst, src);
753 }
754
755 // Z | (N ^ V)
756 static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
757 {
758 TCGv t0 = tcg_temp_new();
759 gen_mov_reg_N(t0, src);
760 gen_mov_reg_V(dst, src);
761 tcg_gen_xor_tl(dst, dst, t0);
762 gen_mov_reg_Z(t0, src);
763 tcg_gen_or_tl(dst, dst, t0);
764 }
765
766 // N ^ V
767 static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
768 {
769 TCGv t0 = tcg_temp_new();
770 gen_mov_reg_V(t0, src);
771 gen_mov_reg_N(dst, src);
772 tcg_gen_xor_tl(dst, dst, t0);
773 }
774
775 // C | Z
776 static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
777 {
778 TCGv t0 = tcg_temp_new();
779 gen_mov_reg_Z(t0, src);
780 gen_mov_reg_C(dst, src);
781 tcg_gen_or_tl(dst, dst, t0);
782 }
783
784 // C
785 static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
786 {
787 gen_mov_reg_C(dst, src);
788 }
789
790 // V
791 static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
792 {
793 gen_mov_reg_V(dst, src);
794 }
795
796 // 0
797 static void gen_op_eval_bn(TCGv dst)
798 {
799 tcg_gen_movi_tl(dst, 0);
800 }
801
802 // N
803 static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
804 {
805 gen_mov_reg_N(dst, src);
806 }
807
808 // !Z
809 static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
810 {
811 gen_mov_reg_Z(dst, src);
812 tcg_gen_xori_tl(dst, dst, 0x1);
813 }
814
815 // !(Z | (N ^ V))
816 static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
817 {
818 gen_op_eval_ble(dst, src);
819 tcg_gen_xori_tl(dst, dst, 0x1);
820 }
821
822 // !(N ^ V)
823 static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
824 {
825 gen_op_eval_bl(dst, src);
826 tcg_gen_xori_tl(dst, dst, 0x1);
827 }
828
829 // !(C | Z)
830 static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
831 {
832 gen_op_eval_bleu(dst, src);
833 tcg_gen_xori_tl(dst, dst, 0x1);
834 }
835
836 // !C
837 static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
838 {
839 gen_mov_reg_C(dst, src);
840 tcg_gen_xori_tl(dst, dst, 0x1);
841 }
842
843 // !N
844 static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
845 {
846 gen_mov_reg_N(dst, src);
847 tcg_gen_xori_tl(dst, dst, 0x1);
848 }
849
850 // !V
851 static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
852 {
853 gen_mov_reg_V(dst, src);
854 tcg_gen_xori_tl(dst, dst, 0x1);
855 }
856
857 /*
858 FPSR bit field FCC1 | FCC0:
859 0 =
860 1 <
861 2 >
862 3 unordered
863 */
864 static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
865 unsigned int fcc_offset)
866 {
867 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
868 tcg_gen_andi_tl(reg, reg, 0x1);
869 }
870
871 static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
872 {
873 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
874 tcg_gen_andi_tl(reg, reg, 0x1);
875 }
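/*
 * fcc_offset selects which of the four V9 condition-code fields is
 * read: gen_fcompare() passes 0 for fcc0 (FSR bits 11:10) and
 * 22/24/26 for fcc1..fcc3, which relative to FSR_FCC0_SHIFT land on
 * FSR bits 33:32, 35:34 and 37:36.  Sparc32 has only fcc0, so the
 * offset is always 0 there.
 */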
876
877 // !0: FCC0 | FCC1
878 static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
879 {
880 TCGv t0 = tcg_temp_new();
881 gen_mov_reg_FCC0(dst, src, fcc_offset);
882 gen_mov_reg_FCC1(t0, src, fcc_offset);
883 tcg_gen_or_tl(dst, dst, t0);
884 }
885
886 // 1 or 2: FCC0 ^ FCC1
887 static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
888 {
889 TCGv t0 = tcg_temp_new();
890 gen_mov_reg_FCC0(dst, src, fcc_offset);
891 gen_mov_reg_FCC1(t0, src, fcc_offset);
892 tcg_gen_xor_tl(dst, dst, t0);
893 }
894
895 // 1 or 3: FCC0
896 static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
897 {
898 gen_mov_reg_FCC0(dst, src, fcc_offset);
899 }
900
901 // 1: FCC0 & !FCC1
902 static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
903 {
904 TCGv t0 = tcg_temp_new();
905 gen_mov_reg_FCC0(dst, src, fcc_offset);
906 gen_mov_reg_FCC1(t0, src, fcc_offset);
907 tcg_gen_andc_tl(dst, dst, t0);
908 }
909
910 // 2 or 3: FCC1
911 static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
912 {
913 gen_mov_reg_FCC1(dst, src, fcc_offset);
914 }
915
916 // 2: !FCC0 & FCC1
917 static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
918 {
919 TCGv t0 = tcg_temp_new();
920 gen_mov_reg_FCC0(dst, src, fcc_offset);
921 gen_mov_reg_FCC1(t0, src, fcc_offset);
922 tcg_gen_andc_tl(dst, t0, dst);
923 }
924
925 // 3: FCC0 & FCC1
926 static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
927 {
928 TCGv t0 = tcg_temp_new();
929 gen_mov_reg_FCC0(dst, src, fcc_offset);
930 gen_mov_reg_FCC1(t0, src, fcc_offset);
931 tcg_gen_and_tl(dst, dst, t0);
932 }
933
934 // 0: !(FCC0 | FCC1)
935 static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
936 {
937 TCGv t0 = tcg_temp_new();
938 gen_mov_reg_FCC0(dst, src, fcc_offset);
939 gen_mov_reg_FCC1(t0, src, fcc_offset);
940 tcg_gen_or_tl(dst, dst, t0);
941 tcg_gen_xori_tl(dst, dst, 0x1);
942 }
943
944 // 0 or 3: !(FCC0 ^ FCC1)
945 static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
946 {
947 TCGv t0 = tcg_temp_new();
948 gen_mov_reg_FCC0(dst, src, fcc_offset);
949 gen_mov_reg_FCC1(t0, src, fcc_offset);
950 tcg_gen_xor_tl(dst, dst, t0);
951 tcg_gen_xori_tl(dst, dst, 0x1);
952 }
953
954 // 0 or 2: !FCC0
955 static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
956 {
957 gen_mov_reg_FCC0(dst, src, fcc_offset);
958 tcg_gen_xori_tl(dst, dst, 0x1);
959 }
960
961 // !1: !(FCC0 & !FCC1)
962 static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
963 {
964 TCGv t0 = tcg_temp_new();
965 gen_mov_reg_FCC0(dst, src, fcc_offset);
966 gen_mov_reg_FCC1(t0, src, fcc_offset);
967 tcg_gen_andc_tl(dst, dst, t0);
968 tcg_gen_xori_tl(dst, dst, 0x1);
969 }
970
971 // 0 or 1: !FCC1
972 static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
973 {
974 gen_mov_reg_FCC1(dst, src, fcc_offset);
975 tcg_gen_xori_tl(dst, dst, 0x1);
976 }
977
978 // !2: !(!FCC0 & FCC1)
979 static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
980 {
981 TCGv t0 = tcg_temp_new();
982 gen_mov_reg_FCC0(dst, src, fcc_offset);
983 gen_mov_reg_FCC1(t0, src, fcc_offset);
984 tcg_gen_andc_tl(dst, t0, dst);
985 tcg_gen_xori_tl(dst, dst, 0x1);
986 }
987
988 // !3: !(FCC0 & FCC1)
989 static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
990 {
991 TCGv t0 = tcg_temp_new();
992 gen_mov_reg_FCC0(dst, src, fcc_offset);
993 gen_mov_reg_FCC1(t0, src, fcc_offset);
994 tcg_gen_and_tl(dst, dst, t0);
995 tcg_gen_xori_tl(dst, dst, 0x1);
996 }
997
998 static void gen_branch2(DisasContext *dc, target_ulong pc1,
999 target_ulong pc2, TCGv r_cond)
1000 {
1001 TCGLabel *l1 = gen_new_label();
1002
1003 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
1004
1005 gen_goto_tb(dc, 0, pc1, pc1 + 4);
1006
1007 gen_set_label(l1);
1008 gen_goto_tb(dc, 1, pc2, pc2 + 4);
1009 }
1010
1011 static void gen_generic_branch(DisasContext *dc)
1012 {
1013 TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
1014 TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
1015 TCGv zero = tcg_constant_tl(0);
1016
1017 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1018 }
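/*
 * When a conditional branch ends the TB with its delay slot, the two
 * candidate next-pc values sit in jump_pc[] and are resolved with a
 * single movcond on cpu_cond: nonzero selects jump_pc[0] (branch
 * taken), zero selects jump_pc[1] (fall through).
 */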
1019
1020 /* call this function before using the condition register as it may
1021 have been set for a jump */
1022 static void flush_cond(DisasContext *dc)
1023 {
1024 if (dc->npc == JUMP_PC) {
1025 gen_generic_branch(dc);
1026 dc->npc = DYNAMIC_PC_LOOKUP;
1027 }
1028 }
1029
1030 static void save_npc(DisasContext *dc)
1031 {
1032 if (dc->npc & 3) {
1033 switch (dc->npc) {
1034 case JUMP_PC:
1035 gen_generic_branch(dc);
1036 dc->npc = DYNAMIC_PC_LOOKUP;
1037 break;
1038 case DYNAMIC_PC:
1039 case DYNAMIC_PC_LOOKUP:
1040 break;
1041 default:
1042 g_assert_not_reached();
1043 }
1044 } else {
1045 tcg_gen_movi_tl(cpu_npc, dc->npc);
1046 }
1047 }
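/*
 * The DYNAMIC_PC/JUMP_PC/DYNAMIC_PC_LOOKUP tags (1, 2, 3) can share
 * the pc/npc fields with real addresses because instructions are
 * 4-byte aligned: a genuine target never has its low two bits set,
 * which is what the "dc->npc & 3" tests here and in gen_mov_pc_npc()
 * rely on.
 */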
1048
1049 static void update_psr(DisasContext *dc)
1050 {
1051 if (dc->cc_op != CC_OP_FLAGS) {
1052 dc->cc_op = CC_OP_FLAGS;
1053 gen_helper_compute_psr(tcg_env);
1054 }
1055 }
1056
1057 static void save_state(DisasContext *dc)
1058 {
1059 tcg_gen_movi_tl(cpu_pc, dc->pc);
1060 save_npc(dc);
1061 }
1062
1063 static void gen_exception(DisasContext *dc, int which)
1064 {
1065 save_state(dc);
1066 gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
1067 dc->base.is_jmp = DISAS_NORETURN;
1068 }
1069
1070 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1071 {
1072 DisasDelayException *e = g_new0(DisasDelayException, 1);
1073
1074 e->next = dc->delay_excp_list;
1075 dc->delay_excp_list = e;
1076
1077 e->lab = gen_new_label();
1078 e->excp = excp;
1079 e->pc = dc->pc;
1080 /* Caller must have used flush_cond before branch. */
1081 assert(dc->npc != JUMP_PC);
1082 e->npc = dc->npc;
1083
1084 return e->lab;
1085 }
1086
1087 static TCGLabel *delay_exception(DisasContext *dc, int excp)
1088 {
1089 return delay_exceptionv(dc, tcg_constant_i32(excp));
1090 }
1091
1092 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1093 {
1094 TCGv t = tcg_temp_new();
1095 TCGLabel *lab;
1096
1097 tcg_gen_andi_tl(t, addr, mask);
1098
1099 flush_cond(dc);
1100 lab = delay_exception(dc, TT_UNALIGNED);
1101 tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1102 }
1103
1104 static void gen_mov_pc_npc(DisasContext *dc)
1105 {
1106 if (dc->npc & 3) {
1107 switch (dc->npc) {
1108 case JUMP_PC:
1109 gen_generic_branch(dc);
1110 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1111 dc->pc = DYNAMIC_PC_LOOKUP;
1112 break;
1113 case DYNAMIC_PC:
1114 case DYNAMIC_PC_LOOKUP:
1115 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1116 dc->pc = dc->npc;
1117 break;
1118 default:
1119 g_assert_not_reached();
1120 }
1121 } else {
1122 dc->pc = dc->npc;
1123 }
1124 }
1125
1126 static void gen_op_next_insn(void)
1127 {
1128 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1129 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1130 }
1131
1132 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1133 DisasContext *dc)
1134 {
1135 static int subcc_cond[16] = {
1136 TCG_COND_NEVER,
1137 TCG_COND_EQ,
1138 TCG_COND_LE,
1139 TCG_COND_LT,
1140 TCG_COND_LEU,
1141 TCG_COND_LTU,
1142 -1, /* neg */
1143 -1, /* overflow */
1144 TCG_COND_ALWAYS,
1145 TCG_COND_NE,
1146 TCG_COND_GT,
1147 TCG_COND_GE,
1148 TCG_COND_GTU,
1149 TCG_COND_GEU,
1150 -1, /* pos */
1151 -1, /* no overflow */
1152 };
1153
1154 static int logic_cond[16] = {
1155 TCG_COND_NEVER,
1156 TCG_COND_EQ, /* eq: Z */
1157 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1158 TCG_COND_LT, /* lt: N ^ V -> N */
1159 TCG_COND_EQ, /* leu: C | Z -> Z */
1160 TCG_COND_NEVER, /* ltu: C -> 0 */
1161 TCG_COND_LT, /* neg: N */
1162 TCG_COND_NEVER, /* vs: V -> 0 */
1163 TCG_COND_ALWAYS,
1164 TCG_COND_NE, /* ne: !Z */
1165 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1166 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1167 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1168 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1169 TCG_COND_GE, /* pos: !N */
1170 TCG_COND_ALWAYS, /* vc: !V -> 1 */
1171 };
1172
1173 TCGv_i32 r_src;
1174 TCGv r_dst;
1175
1176 #ifdef TARGET_SPARC64
1177 if (xcc) {
1178 r_src = cpu_xcc;
1179 } else {
1180 r_src = cpu_psr;
1181 }
1182 #else
1183 r_src = cpu_psr;
1184 #endif
1185
1186 switch (dc->cc_op) {
1187 case CC_OP_LOGIC:
1188 cmp->cond = logic_cond[cond];
1189 do_compare_dst_0:
1190 cmp->is_bool = false;
1191 cmp->c2 = tcg_constant_tl(0);
1192 #ifdef TARGET_SPARC64
1193 if (!xcc) {
1194 cmp->c1 = tcg_temp_new();
1195 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1196 break;
1197 }
1198 #endif
1199 cmp->c1 = cpu_cc_dst;
1200 break;
1201
1202 case CC_OP_SUB:
1203 switch (cond) {
1204 case 6: /* neg */
1205 case 14: /* pos */
1206 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1207 goto do_compare_dst_0;
1208
1209 case 7: /* overflow */
1210 case 15: /* !overflow */
1211 goto do_dynamic;
1212
1213 default:
1214 cmp->cond = subcc_cond[cond];
1215 cmp->is_bool = false;
1216 #ifdef TARGET_SPARC64
1217 if (!xcc) {
1218 /* Note that sign-extension works for unsigned compares as
1219 long as both operands are sign-extended. */
1220 cmp->c1 = tcg_temp_new();
1221 cmp->c2 = tcg_temp_new();
1222 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1223 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1224 break;
1225 }
1226 #endif
1227 cmp->c1 = cpu_cc_src;
1228 cmp->c2 = cpu_cc_src2;
1229 break;
1230 }
1231 break;
1232
1233 default:
1234 do_dynamic:
1235 gen_helper_compute_psr(tcg_env);
1236 dc->cc_op = CC_OP_FLAGS;
1237 /* FALLTHRU */
1238
1239 case CC_OP_FLAGS:
1240 /* We're going to generate a boolean result. */
1241 cmp->cond = TCG_COND_NE;
1242 cmp->is_bool = true;
1243 cmp->c1 = r_dst = tcg_temp_new();
1244 cmp->c2 = tcg_constant_tl(0);
1245
1246 switch (cond) {
1247 case 0x0:
1248 gen_op_eval_bn(r_dst);
1249 break;
1250 case 0x1:
1251 gen_op_eval_be(r_dst, r_src);
1252 break;
1253 case 0x2:
1254 gen_op_eval_ble(r_dst, r_src);
1255 break;
1256 case 0x3:
1257 gen_op_eval_bl(r_dst, r_src);
1258 break;
1259 case 0x4:
1260 gen_op_eval_bleu(r_dst, r_src);
1261 break;
1262 case 0x5:
1263 gen_op_eval_bcs(r_dst, r_src);
1264 break;
1265 case 0x6:
1266 gen_op_eval_bneg(r_dst, r_src);
1267 break;
1268 case 0x7:
1269 gen_op_eval_bvs(r_dst, r_src);
1270 break;
1271 case 0x8:
1272 gen_op_eval_ba(r_dst);
1273 break;
1274 case 0x9:
1275 gen_op_eval_bne(r_dst, r_src);
1276 break;
1277 case 0xa:
1278 gen_op_eval_bg(r_dst, r_src);
1279 break;
1280 case 0xb:
1281 gen_op_eval_bge(r_dst, r_src);
1282 break;
1283 case 0xc:
1284 gen_op_eval_bgu(r_dst, r_src);
1285 break;
1286 case 0xd:
1287 gen_op_eval_bcc(r_dst, r_src);
1288 break;
1289 case 0xe:
1290 gen_op_eval_bpos(r_dst, r_src);
1291 break;
1292 case 0xf:
1293 gen_op_eval_bvc(r_dst, r_src);
1294 break;
1295 }
1296 break;
1297 }
1298 }
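/*
 * Example of the fast paths above: after a SUBcc (CC_OP_SUB), "ble"
 * (cond 2) becomes a direct TCG_COND_LE comparison of the saved
 * operands cpu_cc_src and cpu_cc_src2, with no flags materialised.
 * Within CC_OP_SUB only the overflow conditions (7 and 15) fall back
 * to gen_helper_compute_psr().
 */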
1299
1300 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1301 {
1302 unsigned int offset;
1303 TCGv r_dst;
1304
1305 /* For now we still generate a straight boolean result. */
1306 cmp->cond = TCG_COND_NE;
1307 cmp->is_bool = true;
1308 cmp->c1 = r_dst = tcg_temp_new();
1309 cmp->c2 = tcg_constant_tl(0);
1310
1311 switch (cc) {
1312 default:
1313 case 0x0:
1314 offset = 0;
1315 break;
1316 case 0x1:
1317 offset = 32 - 10;
1318 break;
1319 case 0x2:
1320 offset = 34 - 10;
1321 break;
1322 case 0x3:
1323 offset = 36 - 10;
1324 break;
1325 }
1326
1327 switch (cond) {
1328 case 0x0:
1329 gen_op_eval_bn(r_dst);
1330 break;
1331 case 0x1:
1332 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1333 break;
1334 case 0x2:
1335 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1336 break;
1337 case 0x3:
1338 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1339 break;
1340 case 0x4:
1341 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1342 break;
1343 case 0x5:
1344 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1345 break;
1346 case 0x6:
1347 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1348 break;
1349 case 0x7:
1350 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1351 break;
1352 case 0x8:
1353 gen_op_eval_ba(r_dst);
1354 break;
1355 case 0x9:
1356 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1357 break;
1358 case 0xa:
1359 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1360 break;
1361 case 0xb:
1362 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1363 break;
1364 case 0xc:
1365 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1366 break;
1367 case 0xd:
1368 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1369 break;
1370 case 0xe:
1371 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1372 break;
1373 case 0xf:
1374 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1375 break;
1376 }
1377 }
1378
1379 // Inverted logic
1380 static const TCGCond gen_tcg_cond_reg[8] = {
1381 TCG_COND_NEVER, /* reserved */
1382 TCG_COND_NE,
1383 TCG_COND_GT,
1384 TCG_COND_GE,
1385 TCG_COND_NEVER, /* reserved */
1386 TCG_COND_EQ,
1387 TCG_COND_LE,
1388 TCG_COND_LT,
1389 };
1390
1391 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1392 {
1393 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1394 cmp->is_bool = false;
1395 cmp->c1 = r_src;
1396 cmp->c2 = tcg_constant_tl(0);
1397 }
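/*
 * The table is stored inverted so gen_compare_reg() can express
 * "condition holds" directly: for BRZ/MOVRZ (rcond 1) the entry is
 * TCG_COND_NE and tcg_invert_cond() turns it back into the
 * TCG_COND_EQ test against zero that the instruction specifies.
 */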
1398
1399 #ifdef TARGET_SPARC64
1400 static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1401 {
1402 switch (fccno) {
1403 case 0:
1404 gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
1405 break;
1406 case 1:
1407 gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1408 break;
1409 case 2:
1410 gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1411 break;
1412 case 3:
1413 gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1414 break;
1415 }
1416 }
1417
1418 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1419 {
1420 switch (fccno) {
1421 case 0:
1422 gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
1423 break;
1424 case 1:
1425 gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1426 break;
1427 case 2:
1428 gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1429 break;
1430 case 3:
1431 gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1432 break;
1433 }
1434 }
1435
1436 static void gen_op_fcmpq(int fccno)
1437 {
1438 switch (fccno) {
1439 case 0:
1440 gen_helper_fcmpq(cpu_fsr, tcg_env);
1441 break;
1442 case 1:
1443 gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
1444 break;
1445 case 2:
1446 gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
1447 break;
1448 case 3:
1449 gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
1450 break;
1451 }
1452 }
1453
1454 static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1455 {
1456 switch (fccno) {
1457 case 0:
1458 gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
1459 break;
1460 case 1:
1461 gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1462 break;
1463 case 2:
1464 gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1465 break;
1466 case 3:
1467 gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1468 break;
1469 }
1470 }
1471
1472 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1473 {
1474 switch (fccno) {
1475 case 0:
1476 gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
1477 break;
1478 case 1:
1479 gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1480 break;
1481 case 2:
1482 gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1483 break;
1484 case 3:
1485 gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1486 break;
1487 }
1488 }
1489
1490 static void gen_op_fcmpeq(int fccno)
1491 {
1492 switch (fccno) {
1493 case 0:
1494 gen_helper_fcmpeq(cpu_fsr, tcg_env);
1495 break;
1496 case 1:
1497 gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
1498 break;
1499 case 2:
1500 gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
1501 break;
1502 case 3:
1503 gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
1504 break;
1505 }
1506 }
1507
1508 #else
1509
1510 static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1511 {
1512 gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
1513 }
1514
1515 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1516 {
1517 gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
1518 }
1519
1520 static void gen_op_fcmpq(int fccno)
1521 {
1522 gen_helper_fcmpq(cpu_fsr, tcg_env);
1523 }
1524
1525 static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1526 {
1527 gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
1528 }
1529
1530 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1531 {
1532 gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
1533 }
1534
1535 static void gen_op_fcmpeq(int fccno)
1536 {
1537 gen_helper_fcmpeq(cpu_fsr, tcg_env);
1538 }
1539 #endif
1540
1541 static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
1542 {
1543 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1544 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1545 gen_exception(dc, TT_FP_EXCP);
1546 }
1547
1548 static int gen_trap_ifnofpu(DisasContext *dc)
1549 {
1550 #if !defined(CONFIG_USER_ONLY)
1551 if (!dc->fpu_enabled) {
1552 gen_exception(dc, TT_NFPU_INSN);
1553 return 1;
1554 }
1555 #endif
1556 return 0;
1557 }
1558
1559 static void gen_op_clear_ieee_excp_and_FTT(void)
1560 {
1561 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1562 }
1563
1564 static void gen_fop_FF(DisasContext *dc, int rd, int rs,
1565 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1566 {
1567 TCGv_i32 dst, src;
1568
1569 src = gen_load_fpr_F(dc, rs);
1570 dst = gen_dest_fpr_F(dc);
1571
1572 gen(dst, tcg_env, src);
1573 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1574
1575 gen_store_fpr_F(dc, rd, dst);
1576 }
1577
1578 static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1579 void (*gen)(TCGv_i32, TCGv_i32))
1580 {
1581 TCGv_i32 dst, src;
1582
1583 src = gen_load_fpr_F(dc, rs);
1584 dst = gen_dest_fpr_F(dc);
1585
1586 gen(dst, src);
1587
1588 gen_store_fpr_F(dc, rd, dst);
1589 }
1590
1591 static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1592 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1593 {
1594 TCGv_i32 dst, src1, src2;
1595
1596 src1 = gen_load_fpr_F(dc, rs1);
1597 src2 = gen_load_fpr_F(dc, rs2);
1598 dst = gen_dest_fpr_F(dc);
1599
1600 gen(dst, tcg_env, src1, src2);
1601 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1602
1603 gen_store_fpr_F(dc, rd, dst);
1604 }
1605
1606 #ifdef TARGET_SPARC64
1607 static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1608 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1609 {
1610 TCGv_i32 dst, src1, src2;
1611
1612 src1 = gen_load_fpr_F(dc, rs1);
1613 src2 = gen_load_fpr_F(dc, rs2);
1614 dst = gen_dest_fpr_F(dc);
1615
1616 gen(dst, src1, src2);
1617
1618 gen_store_fpr_F(dc, rd, dst);
1619 }
1620 #endif
1621
1622 static void gen_fop_DD(DisasContext *dc, int rd, int rs,
1623 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1624 {
1625 TCGv_i64 dst, src;
1626
1627 src = gen_load_fpr_D(dc, rs);
1628 dst = gen_dest_fpr_D(dc, rd);
1629
1630 gen(dst, tcg_env, src);
1631 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1632
1633 gen_store_fpr_D(dc, rd, dst);
1634 }
1635
1636 #ifdef TARGET_SPARC64
1637 static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1638 void (*gen)(TCGv_i64, TCGv_i64))
1639 {
1640 TCGv_i64 dst, src;
1641
1642 src = gen_load_fpr_D(dc, rs);
1643 dst = gen_dest_fpr_D(dc, rd);
1644
1645 gen(dst, src);
1646
1647 gen_store_fpr_D(dc, rd, dst);
1648 }
1649 #endif
1650
1651 static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1652 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1653 {
1654 TCGv_i64 dst, src1, src2;
1655
1656 src1 = gen_load_fpr_D(dc, rs1);
1657 src2 = gen_load_fpr_D(dc, rs2);
1658 dst = gen_dest_fpr_D(dc, rd);
1659
1660 gen(dst, tcg_env, src1, src2);
1661 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1662
1663 gen_store_fpr_D(dc, rd, dst);
1664 }
1665
1666 #ifdef TARGET_SPARC64
1667 static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1668 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1669 {
1670 TCGv_i64 dst, src1, src2;
1671
1672 src1 = gen_load_fpr_D(dc, rs1);
1673 src2 = gen_load_fpr_D(dc, rs2);
1674 dst = gen_dest_fpr_D(dc, rd);
1675
1676 gen(dst, src1, src2);
1677
1678 gen_store_fpr_D(dc, rd, dst);
1679 }
1680
1681 static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1682 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1683 {
1684 TCGv_i64 dst, src1, src2;
1685
1686 src1 = gen_load_fpr_D(dc, rs1);
1687 src2 = gen_load_fpr_D(dc, rs2);
1688 dst = gen_dest_fpr_D(dc, rd);
1689
1690 gen(dst, cpu_gsr, src1, src2);
1691
1692 gen_store_fpr_D(dc, rd, dst);
1693 }
1694
1695 static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1696 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1697 {
1698 TCGv_i64 dst, src0, src1, src2;
1699
1700 src1 = gen_load_fpr_D(dc, rs1);
1701 src2 = gen_load_fpr_D(dc, rs2);
1702 src0 = gen_load_fpr_D(dc, rd);
1703 dst = gen_dest_fpr_D(dc, rd);
1704
1705 gen(dst, src0, src1, src2);
1706
1707 gen_store_fpr_D(dc, rd, dst);
1708 }
1709 #endif
1710
1711 static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1712 void (*gen)(TCGv_ptr))
1713 {
1714 gen_op_load_fpr_QT1(QFPREG(rs));
1715
1716 gen(tcg_env);
1717 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1718
1719 gen_op_store_QT0_fpr(QFPREG(rd));
1720 gen_update_fprs_dirty(dc, QFPREG(rd));
1721 }
1722
1723 #ifdef TARGET_SPARC64
1724 static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1725 void (*gen)(TCGv_ptr))
1726 {
1727 gen_op_load_fpr_QT1(QFPREG(rs));
1728
1729 gen(tcg_env);
1730
1731 gen_op_store_QT0_fpr(QFPREG(rd));
1732 gen_update_fprs_dirty(dc, QFPREG(rd));
1733 }
1734 #endif
1735
1736 static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1737 void (*gen)(TCGv_ptr))
1738 {
1739 gen_op_load_fpr_QT0(QFPREG(rs1));
1740 gen_op_load_fpr_QT1(QFPREG(rs2));
1741
1742 gen(tcg_env);
1743 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1744
1745 gen_op_store_QT0_fpr(QFPREG(rd));
1746 gen_update_fprs_dirty(dc, QFPREG(rd));
1747 }
1748
1749 static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1750 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1751 {
1752 TCGv_i64 dst;
1753 TCGv_i32 src1, src2;
1754
1755 src1 = gen_load_fpr_F(dc, rs1);
1756 src2 = gen_load_fpr_F(dc, rs2);
1757 dst = gen_dest_fpr_D(dc, rd);
1758
1759 gen(dst, tcg_env, src1, src2);
1760 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1761
1762 gen_store_fpr_D(dc, rd, dst);
1763 }
1764
1765 static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1766 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1767 {
1768 TCGv_i64 src1, src2;
1769
1770 src1 = gen_load_fpr_D(dc, rs1);
1771 src2 = gen_load_fpr_D(dc, rs2);
1772
1773 gen(tcg_env, src1, src2);
1774 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1775
1776 gen_op_store_QT0_fpr(QFPREG(rd));
1777 gen_update_fprs_dirty(dc, QFPREG(rd));
1778 }
1779
1780 #ifdef TARGET_SPARC64
1781 static void gen_fop_DF(DisasContext *dc, int rd, int rs,
1782 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1783 {
1784 TCGv_i64 dst;
1785 TCGv_i32 src;
1786
1787 src = gen_load_fpr_F(dc, rs);
1788 dst = gen_dest_fpr_D(dc, rd);
1789
1790 gen(dst, tcg_env, src);
1791 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1792
1793 gen_store_fpr_D(dc, rd, dst);
1794 }
1795 #endif
1796
1797 static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1798 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1799 {
1800 TCGv_i64 dst;
1801 TCGv_i32 src;
1802
1803 src = gen_load_fpr_F(dc, rs);
1804 dst = gen_dest_fpr_D(dc, rd);
1805
1806 gen(dst, tcg_env, src);
1807
1808 gen_store_fpr_D(dc, rd, dst);
1809 }
1810
1811 static void gen_fop_FD(DisasContext *dc, int rd, int rs,
1812 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1813 {
1814 TCGv_i32 dst;
1815 TCGv_i64 src;
1816
1817 src = gen_load_fpr_D(dc, rs);
1818 dst = gen_dest_fpr_F(dc);
1819
1820 gen(dst, tcg_env, src);
1821 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1822
1823 gen_store_fpr_F(dc, rd, dst);
1824 }
1825
1826 static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1827 void (*gen)(TCGv_i32, TCGv_ptr))
1828 {
1829 TCGv_i32 dst;
1830
1831 gen_op_load_fpr_QT1(QFPREG(rs));
1832 dst = gen_dest_fpr_F(dc);
1833
1834 gen(dst, tcg_env);
1835 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1836
1837 gen_store_fpr_F(dc, rd, dst);
1838 }
1839
1840 static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1841 void (*gen)(TCGv_i64, TCGv_ptr))
1842 {
1843 TCGv_i64 dst;
1844
1845 gen_op_load_fpr_QT1(QFPREG(rs));
1846 dst = gen_dest_fpr_D(dc, rd);
1847
1848 gen(dst, tcg_env);
1849 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
1850
1851 gen_store_fpr_D(dc, rd, dst);
1852 }
1853
1854 static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1855 void (*gen)(TCGv_ptr, TCGv_i32))
1856 {
1857 TCGv_i32 src;
1858
1859 src = gen_load_fpr_F(dc, rs);
1860
1861 gen(tcg_env, src);
1862
1863 gen_op_store_QT0_fpr(QFPREG(rd));
1864 gen_update_fprs_dirty(dc, QFPREG(rd));
1865 }
1866
1867 static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1868 void (*gen)(TCGv_ptr, TCGv_i64))
1869 {
1870 TCGv_i64 src;
1871
1872 src = gen_load_fpr_D(dc, rs);
1873
1874 gen(tcg_env, src);
1875
1876 gen_op_store_QT0_fpr(QFPREG(rd));
1877 gen_update_fprs_dirty(dc, QFPREG(rd));
1878 }
1879
1880 static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
1881 TCGv addr, int mmu_idx, MemOp memop)
1882 {
1883 gen_address_mask(dc, addr);
1884 tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
1885 }
1886
1887 static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
1888 {
1889 TCGv m1 = tcg_constant_tl(0xff);
1890 gen_address_mask(dc, addr);
1891 tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
1892 }
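/*
 * LDSTUB is modelled as an atomic exchange with the constant 0xff:
 * the old byte comes back in dst and the location is left all-ones,
 * which is all the architecture requires of this test-and-set
 * primitive.
 */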
1893
1894 /* asi moves */
1895 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1896 typedef enum {
1897 GET_ASI_HELPER,
1898 GET_ASI_EXCP,
1899 GET_ASI_DIRECT,
1900 GET_ASI_DTWINX,
1901 GET_ASI_BLOCK,
1902 GET_ASI_SHORT,
1903 GET_ASI_BCOPY,
1904 GET_ASI_BFILL,
1905 } ASIType;
1906
1907 typedef struct {
1908 ASIType type;
1909 int asi;
1910 int mem_idx;
1911 MemOp memop;
1912 } DisasASI;
1913
1914 static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
1915 {
1916 int asi = GET_FIELD(insn, 19, 26);
1917 ASIType type = GET_ASI_HELPER;
1918 int mem_idx = dc->mem_idx;
1919
1920 #ifndef TARGET_SPARC64
1921 /* Before v9, all asis are immediate and privileged. */
1922 if (IS_IMM) {
1923 gen_exception(dc, TT_ILL_INSN);
1924 type = GET_ASI_EXCP;
1925 } else if (supervisor(dc)
1926 /* Note that LEON accepts ASI_USERDATA in user mode, for
1927 use with CASA. Also note that previous versions of
1928 QEMU allowed (and old versions of gcc emitted) ASI_P
1929 for LEON, which is incorrect. */
1930 || (asi == ASI_USERDATA
1931 && (dc->def->features & CPU_FEATURE_CASA))) {
1932 switch (asi) {
1933 case ASI_USERDATA: /* User data access */
1934 mem_idx = MMU_USER_IDX;
1935 type = GET_ASI_DIRECT;
1936 break;
1937 case ASI_KERNELDATA: /* Supervisor data access */
1938 mem_idx = MMU_KERNEL_IDX;
1939 type = GET_ASI_DIRECT;
1940 break;
1941 case ASI_M_BYPASS: /* MMU passthrough */
1942 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1943 mem_idx = MMU_PHYS_IDX;
1944 type = GET_ASI_DIRECT;
1945 break;
1946 case ASI_M_BCOPY: /* Block copy, sta access */
1947 mem_idx = MMU_KERNEL_IDX;
1948 type = GET_ASI_BCOPY;
1949 break;
1950 case ASI_M_BFILL: /* Block fill, stda access */
1951 mem_idx = MMU_KERNEL_IDX;
1952 type = GET_ASI_BFILL;
1953 break;
1954 }
1955
1956 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1957 * permissions check in get_physical_address(..).
1958 */
1959 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1960 } else {
1961 gen_exception(dc, TT_PRIV_INSN);
1962 type = GET_ASI_EXCP;
1963 }
1964 #else
1965 if (IS_IMM) {
1966 asi = dc->asi;
1967 }
1968 /* With v9, all asis below 0x80 are privileged. */
1969 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1970 down that bit into DisasContext. For the moment that's ok,
1971 since the direct implementations below don't have any ASIs
1972 in the restricted [0x30, 0x7f] range, and the check will be
1973 done properly in the helper. */
1974 if (!supervisor(dc) && asi < 0x80) {
1975 gen_exception(dc, TT_PRIV_ACT);
1976 type = GET_ASI_EXCP;
1977 } else {
1978 switch (asi) {
1979 case ASI_REAL: /* Bypass */
1980 case ASI_REAL_IO: /* Bypass, non-cacheable */
1981 case ASI_REAL_L: /* Bypass LE */
1982 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1983 case ASI_TWINX_REAL: /* Real address, twinx */
1984 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1985 case ASI_QUAD_LDD_PHYS:
1986 case ASI_QUAD_LDD_PHYS_L:
1987 mem_idx = MMU_PHYS_IDX;
1988 break;
1989 case ASI_N: /* Nucleus */
1990 case ASI_NL: /* Nucleus LE */
1991 case ASI_TWINX_N:
1992 case ASI_TWINX_NL:
1993 case ASI_NUCLEUS_QUAD_LDD:
1994 case ASI_NUCLEUS_QUAD_LDD_L:
1995 if (hypervisor(dc)) {
1996 mem_idx = MMU_PHYS_IDX;
1997 } else {
1998 mem_idx = MMU_NUCLEUS_IDX;
1999 }
2000 break;
2001 case ASI_AIUP: /* As if user primary */
2002 case ASI_AIUPL: /* As if user primary LE */
2003 case ASI_TWINX_AIUP:
2004 case ASI_TWINX_AIUP_L:
2005 case ASI_BLK_AIUP_4V:
2006 case ASI_BLK_AIUP_L_4V:
2007 case ASI_BLK_AIUP:
2008 case ASI_BLK_AIUPL:
2009 mem_idx = MMU_USER_IDX;
2010 break;
2011 case ASI_AIUS: /* As if user secondary */
2012 case ASI_AIUSL: /* As if user secondary LE */
2013 case ASI_TWINX_AIUS:
2014 case ASI_TWINX_AIUS_L:
2015 case ASI_BLK_AIUS_4V:
2016 case ASI_BLK_AIUS_L_4V:
2017 case ASI_BLK_AIUS:
2018 case ASI_BLK_AIUSL:
2019 mem_idx = MMU_USER_SECONDARY_IDX;
2020 break;
2021 case ASI_S: /* Secondary */
2022 case ASI_SL: /* Secondary LE */
2023 case ASI_TWINX_S:
2024 case ASI_TWINX_SL:
2025 case ASI_BLK_COMMIT_S:
2026 case ASI_BLK_S:
2027 case ASI_BLK_SL:
2028 case ASI_FL8_S:
2029 case ASI_FL8_SL:
2030 case ASI_FL16_S:
2031 case ASI_FL16_SL:
2032 if (mem_idx == MMU_USER_IDX) {
2033 mem_idx = MMU_USER_SECONDARY_IDX;
2034 } else if (mem_idx == MMU_KERNEL_IDX) {
2035 mem_idx = MMU_KERNEL_SECONDARY_IDX;
2036 }
2037 break;
2038 case ASI_P: /* Primary */
2039 case ASI_PL: /* Primary LE */
2040 case ASI_TWINX_P:
2041 case ASI_TWINX_PL:
2042 case ASI_BLK_COMMIT_P:
2043 case ASI_BLK_P:
2044 case ASI_BLK_PL:
2045 case ASI_FL8_P:
2046 case ASI_FL8_PL:
2047 case ASI_FL16_P:
2048 case ASI_FL16_PL:
2049 break;
2050 }
2051 switch (asi) {
2052 case ASI_REAL:
2053 case ASI_REAL_IO:
2054 case ASI_REAL_L:
2055 case ASI_REAL_IO_L:
2056 case ASI_N:
2057 case ASI_NL:
2058 case ASI_AIUP:
2059 case ASI_AIUPL:
2060 case ASI_AIUS:
2061 case ASI_AIUSL:
2062 case ASI_S:
2063 case ASI_SL:
2064 case ASI_P:
2065 case ASI_PL:
2066 type = GET_ASI_DIRECT;
2067 break;
2068 case ASI_TWINX_REAL:
2069 case ASI_TWINX_REAL_L:
2070 case ASI_TWINX_N:
2071 case ASI_TWINX_NL:
2072 case ASI_TWINX_AIUP:
2073 case ASI_TWINX_AIUP_L:
2074 case ASI_TWINX_AIUS:
2075 case ASI_TWINX_AIUS_L:
2076 case ASI_TWINX_P:
2077 case ASI_TWINX_PL:
2078 case ASI_TWINX_S:
2079 case ASI_TWINX_SL:
2080 case ASI_QUAD_LDD_PHYS:
2081 case ASI_QUAD_LDD_PHYS_L:
2082 case ASI_NUCLEUS_QUAD_LDD:
2083 case ASI_NUCLEUS_QUAD_LDD_L:
2084 type = GET_ASI_DTWINX;
2085 break;
2086 case ASI_BLK_COMMIT_P:
2087 case ASI_BLK_COMMIT_S:
2088 case ASI_BLK_AIUP_4V:
2089 case ASI_BLK_AIUP_L_4V:
2090 case ASI_BLK_AIUP:
2091 case ASI_BLK_AIUPL:
2092 case ASI_BLK_AIUS_4V:
2093 case ASI_BLK_AIUS_L_4V:
2094 case ASI_BLK_AIUS:
2095 case ASI_BLK_AIUSL:
2096 case ASI_BLK_S:
2097 case ASI_BLK_SL:
2098 case ASI_BLK_P:
2099 case ASI_BLK_PL:
2100 type = GET_ASI_BLOCK;
2101 break;
2102 case ASI_FL8_S:
2103 case ASI_FL8_SL:
2104 case ASI_FL8_P:
2105 case ASI_FL8_PL:
2106 memop = MO_UB;
2107 type = GET_ASI_SHORT;
2108 break;
2109 case ASI_FL16_S:
2110 case ASI_FL16_SL:
2111 case ASI_FL16_P:
2112 case ASI_FL16_PL:
2113 memop = MO_TEUW;
2114 type = GET_ASI_SHORT;
2115 break;
2116 }
2117 /* The little-endian asis all have bit 3 set. */
2118 if (asi & 8) {
2119 memop ^= MO_BSWAP;
2120 }
2121 }
2122 #endif
2123
2124 return (DisasASI){ type, asi, mem_idx, memop };
2125 }
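/*
 * Example: on sparc64, "ldswa [%o0] ASI_PL, %o1" resolves to
 * GET_ASI_DIRECT with memop ^ MO_BSWAP (ASI_PL = 0x88 has bit 3 set),
 * so the access uses the normal primary-context mem_idx but is
 * byte-swapped; the twinx and block ASIs instead select the
 * GET_ASI_DTWINX and GET_ASI_BLOCK handlers used further below.
 */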
2126
2127 static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
2128 int insn, MemOp memop)
2129 {
2130 DisasASI da = get_asi(dc, insn, memop);
2131
2132 switch (da.type) {
2133 case GET_ASI_EXCP:
2134 break;
2135 case GET_ASI_DTWINX: /* Reserved for ldda. */
2136 gen_exception(dc, TT_ILL_INSN);
2137 break;
2138 case GET_ASI_DIRECT:
2139 gen_address_mask(dc, addr);
2140 tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
2141 break;
2142 default:
2143 {
2144 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2145 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2146
2147 save_state(dc);
2148 #ifdef TARGET_SPARC64
2149 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
2150 #else
2151 {
2152 TCGv_i64 t64 = tcg_temp_new_i64();
2153 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2154 tcg_gen_trunc_i64_tl(dst, t64);
2155 }
2156 #endif
2157 }
2158 break;
2159 }
2160 }
2161
2162 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2163 int insn, MemOp memop)
2164 {
2165 DisasASI da = get_asi(dc, insn, memop);
2166
2167 switch (da.type) {
2168 case GET_ASI_EXCP:
2169 break;
2170 case GET_ASI_DTWINX: /* Reserved for stda. */
2171 #ifndef TARGET_SPARC64
2172 gen_exception(dc, TT_ILL_INSN);
2173 break;
2174 #else
2175 if (!(dc->def->features & CPU_FEATURE_HYPV)) {
2176 /* Pre OpenSPARC CPUs don't have these */
2177 gen_exception(dc, TT_ILL_INSN);
2178 return;
2179 }
2180 /* In OpenSPARC T1+ CPUs, TWINX ASIs used in store instructions
2181 * are ST_BLKINIT_ ASIs. */
2182 #endif
2183 /* fall through */
2184 case GET_ASI_DIRECT:
2185 gen_address_mask(dc, addr);
2186 tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
2187 break;
2188 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2189 case GET_ASI_BCOPY:
2190 /* Copy 32 bytes from the address in SRC to ADDR. */
2191 /* ??? The original qemu code implies 4-byte alignment, dropping
2192 the low bits, but the only user I can find is the Linux kernel,
2193 which assumes 32-byte alignment; that would make more sense as a
2194 cacheline-style operation. */
2195 {
2196 TCGv saddr = tcg_temp_new();
2197 TCGv daddr = tcg_temp_new();
2198 TCGv four = tcg_constant_tl(4);
2199 TCGv_i32 tmp = tcg_temp_new_i32();
2200 int i;
2201
2202 tcg_gen_andi_tl(saddr, src, -4);
2203 tcg_gen_andi_tl(daddr, addr, -4);
2204 for (i = 0; i < 32; i += 4) {
2205 /* Since the loads and stores are paired, allow the
2206 copy to happen in the host endianness. */
2207 tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
2208 tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
2209 tcg_gen_add_tl(saddr, saddr, four);
2210 tcg_gen_add_tl(daddr, daddr, four);
2211 }
2212 }
2213 break;
2214 #endif
2215 default:
2216 {
2217 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2218 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2219
2220 save_state(dc);
2221 #ifdef TARGET_SPARC64
2222 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
2223 #else
2224 {
2225 TCGv_i64 t64 = tcg_temp_new_i64();
2226 tcg_gen_extu_tl_i64(t64, src);
2227 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2228 }
2229 #endif
2230
2231 /* A write to a TLB register may alter page maps. End the TB. */
2232 dc->npc = DYNAMIC_PC;
2233 }
2234 break;
2235 }
2236 }
2237
2238 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2239 TCGv addr, int insn)
2240 {
2241 DisasASI da = get_asi(dc, insn, MO_TEUL);
2242
2243 switch (da.type) {
2244 case GET_ASI_EXCP:
2245 break;
2246 case GET_ASI_DIRECT:
2247 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2248 break;
2249 default:
2250 /* ??? Should be DAE_invalid_asi. */
2251 gen_exception(dc, TT_DATA_ACCESS);
2252 break;
2253 }
2254 }
2255
2256 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2257 int insn, int rd)
2258 {
2259 DisasASI da = get_asi(dc, insn, MO_TEUL);
2260 TCGv oldv;
2261
2262 switch (da.type) {
2263 case GET_ASI_EXCP:
2264 return;
2265 case GET_ASI_DIRECT:
2266 oldv = tcg_temp_new();
2267 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2268 da.mem_idx, da.memop | MO_ALIGN);
2269 gen_store_gpr(dc, rd, oldv);
2270 break;
2271 default:
2272 /* ??? Should be DAE_invalid_asi. */
2273 gen_exception(dc, TT_DATA_ACCESS);
2274 break;
2275 }
2276 }
2277
2278 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2279 {
2280 DisasASI da = get_asi(dc, insn, MO_UB);
2281
2282 switch (da.type) {
2283 case GET_ASI_EXCP:
2284 break;
2285 case GET_ASI_DIRECT:
2286 gen_ldstub(dc, dst, addr, da.mem_idx);
2287 break;
2288 default:
2289 /* ??? In theory, this should raise DAE_invalid_asi.
2290 But the SS-20 ROMs do ldstuba [%l0] #ASI_M_CTL, %o1. */
2291 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
2292 gen_helper_exit_atomic(tcg_env);
2293 } else {
2294 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2295 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
2296 TCGv_i64 s64, t64;
2297
2298 save_state(dc);
2299 t64 = tcg_temp_new_i64();
2300 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2301
2302 s64 = tcg_constant_i64(0xff);
2303 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
2304
2305 tcg_gen_trunc_i64_tl(dst, t64);
2306
2307 /* End the TB. */
2308 dc->npc = DYNAMIC_PC;
2309 }
2310 break;
2311 }
2312 }
2313 #endif
2314
2315 #ifdef TARGET_SPARC64
2316 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2317 int insn, int size, int rd)
2318 {
2319 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2320 TCGv_i32 d32;
2321 TCGv_i64 d64;
2322
2323 switch (da.type) {
2324 case GET_ASI_EXCP:
2325 break;
2326
2327 case GET_ASI_DIRECT:
2328 gen_address_mask(dc, addr);
2329 switch (size) {
2330 case 4:
2331 d32 = gen_dest_fpr_F(dc);
2332 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
2333 gen_store_fpr_F(dc, rd, d32);
2334 break;
2335 case 8:
2336 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2337 da.memop | MO_ALIGN_4);
2338 break;
2339 case 16:
2340 d64 = tcg_temp_new_i64();
2341 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2342 tcg_gen_addi_tl(addr, addr, 8);
2343 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr, da.mem_idx,
2344 da.memop | MO_ALIGN_4);
2345 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2346 break;
2347 default:
2348 g_assert_not_reached();
2349 }
2350 break;
2351
2352 case GET_ASI_BLOCK:
2353 /* Valid for lddfa on aligned registers only. */
2354 if (size == 8 && (rd & 7) == 0) {
2355 MemOp memop;
2356 TCGv eight;
2357 int i;
2358
2359 gen_address_mask(dc, addr);
2360
2361 /* The first operation checks required alignment. */
2362 memop = da.memop | MO_ALIGN_64;
2363 eight = tcg_constant_tl(8);
2364 for (i = 0; ; ++i) {
2365 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2366 da.mem_idx, memop);
2367 if (i == 7) {
2368 break;
2369 }
2370 tcg_gen_add_tl(addr, addr, eight);
2371 memop = da.memop;
2372 }
2373 } else {
2374 gen_exception(dc, TT_ILL_INSN);
2375 }
2376 break;
2377
2378 case GET_ASI_SHORT:
2379 /* Valid for lddfa only. */
2380 if (size == 8) {
2381 gen_address_mask(dc, addr);
2382 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2383 da.memop | MO_ALIGN);
2384 } else {
2385 gen_exception(dc, TT_ILL_INSN);
2386 }
2387 break;
2388
2389 default:
2390 {
2391 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2392 TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
2393
2394 save_state(dc);
2395 /* According to the table in the UA2011 manual, the only
2396 other ASIs that are valid for ldfa/lddfa/ldqfa are
2397 the NO_FAULT ASIs. We still need a helper for these,
2398 but we can just use the integer ASI helper for them. */
2399 switch (size) {
2400 case 4:
2401 d64 = tcg_temp_new_i64();
2402 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2403 d32 = gen_dest_fpr_F(dc);
2404 tcg_gen_extrl_i64_i32(d32, d64);
2405 gen_store_fpr_F(dc, rd, d32);
2406 break;
2407 case 8:
2408 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
2409 break;
2410 case 16:
2411 d64 = tcg_temp_new_i64();
2412 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2413 tcg_gen_addi_tl(addr, addr, 8);
2414 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr, r_asi, r_mop);
2415 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2416 break;
2417 default:
2418 g_assert_not_reached();
2419 }
2420 }
2421 break;
2422 }
2423 }
2424
2425 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2426 int insn, int size, int rd)
2427 {
2428 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
2429 TCGv_i32 d32;
2430
2431 switch (da.type) {
2432 case GET_ASI_EXCP:
2433 break;
2434
2435 case GET_ASI_DIRECT:
2436 gen_address_mask(dc, addr);
2437 switch (size) {
2438 case 4:
2439 d32 = gen_load_fpr_F(dc, rd);
2440 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
2441 break;
2442 case 8:
2443 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2444 da.memop | MO_ALIGN_4);
2445 break;
2446 case 16:
2447 /* Only 4-byte alignment is required. However, it is legal for the
2448 CPU to signal the alignment fault, and the OS trap handler is
2449 required to fix it up. Requiring 16-byte alignment here avoids
2450 having to probe the second page before performing the first
2451 write. */
2452 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2453 da.memop | MO_ALIGN_16);
2454 tcg_gen_addi_tl(addr, addr, 8);
2455 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr, da.mem_idx, da.memop);
2456 break;
2457 default:
2458 g_assert_not_reached();
2459 }
2460 break;
2461
2462 case GET_ASI_BLOCK:
2463 /* Valid for stdfa on aligned registers only. */
2464 if (size == 8 && (rd & 7) == 0) {
2465 MemOp memop;
2466 TCGv eight;
2467 int i;
2468
2469 gen_address_mask(dc, addr);
2470
2471 /* The first operation checks required alignment. */
2472 memop = da.memop | MO_ALIGN_64;
2473 eight = tcg_constant_tl(8);
2474 for (i = 0; ; ++i) {
2475 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2476 da.mem_idx, memop);
2477 if (i == 7) {
2478 break;
2479 }
2480 tcg_gen_add_tl(addr, addr, eight);
2481 memop = da.memop;
2482 }
2483 } else {
2484 gen_exception(dc, TT_ILL_INSN);
2485 }
2486 break;
2487
2488 case GET_ASI_SHORT:
2489 /* Valid for stdfa only. */
2490 if (size == 8) {
2491 gen_address_mask(dc, addr);
2492 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2493 da.memop | MO_ALIGN);
2494 } else {
2495 gen_exception(dc, TT_ILL_INSN);
2496 }
2497 break;
2498
2499 default:
2500 /* According to the table in the UA2011 manual, the only
2501 other ASIs that are valid for stfa/stdfa/stqfa are
2502 the PST* ASIs, which aren't currently handled. */
2503 gen_exception(dc, TT_ILL_INSN);
2504 break;
2505 }
2506 }
2507
2508 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2509 {
2510 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2511 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2512 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2513
2514 switch (da.type) {
2515 case GET_ASI_EXCP:
2516 return;
2517
2518 case GET_ASI_DTWINX:
2519 gen_address_mask(dc, addr);
2520 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2521 tcg_gen_addi_tl(addr, addr, 8);
2522 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2523 break;
2524
2525 case GET_ASI_DIRECT:
2526 {
2527 TCGv_i64 tmp = tcg_temp_new_i64();
2528
2529 gen_address_mask(dc, addr);
2530 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
2531
2532 /* Note that LE ldda acts as if each 32-bit register
2533 result is byte-swapped. Having just performed one
2534 64-bit bswap, we now need to swap the writebacks. */
2535 if ((da.memop & MO_BSWAP) == MO_TE) {
2536 tcg_gen_extr32_i64(lo, hi, tmp);
2537 } else {
2538 tcg_gen_extr32_i64(hi, lo, tmp);
2539 }
2540 }
2541 break;
2542
2543 default:
2544 /* ??? In theory we've handled all of the ASIs that are valid
2545 for ldda, and this should raise DAE_invalid_asi. However,
2546 real hardware allows others. This can be seen with e.g.
2547 FreeBSD 10.3 wrt ASI_IC_TAG. */
2548 {
2549 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2550 TCGv_i32 r_mop = tcg_constant_i32(da.memop);
2551 TCGv_i64 tmp = tcg_temp_new_i64();
2552
2553 save_state(dc);
2554 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2555
2556 /* See above. */
2557 if ((da.memop & MO_BSWAP) == MO_TE) {
2558 tcg_gen_extr32_i64(lo, hi, tmp);
2559 } else {
2560 tcg_gen_extr32_i64(hi, lo, tmp);
2561 }
2562 }
2563 break;
2564 }
2565
2566 gen_store_gpr(dc, rd, hi);
2567 gen_store_gpr(dc, rd + 1, lo);
2568 }
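/*
 * Worked example for the LE direct case: with bytes 00 11 22 33 44 55
 * 66 77 at ADDR, the single 64-bit LE load yields 0x7766554433221100.
 * Per-register LE semantics want rd = 0x33221100 and rd+1 = 0x77665544,
 * i.e. HI takes the low half of the loaded value and LO the high half,
 * which is exactly what the swapped extr32 above produces.
 */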
2569
2570 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2571 int insn, int rd)
2572 {
2573 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2574 TCGv lo = gen_load_gpr(dc, rd + 1);
2575
2576 switch (da.type) {
2577 case GET_ASI_EXCP:
2578 break;
2579
2580 case GET_ASI_DTWINX:
2581 gen_address_mask(dc, addr);
2582 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2583 tcg_gen_addi_tl(addr, addr, 8);
2584 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2585 break;
2586
2587 case GET_ASI_DIRECT:
2588 {
2589 TCGv_i64 t64 = tcg_temp_new_i64();
2590
2591 /* Note that LE stda acts as if each 32-bit register result is
2592 byte-swapped. Since we perform one 64-bit LE store, we must
2593 swap the order of the construction. */
2594 if ((da.memop & MO_BSWAP) == MO_TE) {
2595 tcg_gen_concat32_i64(t64, lo, hi);
2596 } else {
2597 tcg_gen_concat32_i64(t64, hi, lo);
2598 }
2599 gen_address_mask(dc, addr);
2600 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2601 }
2602 break;
2603
2604 default:
2605 /* ??? In theory we've handled all of the ASIs that are valid
2606 for stda, and this should raise DAE_invalid_asi. */
2607 {
2608 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2609 TCGv_i32 r_mop = tcg_constant_i32(da.memop);
2610 TCGv_i64 t64 = tcg_temp_new_i64();
2611
2612 /* See above. */
2613 if ((da.memop & MO_BSWAP) == MO_TE) {
2614 tcg_gen_concat32_i64(t64, lo, hi);
2615 } else {
2616 tcg_gen_concat32_i64(t64, hi, lo);
2617 }
2618
2619 save_state(dc);
2620 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2621 }
2622 break;
2623 }
2624 }
2625
2626 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2627 int insn, int rd)
2628 {
2629 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2630 TCGv oldv;
2631
2632 switch (da.type) {
2633 case GET_ASI_EXCP:
2634 return;
2635 case GET_ASI_DIRECT:
2636 oldv = tcg_temp_new();
2637 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2638 da.mem_idx, da.memop | MO_ALIGN);
2639 gen_store_gpr(dc, rd, oldv);
2640 break;
2641 default:
2642 /* ??? Should be DAE_invalid_asi. */
2643 gen_exception(dc, TT_DATA_ACCESS);
2644 break;
2645 }
2646 }
2647
2648 #elif !defined(CONFIG_USER_ONLY)
2649 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2650 {
2651 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2652 whereby "rd + 1" elicits "error: array subscript is above array".
2653 Since we have already asserted that rd is even, the semantics
2654 are unchanged. */
2655 TCGv lo = gen_dest_gpr(dc, rd | 1);
2656 TCGv hi = gen_dest_gpr(dc, rd);
2657 TCGv_i64 t64 = tcg_temp_new_i64();
2658 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2659
2660 switch (da.type) {
2661 case GET_ASI_EXCP:
2662 return;
2663 case GET_ASI_DIRECT:
2664 gen_address_mask(dc, addr);
2665 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2666 break;
2667 default:
2668 {
2669 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2670 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
2671
2672 save_state(dc);
2673 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
2674 }
2675 break;
2676 }
2677
2678 tcg_gen_extr_i64_i32(lo, hi, t64);
2679 gen_store_gpr(dc, rd | 1, lo);
2680 gen_store_gpr(dc, rd, hi);
2681 }
2682
2683 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2684 int insn, int rd)
2685 {
2686 DisasASI da = get_asi(dc, insn, MO_TEUQ);
2687 TCGv lo = gen_load_gpr(dc, rd + 1);
2688 TCGv_i64 t64 = tcg_temp_new_i64();
2689
2690 tcg_gen_concat_tl_i64(t64, lo, hi);
2691
2692 switch (da.type) {
2693 case GET_ASI_EXCP:
2694 break;
2695 case GET_ASI_DIRECT:
2696 gen_address_mask(dc, addr);
2697 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
2698 break;
2699 case GET_ASI_BFILL:
2700 /* Store 32 bytes of T64 to ADDR. */
2701 /* ??? The original qemu code implies 8-byte alignment, dropping
2702 the low bits, but the only user I can find is the Linux kernel,
2703 which assumes 32-byte alignment; that would make more sense as a
2704 cacheline-style operation. */
2705 {
2706 TCGv d_addr = tcg_temp_new();
2707 TCGv eight = tcg_constant_tl(8);
2708 int i;
2709
2710 tcg_gen_andi_tl(d_addr, addr, -8);
2711 for (i = 0; i < 32; i += 8) {
2712 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2713 tcg_gen_add_tl(d_addr, d_addr, eight);
2714 }
2715 }
2716 break;
2717 default:
2718 {
2719 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
2720 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
2721
2722 save_state(dc);
2723 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2724 }
2725 break;
2726 }
2727 }
2728 #endif
2729
2730 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2731 {
2732 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2733 return gen_load_gpr(dc, rs1);
2734 }
2735
2736 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2737 {
2738 if (IS_IMM) { /* immediate */
2739 target_long simm = GET_FIELDs(insn, 19, 31);
2740 TCGv t = tcg_temp_new();
2741 tcg_gen_movi_tl(t, simm);
2742 return t;
2743 } else { /* register */
2744 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2745 return gen_load_gpr(dc, rs2);
2746 }
2747 }
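/*
 * GET_FIELDs sign-extends, so the 13-bit immediate covers
 * [-4096, 4095]; e.g. the field encoding 0x1fff decodes to -1.
 */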
2748
2749 #ifdef TARGET_SPARC64
2750 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2751 {
2752 TCGv_i32 c32, zero, dst, s1, s2;
2753
2754 /* We have two choices here: extend the 32-bit data and use
2755 movcond_i64, or fold the comparison down to 32 bits and use
2756 movcond_i32. Choose the latter. */
2757 c32 = tcg_temp_new_i32();
2758 if (cmp->is_bool) {
2759 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2760 } else {
2761 TCGv_i64 c64 = tcg_temp_new_i64();
2762 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2763 tcg_gen_extrl_i64_i32(c32, c64);
2764 }
2765
2766 s1 = gen_load_fpr_F(dc, rs);
2767 s2 = gen_load_fpr_F(dc, rd);
2768 dst = gen_dest_fpr_F(dc);
2769 zero = tcg_constant_i32(0);
2770
2771 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2772
2773 gen_store_fpr_F(dc, rd, dst);
2774 }
2775
2776 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2777 {
2778 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2779 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2780 gen_load_fpr_D(dc, rs),
2781 gen_load_fpr_D(dc, rd));
2782 gen_store_fpr_D(dc, rd, dst);
2783 }
2784
2785 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2786 {
2787 int qd = QFPREG(rd);
2788 int qs = QFPREG(rs);
2789
2790 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2791 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2792 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2793 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2794
2795 gen_update_fprs_dirty(dc, qd);
2796 }
2797
2798 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2799 {
2800 TCGv_i32 r_tl = tcg_temp_new_i32();
2801
2802 /* load env->tl into r_tl */
2803 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2804
2805 /* Clamp tl to [0 ... MAXTL_MASK]; MAXTL_MASK must be a power of 2 minus 1 */
2806 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2807
2808 /* calculate offset to current trap state from env->ts, reuse r_tl */
2809 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2810 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2811
2812 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2813 {
2814 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2815 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2816 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2817 }
2818 }
2819
2820 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2821 int width, bool cc, bool left)
2822 {
2823 TCGv lo1, lo2;
2824 uint64_t amask, tabl, tabr;
2825 int shift, imask, omask;
2826
2827 if (cc) {
2828 tcg_gen_mov_tl(cpu_cc_src, s1);
2829 tcg_gen_mov_tl(cpu_cc_src2, s2);
2830 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2831 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2832 dc->cc_op = CC_OP_SUB;
2833 }
2834
2835 /* Theory of operation: there are two tables, left and right (not to
2836 be confused with the left and right versions of the opcode). These
2837 are indexed by the low 3 bits of the inputs. To make things "easy",
2838 these tables are loaded into two constants, TABL and TABR below.
2839 The operation index = (input & imask) << shift calculates the index
2840 into the constant, while val = (table >> index) & omask calculates
2841 the value we're looking for. */
2842 switch (width) {
2843 case 8:
2844 imask = 0x7;
2845 shift = 3;
2846 omask = 0xff;
2847 if (left) {
2848 tabl = 0x80c0e0f0f8fcfeffULL;
2849 tabr = 0xff7f3f1f0f070301ULL;
2850 } else {
2851 tabl = 0x0103070f1f3f7fffULL;
2852 tabr = 0xfffefcf8f0e0c080ULL;
2853 }
2854 break;
2855 case 16:
2856 imask = 0x6;
2857 shift = 1;
2858 omask = 0xf;
2859 if (left) {
2860 tabl = 0x8cef;
2861 tabr = 0xf731;
2862 } else {
2863 tabl = 0x137f;
2864 tabr = 0xfec8;
2865 }
2866 break;
2867 case 32:
2868 imask = 0x4;
2869 shift = 0;
2870 omask = 0x3;
2871 if (left) {
2872 tabl = (2 << 2) | 3;
2873 tabr = (3 << 2) | 1;
2874 } else {
2875 tabl = (1 << 2) | 3;
2876 tabr = (3 << 2) | 2;
2877 }
2878 break;
2879 default:
2880 g_assert_not_reached();
2881 }
2882
2883 lo1 = tcg_temp_new();
2884 lo2 = tcg_temp_new();
2885 tcg_gen_andi_tl(lo1, s1, imask);
2886 tcg_gen_andi_tl(lo2, s2, imask);
2887 tcg_gen_shli_tl(lo1, lo1, shift);
2888 tcg_gen_shli_tl(lo2, lo2, shift);
2889
2890 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
2891 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
2892 tcg_gen_andi_tl(lo1, lo1, omask);
2893 tcg_gen_andi_tl(lo2, lo2, omask);
2894
2895 amask = -8;
2896 if (AM_CHECK(dc)) {
2897 amask &= 0xffffffffULL;
2898 }
2899 tcg_gen_andi_tl(s1, s1, amask);
2900 tcg_gen_andi_tl(s2, s2, amask);
2901
2902 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
2903 tcg_gen_and_tl(lo2, lo2, lo1);
2904 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
2905 }
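/*
 * Worked example: width 8 with left=false and s1 % 8 == 2 gives
 * index = 2 << 3 = 16, so lo1 = (0x0103070f1f3f7fff >> 16) & 0xff
 * = 0x3f.  The final movcond returns LO1 alone when both operands
 * point into the same 8-byte block, and LO1 & LO2 when they differ.
 */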
2906
2907 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2908 {
2909 TCGv tmp = tcg_temp_new();
2910
2911 tcg_gen_add_tl(tmp, s1, s2);
2912 tcg_gen_andi_tl(dst, tmp, -8);
2913 if (left) {
2914 tcg_gen_neg_tl(tmp, tmp);
2915 }
2916 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2917 }
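/*
 * For example, s1 + s2 == 0x1003 stores 0x1000 into DST and deposits
 * 3 (or -3 & 7 == 5 for the left variant) into the 3-bit GSR.align
 * field that gen_faligndata consumes below.
 */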
2918
2919 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2920 {
2921 TCGv t1, t2, shift;
2922
2923 t1 = tcg_temp_new();
2924 t2 = tcg_temp_new();
2925 shift = tcg_temp_new();
2926
2927 tcg_gen_andi_tl(shift, gsr, 7);
2928 tcg_gen_shli_tl(shift, shift, 3);
2929 tcg_gen_shl_tl(t1, s1, shift);
2930
2931 /* A shift of 64 is undefined in TCG and does not produce 0. Split
2932 this into a shift of up to 63 followed by a constant shift of 1. */
2933 tcg_gen_xori_tl(shift, shift, 63);
2934 tcg_gen_shr_tl(t2, s2, shift);
2935 tcg_gen_shri_tl(t2, t2, 1);
2936
2937 tcg_gen_or_tl(dst, t1, t2);
2938 }
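/*
 * Worked example: GSR.align == 3 gives shift = 24, so
 * dst = (s1 << 24) | (s2 >> 40): the last five bytes of S1 followed
 * by the first three bytes of S2.  For GSR.align == 0, the two-step
 * shift yields s2 >> 63 >> 1 == 0, sidestepping the undefined
 * shift-by-64.
 */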
2939 #endif
2940
2941 /* Include the auto-generated decoder. */
2942 #include "decode-insns.c.inc"
2943
2944 #define TRANS(NAME, AVAIL, FUNC, ...) \
2945 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2946 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
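/*
 * As an illustration, TRANS(RDCCR, 64, do_rd_special, true, a->rd,
 * do_rdccr) below expands to
 *
 *   static bool trans_RDCCR(DisasContext *dc, arg_RDCCR *a)
 *   { return avail_64(dc) && do_rd_special(dc, true, a->rd, do_rdccr); }
 *
 * so every insn is gated on its availability predicate before the
 * shared translation function runs.
 */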
2947
2948 #define avail_ALL(C) true
2949 #ifdef TARGET_SPARC64
2950 # define avail_32(C) false
2951 # define avail_ASR17(C) false
2952 # define avail_DIV(C) true
2953 # define avail_MUL(C) true
2954 # define avail_POWERDOWN(C) false
2955 # define avail_64(C) true
2956 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2957 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2958 #else
2959 # define avail_32(C) true
2960 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2961 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2962 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2963 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2964 # define avail_64(C) false
2965 # define avail_GL(C) false
2966 # define avail_HYPV(C) false
2967 #endif
2968
2969 /* Default case for non-jump instructions. */
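/*
 * DYNAMIC_PC, JUMP_PC and DYNAMIC_PC_LOOKUP are the sentinel values
 * 1-3, while any real npc is 4-byte aligned, so the "npc & 3" test
 * below cheaply distinguishes a sentinel from a concrete address.
 */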
2970 static bool advance_pc(DisasContext *dc)
2971 {
2972 if (dc->npc & 3) {
2973 switch (dc->npc) {
2974 case DYNAMIC_PC:
2975 case DYNAMIC_PC_LOOKUP:
2976 dc->pc = dc->npc;
2977 gen_op_next_insn();
2978 break;
2979 case JUMP_PC:
2980 /* we can do a static jump */
2981 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
2982 dc->base.is_jmp = DISAS_NORETURN;
2983 break;
2984 default:
2985 g_assert_not_reached();
2986 }
2987 } else {
2988 dc->pc = dc->npc;
2989 dc->npc = dc->npc + 4;
2990 }
2991 return true;
2992 }
2993
2994 /*
2995 * Major opcodes 00 and 01 -- branches, call, and sethi
2996 */
2997
2998 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2999 {
3000 if (annul) {
3001 dc->pc = dc->npc + 4;
3002 dc->npc = dc->pc + 4;
3003 } else {
3004 dc->pc = dc->npc;
3005 dc->npc = dc->pc + 4;
3006 }
3007 return true;
3008 }
3009
3010 static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
3011 target_ulong dest)
3012 {
3013 if (annul) {
3014 dc->pc = dest;
3015 dc->npc = dest + 4;
3016 } else {
3017 dc->pc = dc->npc;
3018 dc->npc = dest;
3019 tcg_gen_mov_tl(cpu_pc, cpu_npc);
3020 }
3021 return true;
3022 }
3023
3024 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
3025 bool annul, target_ulong dest)
3026 {
3027 target_ulong npc = dc->npc;
3028
3029 if (annul) {
3030 TCGLabel *l1 = gen_new_label();
3031
3032 tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
3033 gen_goto_tb(dc, 0, npc, dest);
3034 gen_set_label(l1);
3035 gen_goto_tb(dc, 1, npc + 4, npc + 8);
3036
3037 dc->base.is_jmp = DISAS_NORETURN;
3038 } else {
3039 if (npc & 3) {
3040 switch (npc) {
3041 case DYNAMIC_PC:
3042 case DYNAMIC_PC_LOOKUP:
3043 tcg_gen_mov_tl(cpu_pc, cpu_npc);
3044 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
3045 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
3046 cmp->c1, cmp->c2,
3047 tcg_constant_tl(dest), cpu_npc);
3048 dc->pc = npc;
3049 break;
3050 default:
3051 g_assert_not_reached();
3052 }
3053 } else {
3054 dc->pc = npc;
3055 dc->jump_pc[0] = dest;
3056 dc->jump_pc[1] = npc + 4;
3057 dc->npc = JUMP_PC;
3058 if (cmp->is_bool) {
3059 tcg_gen_mov_tl(cpu_cond, cmp->c1);
3060 } else {
3061 tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
3062 }
3063 }
3064 }
3065 return true;
3066 }
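/*
 * In the non-annulled static case above, cpu_cond is left holding the
 * evaluated condition and npc is set to the JUMP_PC sentinel; the
 * actual two-way branch between jump_pc[0] and jump_pc[1] is emitted
 * later, e.g. by gen_branch2 from advance_pc once the delay-slot insn
 * has been translated.
 */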
3067
3068 static bool raise_priv(DisasContext *dc)
3069 {
3070 gen_exception(dc, TT_PRIV_INSN);
3071 return true;
3072 }
3073
3074 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
3075 {
3076 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3077 DisasCompare cmp;
3078
3079 switch (a->cond) {
3080 case 0x0:
3081 return advance_jump_uncond_never(dc, a->a);
3082 case 0x8:
3083 return advance_jump_uncond_always(dc, a->a, target);
3084 default:
3085 flush_cond(dc);
3086
3087 gen_compare(&cmp, a->cc, a->cond, dc);
3088 return advance_jump_cond(dc, &cmp, a->a, target);
3089 }
3090 }
3091
3092 TRANS(Bicc, ALL, do_bpcc, a)
3093 TRANS(BPcc, 64, do_bpcc, a)
3094
3095 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
3096 {
3097 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3098 DisasCompare cmp;
3099
3100 if (gen_trap_ifnofpu(dc)) {
3101 return true;
3102 }
3103 switch (a->cond) {
3104 case 0x0:
3105 return advance_jump_uncond_never(dc, a->a);
3106 case 0x8:
3107 return advance_jump_uncond_always(dc, a->a, target);
3108 default:
3109 flush_cond(dc);
3110
3111 gen_fcompare(&cmp, a->cc, a->cond);
3112 return advance_jump_cond(dc, &cmp, a->a, target);
3113 }
3114 }
3115
3116 TRANS(FBPfcc, 64, do_fbpfcc, a)
3117 TRANS(FBfcc, ALL, do_fbpfcc, a)
3118
3119 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
3120 {
3121 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3122 DisasCompare cmp;
3123
3124 if (!avail_64(dc)) {
3125 return false;
3126 }
3127 if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
3128 return false;
3129 }
3130
3131 flush_cond(dc);
3132 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
3133 return advance_jump_cond(dc, &cmp, a->a, target);
3134 }
3135
3136 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
3137 {
3138 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
3139
3140 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
3141 gen_mov_pc_npc(dc);
3142 dc->npc = target;
3143 return true;
3144 }
3145
3146 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
3147 {
3148 /*
3149 * For sparc32, always generate the no-coprocessor exception.
3150 * For sparc64, always generate the illegal instruction exception.
3151 */
3152 #ifdef TARGET_SPARC64
3153 return false;
3154 #else
3155 gen_exception(dc, TT_NCP_INSN);
3156 return true;
3157 #endif
3158 }
3159
3160 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
3161 {
3162 /* Special-case %g0 because that's the canonical nop. */
3163 if (a->rd) {
3164 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
3165 }
3166 return advance_pc(dc);
3167 }
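/*
 * For example, "sethi %hi(0x12345678), %o0" has imm22 = 0x48d15 and
 * stores 0x48d15 << 10 = 0x12345400, leaving the low 10 bits to a
 * following "or %o0, 0x278, %o0".
 */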
3168
3169 /*
3170 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
3171 */
3172
3173 static bool do_tcc(DisasContext *dc, int cond, int cc,
3174 int rs1, bool imm, int rs2_or_imm)
3175 {
3176 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3177 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3178 DisasCompare cmp;
3179 TCGLabel *lab;
3180 TCGv_i32 trap;
3181
3182 /* Trap never. */
3183 if (cond == 0) {
3184 return advance_pc(dc);
3185 }
3186
3187 /*
3188 * Immediate traps are the most common case. Since this value is
3189 * live across the branch, it really pays to evaluate the constant.
3190 */
3191 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
3192 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
3193 } else {
3194 trap = tcg_temp_new_i32();
3195 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
3196 if (imm) {
3197 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
3198 } else {
3199 TCGv_i32 t2 = tcg_temp_new_i32();
3200 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
3201 tcg_gen_add_i32(trap, trap, t2);
3202 }
3203 tcg_gen_andi_i32(trap, trap, mask);
3204 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3205 }
3206
3207 /* Trap always. */
3208 if (cond == 8) {
3209 save_state(dc);
3210 gen_helper_raise_exception(tcg_env, trap);
3211 dc->base.is_jmp = DISAS_NORETURN;
3212 return true;
3213 }
3214
3215 /* Conditional trap. */
3216 flush_cond(dc);
3217 lab = delay_exceptionv(dc, trap);
3218 gen_compare(&cmp, cc, cond, dc);
3219 tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);
3220
3221 return advance_pc(dc);
3222 }
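/*
 * For example, "ta 5" takes the constant path above and raises trap
 * number (5 & mask) + TT_TRAP unconditionally, while a form with
 * rs1 != 0 computes the trap number at run time and reaches the
 * exception through the delayed-exception branch only when the
 * condition holds.
 */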
3223
3224 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
3225 {
3226 if (avail_32(dc) && a->cc) {
3227 return false;
3228 }
3229 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
3230 }
3231
3232 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
3233 {
3234 if (avail_64(dc)) {
3235 return false;
3236 }
3237 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
3238 }
3239
3240 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
3241 {
3242 if (avail_32(dc)) {
3243 return false;
3244 }
3245 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
3246 }
3247
3248 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
3249 {
3250 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
3251 return advance_pc(dc);
3252 }
3253
3254 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
3255 {
3256 if (avail_32(dc)) {
3257 return false;
3258 }
3259 if (a->mmask) {
3260 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
3261 tcg_gen_mb(a->mmask | TCG_BAR_SC);
3262 }
3263 if (a->cmask) {
3264 /* For #Sync, etc, end the TB to recognize interrupts. */
3265 dc->base.is_jmp = DISAS_EXIT;
3266 }
3267 return advance_pc(dc);
3268 }
3269
3270 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
3271 TCGv (*func)(DisasContext *, TCGv))
3272 {
3273 if (!priv) {
3274 return raise_priv(dc);
3275 }
3276 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
3277 return advance_pc(dc);
3278 }
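/*
 * Note that FUNC may either fill in the passed-in destination and
 * return it, or ignore it and return a live global such as cpu_y;
 * gen_store_gpr copies from whichever TCGv comes back, and writes
 * to %g0 are discarded there.
 */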
3279
3280 static TCGv do_rdy(DisasContext *dc, TCGv dst)
3281 {
3282 return cpu_y;
3283 }
3284
3285 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
3286 {
3287 /*
3288 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
3289 * 32-bit cpus like sparcv7, which ignores the rs1 field.
3290 * This pattern matches after all other ASRs, so the Leon3 %asr17 read is handled first.
3291 */
3292 if (avail_64(dc) && a->rs1 != 0) {
3293 return false;
3294 }
3295 return do_rd_special(dc, true, a->rd, do_rdy);
3296 }
3297
3298 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
3299 {
3300 uint32_t val;
3301
3302 /*
3303 * TODO: There are many more fields to be filled,
3304 * some of which are writable.
3305 */
3306 val = dc->def->nwindows - 1; /* [4:0] NWIN */
3307 val |= 1 << 8; /* [8] V8 */
3308
3309 return tcg_constant_tl(val);
3310 }
3311
3312 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
3313
3314 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
3315 {
3316 update_psr(dc);
3317 gen_helper_rdccr(dst, tcg_env);
3318 return dst;
3319 }
3320
3321 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
3322
3323 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
3324 {
3325 #ifdef TARGET_SPARC64
3326 return tcg_constant_tl(dc->asi);
3327 #else
3328 qemu_build_not_reached();
3329 #endif
3330 }
3331
3332 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
3333
3334 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
3335 {
3336 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3337
3338 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3339 if (translator_io_start(&dc->base)) {
3340 dc->base.is_jmp = DISAS_EXIT;
3341 }
3342 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3343 tcg_constant_i32(dc->mem_idx));
3344 return dst;
3345 }
3346
3347 /* TODO: non-priv access only allowed when enabled. */
3348 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
3349
3350 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
3351 {
3352 return tcg_constant_tl(address_mask_i(dc, dc->pc));
3353 }
3354
3355 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
3356
3357 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
3358 {
3359 tcg_gen_ext_i32_tl(dst, cpu_fprs);
3360 return dst;
3361 }
3362
3363 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
3364
3365 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
3366 {
3367 gen_trap_ifnofpu(dc);
3368 return cpu_gsr;
3369 }
3370
3371 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
3372
3373 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
3374 {
3375 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
3376 return dst;
3377 }
3378
3379 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
3380
3381 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
3382 {
3383 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
3384 return dst;
3385 }
3386
3387 /* TODO: non-priv access only allowed when enabled. */
3388 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
3389
3390 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
3391 {
3392 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3393
3394 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3395 if (translator_io_start(&dc->base)) {
3396 dc->base.is_jmp = DISAS_EXIT;
3397 }
3398 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
3399 tcg_constant_i32(dc->mem_idx));
3400 return dst;
3401 }
3402
3403 /* TODO: non-priv access only allowed when enabled. */
3404 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
3405
3406 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
3407 {
3408 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
3409 return dst;
3410 }
3411
3412 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3413 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3414
3415 /*
3416 * UltraSPARC-T1 Strand status.
3417 * The HYPV check may not be sufficient: UA2005 and UA2007 describe
3418 * this ASR as implementation dependent.
3419 */
3420 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3421 {
3422 return tcg_constant_tl(1);
3423 }
3424
3425 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3426
3427 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3428 {
3429 update_psr(dc);
3430 gen_helper_rdpsr(dst, tcg_env);
3431 return dst;
3432 }
3433
3434 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3435
3436 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
3437 {
3438 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
3439 return dst;
3440 }
3441
3442 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3443
3444 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3445 {
3446 TCGv_i32 tl = tcg_temp_new_i32();
3447 TCGv_ptr tp = tcg_temp_new_ptr();
3448
3449 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3450 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3451 tcg_gen_shli_i32(tl, tl, 3);
3452 tcg_gen_ext_i32_ptr(tp, tl);
3453 tcg_gen_add_ptr(tp, tp, tcg_env);
3454
3455 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3456 return dst;
3457 }
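/*
 * The shift by 3 scales TL by the size of one 64-bit htstate entry,
 * so this loads dst = env->htstate[tl & MAXTL_MASK].
 */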
3458
3459 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3460
3461 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3462 {
3463 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3464 return dst;
3465 }
3466
3467 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3468
3469 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3470 {
3471 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3472 return dst;
3473 }
3474
3475 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3476
3477 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3478 {
3479 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3480 return dst;
3481 }
3482
3483 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3484
3485 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3486 {
3487 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3488 return dst;
3489 }
3490
3491 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3492 do_rdhstick_cmpr)
3493
3494 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3495 {
3496 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3497 return dst;
3498 }
3499
3500 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3501
3502 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3503 {
3504 #ifdef TARGET_SPARC64
3505 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3506
3507 gen_load_trap_state_at_tl(r_tsptr);
3508 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3509 return dst;
3510 #else
3511 qemu_build_not_reached();
3512 #endif
3513 }
3514
3515 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3516
3517 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3518 {
3519 #ifdef TARGET_SPARC64
3520 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3521
3522 gen_load_trap_state_at_tl(r_tsptr);
3523 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3524 return dst;
3525 #else
3526 qemu_build_not_reached();
3527 #endif
3528 }
3529
3530 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3531
3532 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3533 {
3534 #ifdef TARGET_SPARC64
3535 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3536
3537 gen_load_trap_state_at_tl(r_tsptr);
3538 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3539 return dst;
3540 #else
3541 qemu_build_not_reached();
3542 #endif
3543 }
3544
3545 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3546
3547 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3548 {
3549 #ifdef TARGET_SPARC64
3550 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3551
3552 gen_load_trap_state_at_tl(r_tsptr);
3553 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3554 return dst;
3555 #else
3556 qemu_build_not_reached();
3557 #endif
3558 }
3559
3560 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3561 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3562
3563 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3564 {
3565 return cpu_tbr;
3566 }
3567
3568 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3569 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3570
3571 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3572 {
3573 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3574 return dst;
3575 }
3576
3577 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3578
3579 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3580 {
3581 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3582 return dst;
3583 }
3584
3585 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3586
3587 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3588 {
3589 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3590 return dst;
3591 }
3592
3593 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3594
3595 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3596 {
3597 gen_helper_rdcwp(dst, tcg_env);
3598 return dst;
3599 }
3600
3601 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3602
3603 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3604 {
3605 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3606 return dst;
3607 }
3608
3609 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3610
3611 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3612 {
3613 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3614 return dst;
3615 }
3616
3617 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3618 do_rdcanrestore)
3619
3620 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3621 {
3622 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3623 return dst;
3624 }
3625
3626 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3627
3628 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3629 {
3630 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3631 return dst;
3632 }
3633
3634 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3635
3636 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3637 {
3638 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3639 return dst;
3640 }
3641
3642 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3643
3644 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3645 {
3646 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3647 return dst;
3648 }
3649
3650 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3651
3652 /* UA2005 strand status */
3653 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3654 {
3655 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3656 return dst;
3657 }
3658
3659 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3660
3661 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3662 {
3663 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3664 return dst;
3665 }
3666
3667 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3668
3669 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3670 {
3671 if (avail_64(dc)) {
3672 gen_helper_flushw(tcg_env);
3673 return advance_pc(dc);
3674 }
3675 return false;
3676 }
3677
3678 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3679 void (*func)(DisasContext *, TCGv))
3680 {
3681 TCGv src;
3682
3683 /* For simplicity, we under-decoded the rs2 form. */
3684 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3685 return false;
3686 }
3687 if (!priv) {
3688 return raise_priv(dc);
3689 }
3690
3691 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3692 src = tcg_constant_tl(a->rs2_or_imm);
3693 } else {
3694 TCGv src1 = gen_load_gpr(dc, a->rs1);
3695 if (a->rs2_or_imm == 0) {
3696 src = src1;
3697 } else {
3698 src = tcg_temp_new();
3699 if (a->imm) {
3700 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3701 } else {
3702 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3703 }
3704 }
3705 }
3706 func(dc, src);
3707 return advance_pc(dc);
3708 }
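/*
 * The XOR above implements the architected WRASR/WRPR source operand,
 * r[rs1] ^ (r[rs2] or sign_ext(simm13)); the common "wr %g0, imm, %asr"
 * form degenerates to the constant path at the top.
 */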
3709
3710 static void do_wry(DisasContext *dc, TCGv src)
3711 {
3712 tcg_gen_ext32u_tl(cpu_y, src);
3713 }
3714
3715 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3716
3717 static void do_wrccr(DisasContext *dc, TCGv src)
3718 {
3719 gen_helper_wrccr(tcg_env, src);
3720 }
3721
3722 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3723
3724 static void do_wrasi(DisasContext *dc, TCGv src)
3725 {
3726 TCGv tmp = tcg_temp_new();
3727
3728 tcg_gen_ext8u_tl(tmp, src);
3729 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3730 /* End TB to notice changed ASI. */
3731 dc->base.is_jmp = DISAS_EXIT;
3732 }
3733
3734 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3735
3736 static void do_wrfprs(DisasContext *dc, TCGv src)
3737 {
3738 #ifdef TARGET_SPARC64
3739 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3740 dc->fprs_dirty = 0;
3741 dc->base.is_jmp = DISAS_EXIT;
3742 #else
3743 qemu_build_not_reached();
3744 #endif
3745 }
3746
3747 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3748
3749 static void do_wrgsr(DisasContext *dc, TCGv src)
3750 {
3751 gen_trap_ifnofpu(dc);
3752 tcg_gen_mov_tl(cpu_gsr, src);
3753 }
3754
3755 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3756
3757 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3758 {
3759 gen_helper_set_softint(tcg_env, src);
3760 }
3761
3762 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3763
3764 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3765 {
3766 gen_helper_clear_softint(tcg_env, src);
3767 }
3768
3769 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3770
3771 static void do_wrsoftint(DisasContext *dc, TCGv src)
3772 {
3773 gen_helper_write_softint(tcg_env, src);
3774 }
3775
3776 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3777
3778 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3779 {
3780 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3781
3782 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3783 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3784 translator_io_start(&dc->base);
3785 gen_helper_tick_set_limit(r_tickptr, src);
3786 /* End TB to handle timer interrupt */
3787 dc->base.is_jmp = DISAS_EXIT;
3788 }
3789
3790 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3791
3792 static void do_wrstick(DisasContext *dc, TCGv src)
3793 {
3794 #ifdef TARGET_SPARC64
3795 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3796
3797 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3798 translator_io_start(&dc->base);
3799 gen_helper_tick_set_count(r_tickptr, src);
3800 /* End TB to handle timer interrupt */
3801 dc->base.is_jmp = DISAS_EXIT;
3802 #else
3803 qemu_build_not_reached();
3804 #endif
3805 }
3806
3807 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3808
3809 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3810 {
3811 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3812
3813 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3814 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3815 translator_io_start(&dc->base);
3816 gen_helper_tick_set_limit(r_tickptr, src);
3817 /* End TB to handle timer interrupt */
3818 dc->base.is_jmp = DISAS_EXIT;
3819 }
3820
3821 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3822
3823 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3824 {
3825 save_state(dc);
3826 gen_helper_power_down(tcg_env);
3827 }
3828
3829 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3830
3831 static void do_wrpsr(DisasContext *dc, TCGv src)
3832 {
3833 gen_helper_wrpsr(tcg_env, src);
3834 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3835 dc->cc_op = CC_OP_FLAGS;
3836 dc->base.is_jmp = DISAS_EXIT;
3837 }
3838
3839 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3840
3841 static void do_wrwim(DisasContext *dc, TCGv src)
3842 {
3843 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3844 TCGv tmp = tcg_temp_new();
3845
3846 tcg_gen_andi_tl(tmp, src, mask);
3847 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3848 }
3849
3850 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3851
3852 static void do_wrtpc(DisasContext *dc, TCGv src)
3853 {
3854 #ifdef TARGET_SPARC64
3855 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3856
3857 gen_load_trap_state_at_tl(r_tsptr);
3858 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3859 #else
3860 qemu_build_not_reached();
3861 #endif
3862 }
3863
3864 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3865
3866 static void do_wrtnpc(DisasContext *dc, TCGv src)
3867 {
3868 #ifdef TARGET_SPARC64
3869 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3870
3871 gen_load_trap_state_at_tl(r_tsptr);
3872 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3873 #else
3874 qemu_build_not_reached();
3875 #endif
3876 }
3877
3878 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3879
3880 static void do_wrtstate(DisasContext *dc, TCGv src)
3881 {
3882 #ifdef TARGET_SPARC64
3883 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3884
3885 gen_load_trap_state_at_tl(r_tsptr);
3886 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3887 #else
3888 qemu_build_not_reached();
3889 #endif
3890 }
3891
3892 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3893
3894 static void do_wrtt(DisasContext *dc, TCGv src)
3895 {
3896 #ifdef TARGET_SPARC64
3897 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3898
3899 gen_load_trap_state_at_tl(r_tsptr);
3900 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3901 #else
3902 qemu_build_not_reached();
3903 #endif
3904 }
3905
3906 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3907
3908 static void do_wrtick(DisasContext *dc, TCGv src)
3909 {
3910 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3911
3912 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3913 translator_io_start(&dc->base);
3914 gen_helper_tick_set_count(r_tickptr, src);
3915 /* End TB to handle timer interrupt */
3916 dc->base.is_jmp = DISAS_EXIT;
3917 }
3918
3919 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3920
3921 static void do_wrtba(DisasContext *dc, TCGv src)
3922 {
3923 tcg_gen_mov_tl(cpu_tbr, src);
3924 }
3925
3926 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3927
3928 static void do_wrpstate(DisasContext *dc, TCGv src)
3929 {
3930 save_state(dc);
3931 if (translator_io_start(&dc->base)) {
3932 dc->base.is_jmp = DISAS_EXIT;
3933 }
3934 gen_helper_wrpstate(tcg_env, src);
3935 dc->npc = DYNAMIC_PC;
3936 }
3937
3938 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3939
3940 static void do_wrtl(DisasContext *dc, TCGv src)
3941 {
3942 save_state(dc);
3943 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3944 dc->npc = DYNAMIC_PC;
3945 }
3946
3947 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3948
3949 static void do_wrpil(DisasContext *dc, TCGv src)
3950 {
3951 if (translator_io_start(&dc->base)) {
3952 dc->base.is_jmp = DISAS_EXIT;
3953 }
3954 gen_helper_wrpil(tcg_env, src);
3955 }
3956
3957 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3958
3959 static void do_wrcwp(DisasContext *dc, TCGv src)
3960 {
3961 gen_helper_wrcwp(tcg_env, src);
3962 }
3963
3964 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3965
3966 static void do_wrcansave(DisasContext *dc, TCGv src)
3967 {
3968 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3969 }
3970
3971 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3972
3973 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3974 {
3975 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3976 }
3977
3978 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3979
3980 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3981 {
3982 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3983 }
3984
3985 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3986
3987 static void do_wrotherwin(DisasContext *dc, TCGv src)
3988 {
3989 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3990 }
3991
3992 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3993
3994 static void do_wrwstate(DisasContext *dc, TCGv src)
3995 {
3996 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3997 }
3998
3999 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
4000
4001 static void do_wrgl(DisasContext *dc, TCGv src)
4002 {
4003 gen_helper_wrgl(tcg_env, src);
4004 }
4005
4006 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
4007
4008 /* UA2005 strand status */
4009 static void do_wrssr(DisasContext *dc, TCGv src)
4010 {
4011 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
4012 }
4013
4014 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
4015
4016 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
4017
4018 static void do_wrhpstate(DisasContext *dc, TCGv src)
4019 {
4020 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
4021 dc->base.is_jmp = DISAS_EXIT;
4022 }
4023
4024 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
4025
4026 static void do_wrhtstate(DisasContext *dc, TCGv src)
4027 {
4028 TCGv_i32 tl = tcg_temp_new_i32();
4029 TCGv_ptr tp = tcg_temp_new_ptr();
4030
4031 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
4032 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
4033 tcg_gen_shli_i32(tl, tl, 3);
4034 tcg_gen_ext_i32_ptr(tp, tl);
4035 tcg_gen_add_ptr(tp, tp, tcg_env);
4036
4037 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
4038 }
4039
4040 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
4041
4042 static void do_wrhintp(DisasContext *dc, TCGv src)
4043 {
4044 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
4045 }
4046
4047 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
4048
4049 static void do_wrhtba(DisasContext *dc, TCGv src)
4050 {
4051 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
4052 }
4053
4054 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
4055
4056 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
4057 {
4058 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
4059
4060 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
4061 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
4062 translator_io_start(&dc->base);
4063 gen_helper_tick_set_limit(r_tickptr, src);
4064 /* End TB to handle timer interrupt */
4065 dc->base.is_jmp = DISAS_EXIT;
4066 }
4067
4068 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
4069 do_wrhstick_cmpr)
4070
4071 static bool do_saved_restored(DisasContext *dc, bool saved)
4072 {
4073 if (!supervisor(dc)) {
4074 return raise_priv(dc);
4075 }
4076 if (saved) {
4077 gen_helper_saved(tcg_env);
4078 } else {
4079 gen_helper_restored(tcg_env);
4080 }
4081 return advance_pc(dc);
4082 }
4083
4084 TRANS(SAVED, 64, do_saved_restored, true)
4085 TRANS(RESTORED, 64, do_saved_restored, false)
4086
4087 static bool trans_NOP_v7(DisasContext *dc, arg_NOP_v7 *a)
4088 {
4089 /*
4090 * TODO: Need a feature bit for sparcv8.
4091 * In the meantime, treat all 32-bit cpus like sparcv7.
4092 */
4093 if (avail_32(dc)) {
4094 return advance_pc(dc);
4095 }
4096 return false;
4097 }
4098
4099 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
4100 void (*func)(TCGv, TCGv, TCGv),
4101 void (*funci)(TCGv, TCGv, target_long))
4102 {
4103 TCGv dst, src1;
4104
4105 /* For simplicity, we under-decoded the rs2 form. */
4106 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4107 return false;
4108 }
4109
4110 if (a->cc) {
4111 dst = cpu_cc_dst;
4112 } else {
4113 dst = gen_dest_gpr(dc, a->rd);
4114 }
4115 src1 = gen_load_gpr(dc, a->rs1);
4116
4117 if (a->imm || a->rs2_or_imm == 0) {
4118 if (funci) {
4119 funci(dst, src1, a->rs2_or_imm);
4120 } else {
4121 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
4122 }
4123 } else {
4124 func(dst, src1, cpu_regs[a->rs2_or_imm]);
4125 }
4126 gen_store_gpr(dc, a->rd, dst);
4127
4128 if (a->cc) {
4129 tcg_gen_movi_i32(cpu_cc_op, cc_op);
4130 dc->cc_op = cc_op;
4131 }
4132 return advance_pc(dc);
4133 }
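/*
 * When the insn sets the condition codes, the result is computed
 * directly into cpu_cc_dst and cc_op records which operation produced
 * it; the PSR/CCR flag bits themselves are only materialized lazily,
 * when update_psr() or gen_compare() later needs them.
 */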
4134
4135 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
4136 void (*func)(TCGv, TCGv, TCGv),
4137 void (*funci)(TCGv, TCGv, target_long),
4138 void (*func_cc)(TCGv, TCGv, TCGv))
4139 {
4140 if (a->cc) {
4141 assert(cc_op >= 0);
4142 return do_arith_int(dc, a, cc_op, func_cc, NULL);
4143 }
4144 return do_arith_int(dc, a, cc_op, func, funci);
4145 }
4146
4147 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
4148 void (*func)(TCGv, TCGv, TCGv),
4149 void (*funci)(TCGv, TCGv, target_long))
4150 {
4151 return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
4152 }
4153
4154 TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
4155 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
4156 TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
4157 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)
4158
4159 TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
4160 TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
4161 TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
4162 TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)
4163
4164 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
4165 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
4166 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
4167 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
4168 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
4169
4170 TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
4171 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
4172 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
4173
4174 TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
4175 TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
4176 TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
4177 TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)
4178
4179 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
4180 {
4181 /* OR with %g0 is the canonical alias for MOV. */
4182 if (!a->cc && a->rs1 == 0) {
4183 if (a->imm || a->rs2_or_imm == 0) {
4184 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
4185 } else if (a->rs2_or_imm & ~0x1f) {
4186 /* For simplicity, we under-decoded the rs2 form. */
4187 return false;
4188 } else {
4189 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
4190 }
4191 return advance_pc(dc);
4192 }
4193 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
4194 }
4195
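/*
 * For ADDC/SUBC the carry must be recovered from the last flag-setting
 * operation, so dispatch on the current CC_OP and pick the cheapest
 * recovery for each case.
 */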
4196 static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
4197 {
4198 switch (dc->cc_op) {
4199 case CC_OP_DIV:
4200 case CC_OP_LOGIC:
4201 /* Carry is known to be zero. Fall back to plain ADD. */
4202 return do_arith(dc, a, CC_OP_ADD,
4203 tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
4204 case CC_OP_ADD:
4205 case CC_OP_TADD:
4206 case CC_OP_TADDTV:
4207 return do_arith(dc, a, CC_OP_ADDX,
4208 gen_op_addc_add, NULL, gen_op_addccc_add);
4209 case CC_OP_SUB:
4210 case CC_OP_TSUB:
4211 case CC_OP_TSUBTV:
4212 return do_arith(dc, a, CC_OP_ADDX,
4213 gen_op_addc_sub, NULL, gen_op_addccc_sub);
4214 default:
4215 return do_arith(dc, a, CC_OP_ADDX,
4216 gen_op_addc_generic, NULL, gen_op_addccc_generic);
4217 }
4218 }
4219
4220 static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
4221 {
4222 switch (dc->cc_op) {
4223 case CC_OP_DIV:
4224 case CC_OP_LOGIC:
4225 /* Carry is known to be zero. Fall back to plain SUB. */
4226 return do_arith(dc, a, CC_OP_SUB,
4227 tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
4228 case CC_OP_ADD:
4229 case CC_OP_TADD:
4230 case CC_OP_TADDTV:
4231 return do_arith(dc, a, CC_OP_SUBX,
4232 gen_op_subc_add, NULL, gen_op_subccc_add);
4233 case CC_OP_SUB:
4234 case CC_OP_TSUB:
4235 case CC_OP_TSUBTV:
4236 return do_arith(dc, a, CC_OP_SUBX,
4237 gen_op_subc_sub, NULL, gen_op_subccc_sub);
4238 default:
4239 return do_arith(dc, a, CC_OP_SUBX,
4240 gen_op_subc_generic, NULL, gen_op_subccc_generic);
4241 }
4242 }
4243
4244 static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
4245 {
4246 update_psr(dc);
4247 return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
4248 }
4249
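/* Shift by register: the count is masked to 5 bits (32-bit forms)
   or 6 bits (64-bit forms); 32-bit results are re-extended. */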
4250 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4251 {
4252 TCGv dst, src1, src2;
4253
4254 /* Reject 64-bit shifts for sparc32. */
4255 if (avail_32(dc) && a->x) {
4256 return false;
4257 }
4258
4259 src2 = tcg_temp_new();
4260 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4261 src1 = gen_load_gpr(dc, a->rs1);
4262 dst = gen_dest_gpr(dc, a->rd);
4263
4264 if (l) {
4265 tcg_gen_shl_tl(dst, src1, src2);
4266 if (!a->x) {
4267 tcg_gen_ext32u_tl(dst, dst);
4268 }
4269 } else if (u) {
4270 if (!a->x) {
4271 tcg_gen_ext32u_tl(dst, src1);
4272 src1 = dst;
4273 }
4274 tcg_gen_shr_tl(dst, src1, src2);
4275 } else {
4276 if (!a->x) {
4277 tcg_gen_ext32s_tl(dst, src1);
4278 src1 = dst;
4279 }
4280 tcg_gen_sar_tl(dst, src1, src2);
4281 }
4282 gen_store_gpr(dc, a->rd, dst);
4283 return advance_pc(dc);
4284 }
4285
4286 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4287 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4288 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4289
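/* For a 32-bit shift by immediate on sparc64, the shift plus the implied
   zero or sign extension folds into a single deposit/extract operation. */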
4290 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4291 {
4292 TCGv dst, src1;
4293
4294 /* Reject 64-bit shifts for sparc32. */
4295 if (avail_32(dc) && (a->x || a->i >= 32)) {
4296 return false;
4297 }
4298
4299 src1 = gen_load_gpr(dc, a->rs1);
4300 dst = gen_dest_gpr(dc, a->rd);
4301
4302 if (avail_32(dc) || a->x) {
4303 if (l) {
4304 tcg_gen_shli_tl(dst, src1, a->i);
4305 } else if (u) {
4306 tcg_gen_shri_tl(dst, src1, a->i);
4307 } else {
4308 tcg_gen_sari_tl(dst, src1, a->i);
4309 }
4310 } else {
4311 if (l) {
4312 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4313 } else if (u) {
4314 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4315 } else {
4316 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4317 }
4318 }
4319 gen_store_gpr(dc, a->rd, dst);
4320 return advance_pc(dc);
4321 }
4322
4323 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4324 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4325 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4326
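/* Return the second operand: a constant for the immediate form (or for
   %g0), otherwise the register; NULL if reserved bits were set. */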
4327 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4328 {
4329 /* For simplicity, we under-decoded the rs2 form. */
4330 if (!imm && rs2_or_imm & ~0x1f) {
4331 return NULL;
4332 }
4333 if (imm || rs2_or_imm == 0) {
4334 return tcg_constant_tl(rs2_or_imm);
4335 } else {
4336 return cpu_regs[rs2_or_imm];
4337 }
4338 }
4339
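/* Conditional move: start from the old value of rd, so the register
   is left unchanged when the condition is false. */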
4340 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4341 {
4342 TCGv dst = gen_load_gpr(dc, rd);
4343
4344 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
4345 gen_store_gpr(dc, rd, dst);
4346 return advance_pc(dc);
4347 }
4348
4349 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4350 {
4351 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4352 DisasCompare cmp;
4353
4354 if (src2 == NULL) {
4355 return false;
4356 }
4357 gen_compare(&cmp, a->cc, a->cond, dc);
4358 return do_mov_cond(dc, &cmp, a->rd, src2);
4359 }
4360
4361 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4362 {
4363 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4364 DisasCompare cmp;
4365
4366 if (src2 == NULL) {
4367 return false;
4368 }
4369 gen_fcompare(&cmp, a->cc, a->cond);
4370 return do_mov_cond(dc, &cmp, a->rd, src2);
4371 }
4372
4373 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4374 {
4375 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4376 DisasCompare cmp;
4377
4378 if (src2 == NULL) {
4379 return false;
4380 }
4381 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
4382 return do_mov_cond(dc, &cmp, a->rd, src2);
4383 }
4384
4385 #define CHECK_IU_FEATURE(dc, FEATURE) \
4386 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4387 goto illegal_insn;
4388 #define CHECK_FPU_FEATURE(dc, FEATURE) \
4389 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4390 goto nfpu_insn;
4391
4392 /* before an instruction, dc->pc must be static */
4393 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
4394 {
4395 unsigned int opc, rs1, rs2, rd;
4396 TCGv cpu_src1, cpu_src2;
4397 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
4398 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
4399 target_long simm;
4400
4401 opc = GET_FIELD(insn, 0, 1);
4402 rd = GET_FIELD(insn, 2, 6);
4403
4404 switch (opc) {
4405 case 0:
4406 goto illegal_insn; /* in decodetree */
4407 case 1:
4408 g_assert_not_reached(); /* in decodetree */
4409 case 2: /* FPU & Logical Operations */
4410 {
4411 unsigned int xop __attribute__((unused)) = GET_FIELD(insn, 7, 12);
4412 TCGv cpu_dst __attribute__((unused)) = tcg_temp_new();
4413 TCGv cpu_tmp0 __attribute__((unused));
4414
4415 if (xop == 0x34) { /* FPU Operations */
4416 if (gen_trap_ifnofpu(dc)) {
4417 goto jmp_insn;
4418 }
4419 gen_op_clear_ieee_excp_and_FTT();
4420 rs1 = GET_FIELD(insn, 13, 17);
4421 rs2 = GET_FIELD(insn, 27, 31);
4422 xop = GET_FIELD(insn, 18, 26);
4423
4424 switch (xop) {
4425 case 0x1: /* fmovs */
4426 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4427 gen_store_fpr_F(dc, rd, cpu_src1_32);
4428 break;
4429 case 0x5: /* fnegs */
4430 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
4431 break;
4432 case 0x9: /* fabss */
4433 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
4434 break;
4435 case 0x29: /* fsqrts */
4436 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
4437 break;
4438 case 0x2a: /* fsqrtd */
4439 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
4440 break;
4441 case 0x2b: /* fsqrtq */
4442 CHECK_FPU_FEATURE(dc, FLOAT128);
4443 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
4444 break;
4445 case 0x41: /* fadds */
4446 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
4447 break;
4448 case 0x42: /* faddd */
4449 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
4450 break;
4451 case 0x43: /* faddq */
4452 CHECK_FPU_FEATURE(dc, FLOAT128);
4453 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
4454 break;
4455 case 0x45: /* fsubs */
4456 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
4457 break;
4458 case 0x46: /* fsubd */
4459 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
4460 break;
4461 case 0x47: /* fsubq */
4462 CHECK_FPU_FEATURE(dc, FLOAT128);
4463 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
4464 break;
4465 case 0x49: /* fmuls */
4466 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
4467 break;
4468 case 0x4a: /* fmuld */
4469 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
4470 break;
4471 case 0x4b: /* fmulq */
4472 CHECK_FPU_FEATURE(dc, FLOAT128);
4473 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
4474 break;
4475 case 0x4d: /* fdivs */
4476 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
4477 break;
4478 case 0x4e: /* fdivd */
4479 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
4480 break;
4481 case 0x4f: /* fdivq */
4482 CHECK_FPU_FEATURE(dc, FLOAT128);
4483 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
4484 break;
4485 case 0x69: /* fsmuld */
4486 CHECK_FPU_FEATURE(dc, FSMULD);
4487 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
4488 break;
4489 case 0x6e: /* fdmulq */
4490 CHECK_FPU_FEATURE(dc, FLOAT128);
4491 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
4492 break;
4493 case 0xc4: /* fitos */
4494 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
4495 break;
4496 case 0xc6: /* fdtos */
4497 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
4498 break;
4499 case 0xc7: /* fqtos */
4500 CHECK_FPU_FEATURE(dc, FLOAT128);
4501 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
4502 break;
4503 case 0xc8: /* fitod */
4504 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
4505 break;
4506 case 0xc9: /* fstod */
4507 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
4508 break;
4509 case 0xcb: /* fqtod */
4510 CHECK_FPU_FEATURE(dc, FLOAT128);
4511 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
4512 break;
4513 case 0xcc: /* fitoq */
4514 CHECK_FPU_FEATURE(dc, FLOAT128);
4515 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
4516 break;
4517 case 0xcd: /* fstoq */
4518 CHECK_FPU_FEATURE(dc, FLOAT128);
4519 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
4520 break;
4521 case 0xce: /* fdtoq */
4522 CHECK_FPU_FEATURE(dc, FLOAT128);
4523 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
4524 break;
4525 case 0xd1: /* fstoi */
4526 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
4527 break;
4528 case 0xd2: /* fdtoi */
4529 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
4530 break;
4531 case 0xd3: /* fqtoi */
4532 CHECK_FPU_FEATURE(dc, FLOAT128);
4533 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
4534 break;
4535 #ifdef TARGET_SPARC64
4536 case 0x2: /* V9 fmovd */
4537 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4538 gen_store_fpr_D(dc, rd, cpu_src1_64);
4539 break;
4540 case 0x3: /* V9 fmovq */
4541 CHECK_FPU_FEATURE(dc, FLOAT128);
4542 gen_move_Q(dc, rd, rs2);
4543 break;
4544 case 0x6: /* V9 fnegd */
4545 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
4546 break;
4547 case 0x7: /* V9 fnegq */
4548 CHECK_FPU_FEATURE(dc, FLOAT128);
4549 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
4550 break;
4551 case 0xa: /* V9 fabsd */
4552 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
4553 break;
4554 case 0xb: /* V9 fabsq */
4555 CHECK_FPU_FEATURE(dc, FLOAT128);
4556 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
4557 break;
4558 case 0x81: /* V9 fstox */
4559 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
4560 break;
4561 case 0x82: /* V9 fdtox */
4562 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
4563 break;
4564 case 0x83: /* V9 fqtox */
4565 CHECK_FPU_FEATURE(dc, FLOAT128);
4566 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
4567 break;
4568 case 0x84: /* V9 fxtos */
4569 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
4570 break;
4571 case 0x88: /* V9 fxtod */
4572 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
4573 break;
4574 case 0x8c: /* V9 fxtoq */
4575 CHECK_FPU_FEATURE(dc, FLOAT128);
4576 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
4577 break;
4578 #endif
4579 default:
4580 goto illegal_insn;
4581 }
4582 } else if (xop == 0x35) { /* FPU Operations */
4583 #ifdef TARGET_SPARC64
4584 int cond;
4585 #endif
4586 if (gen_trap_ifnofpu(dc)) {
4587 goto jmp_insn;
4588 }
4589 gen_op_clear_ieee_excp_and_FTT();
4590 rs1 = GET_FIELD(insn, 13, 17);
4591 rs2 = GET_FIELD(insn, 27, 31);
4592 xop = GET_FIELD(insn, 18, 26);
4593
4594 #ifdef TARGET_SPARC64
4595 #define FMOVR(sz) \
4596 do { \
4597 DisasCompare cmp; \
4598 cond = GET_FIELD_SP(insn, 10, 12); \
4599 cpu_src1 = get_src1(dc, insn); \
4600 gen_compare_reg(&cmp, cond, cpu_src1); \
4601 gen_fmov##sz(dc, &cmp, rd, rs2); \
4602 } while (0)
4603
4604 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
4605 FMOVR(s);
4606 break;
4607 } else if ((xop & 0x11f) == 0x006) { /* V9 fmovdr */

4608 FMOVR(d);
4609 break;
4610 } else if ((xop & 0x11f) == 0x007) { /* V9 fmovqr */
4611 CHECK_FPU_FEATURE(dc, FLOAT128);
4612 FMOVR(q);
4613 break;
4614 }
4615 #undef FMOVR
4616 #endif
4617 switch (xop) {
4618 #ifdef TARGET_SPARC64
4619 #define FMOVCC(fcc, sz) \
4620 do { \
4621 DisasCompare cmp; \
4622 cond = GET_FIELD_SP(insn, 14, 17); \
4623 gen_fcompare(&cmp, fcc, cond); \
4624 gen_fmov##sz(dc, &cmp, rd, rs2); \
4625 } while (0)
4626
4627 case 0x001: /* V9 fmovscc %fcc0 */
4628 FMOVCC(0, s);
4629 break;
4630 case 0x002: /* V9 fmovdcc %fcc0 */
4631 FMOVCC(0, d);
4632 break;
4633 case 0x003: /* V9 fmovqcc %fcc0 */
4634 CHECK_FPU_FEATURE(dc, FLOAT128);
4635 FMOVCC(0, q);
4636 break;
4637 case 0x041: /* V9 fmovscc %fcc1 */
4638 FMOVCC(1, s);
4639 break;
4640 case 0x042: /* V9 fmovdcc %fcc1 */
4641 FMOVCC(1, d);
4642 break;
4643 case 0x043: /* V9 fmovqcc %fcc1 */
4644 CHECK_FPU_FEATURE(dc, FLOAT128);
4645 FMOVCC(1, q);
4646 break;
4647 case 0x081: /* V9 fmovscc %fcc2 */
4648 FMOVCC(2, s);
4649 break;
4650 case 0x082: /* V9 fmovdcc %fcc2 */
4651 FMOVCC(2, d);
4652 break;
4653 case 0x083: /* V9 fmovqcc %fcc2 */
4654 CHECK_FPU_FEATURE(dc, FLOAT128);
4655 FMOVCC(2, q);
4656 break;
4657 case 0x0c1: /* V9 fmovscc %fcc3 */
4658 FMOVCC(3, s);
4659 break;
4660 case 0x0c2: /* V9 fmovdcc %fcc3 */
4661 FMOVCC(3, d);
4662 break;
4663 case 0x0c3: /* V9 fmovqcc %fcc3 */
4664 CHECK_FPU_FEATURE(dc, FLOAT128);
4665 FMOVCC(3, q);
4666 break;
4667 #undef FMOVCC
4668 #define FMOVCC(xcc, sz) \
4669 do { \
4670 DisasCompare cmp; \
4671 cond = GET_FIELD_SP(insn, 14, 17); \
4672 gen_compare(&cmp, xcc, cond, dc); \
4673 gen_fmov##sz(dc, &cmp, rd, rs2); \
4674 } while (0)
4675
4676 case 0x101: /* V9 fmovscc %icc */
4677 FMOVCC(0, s);
4678 break;
4679 case 0x102: /* V9 fmovdcc %icc */
4680 FMOVCC(0, d);
4681 break;
4682 case 0x103: /* V9 fmovqcc %icc */
4683 CHECK_FPU_FEATURE(dc, FLOAT128);
4684 FMOVCC(0, q);
4685 break;
4686 case 0x181: /* V9 fmovscc %xcc */
4687 FMOVCC(1, s);
4688 break;
4689 case 0x182: /* V9 fmovdcc %xcc */
4690 FMOVCC(1, d);
4691 break;
4692 case 0x183: /* V9 fmovqcc %xcc */
4693 CHECK_FPU_FEATURE(dc, FLOAT128);
4694 FMOVCC(1, q);
4695 break;
4696 #undef FMOVCC
4697 #endif
4698 case 0x51: /* fcmps, V9 %fcc */
4699 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4700 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
4701 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
4702 break;
4703 case 0x52: /* fcmpd, V9 %fcc */
4704 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4705 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4706 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
4707 break;
4708 case 0x53: /* fcmpq, V9 %fcc */
4709 CHECK_FPU_FEATURE(dc, FLOAT128);
4710 gen_op_load_fpr_QT0(QFPREG(rs1));
4711 gen_op_load_fpr_QT1(QFPREG(rs2));
4712 gen_op_fcmpq(rd & 3);
4713 break;
4714 case 0x55: /* fcmpes, V9 %fcc */
4715 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4716 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
4717 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
4718 break;
4719 case 0x56: /* fcmped, V9 %fcc */
4720 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4721 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4722 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
4723 break;
4724 case 0x57: /* fcmpeq, V9 %fcc */
4725 CHECK_FPU_FEATURE(dc, FLOAT128);
4726 gen_op_load_fpr_QT0(QFPREG(rs1));
4727 gen_op_load_fpr_QT1(QFPREG(rs2));
4728 gen_op_fcmpeq(rd & 3);
4729 break;
4730 default:
4731 goto illegal_insn;
4732 }
4733 } else if (xop < 0x36) {
4734 if (xop < 0x20) {
4735 goto illegal_insn;
4736 } else {
4737 cpu_src1 = get_src1(dc, insn);
4738 cpu_src2 = get_src2(dc, insn);
4739 switch (xop) {
4740 case 0x20: /* taddcc */
4741 case 0x21: /* tsubcc */
4742 case 0x22: /* taddcctv */
4743 case 0x23: /* tsubcctv */
4744 case 0x24: /* mulscc */
4745 case 0x25: /* sll */
4746 case 0x26: /* srl */
4747 case 0x27: /* sra */
4748 goto illegal_insn; /* in decodetree */
4749 case 0x30:
4750 goto illegal_insn; /* WRASR in decodetree */
4751 case 0x32:
4752 goto illegal_insn; /* WRPR in decodetree */
4753 case 0x33: /* wrtbr, UA2005 wrhpr */
4754 goto illegal_insn; /* WRTBR, WRHPR in decodetree */
4755 #ifdef TARGET_SPARC64
4756 case 0x2c: /* V9 movcc */
4757 case 0x2f: /* V9 movr */
4758 goto illegal_insn; /* in decodetree */
4759 case 0x2e: /* V9 popc */
4760 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4761 gen_store_gpr(dc, rd, cpu_dst);
4762 break;
4763 #endif
4764 default:
4765 goto illegal_insn;
4766 }
4767 }
4768 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4769 #ifdef TARGET_SPARC64
4770 int opf = GET_FIELD_SP(insn, 5, 13);
4771 rs1 = GET_FIELD(insn, 13, 17);
4772 rs2 = GET_FIELD(insn, 27, 31);
4773 if (gen_trap_ifnofpu(dc)) {
4774 goto jmp_insn;
4775 }
4776
4777 switch (opf) {
4778 case 0x000: /* VIS I edge8cc */
4779 CHECK_FPU_FEATURE(dc, VIS1);
4780 cpu_src1 = gen_load_gpr(dc, rs1);
4781 cpu_src2 = gen_load_gpr(dc, rs2);
4782 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4783 gen_store_gpr(dc, rd, cpu_dst);
4784 break;
4785 case 0x001: /* VIS II edge8n */
4786 CHECK_FPU_FEATURE(dc, VIS2);
4787 cpu_src1 = gen_load_gpr(dc, rs1);
4788 cpu_src2 = gen_load_gpr(dc, rs2);
4789 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4790 gen_store_gpr(dc, rd, cpu_dst);
4791 break;
4792 case 0x002: /* VIS I edge8lcc */
4793 CHECK_FPU_FEATURE(dc, VIS1);
4794 cpu_src1 = gen_load_gpr(dc, rs1);
4795 cpu_src2 = gen_load_gpr(dc, rs2);
4796 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4797 gen_store_gpr(dc, rd, cpu_dst);
4798 break;
4799 case 0x003: /* VIS II edge8ln */
4800 CHECK_FPU_FEATURE(dc, VIS2);
4801 cpu_src1 = gen_load_gpr(dc, rs1);
4802 cpu_src2 = gen_load_gpr(dc, rs2);
4803 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4804 gen_store_gpr(dc, rd, cpu_dst);
4805 break;
4806 case 0x004: /* VIS I edge16cc */
4807 CHECK_FPU_FEATURE(dc, VIS1);
4808 cpu_src1 = gen_load_gpr(dc, rs1);
4809 cpu_src2 = gen_load_gpr(dc, rs2);
4810 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4811 gen_store_gpr(dc, rd, cpu_dst);
4812 break;
4813 case 0x005: /* VIS II edge16n */
4814 CHECK_FPU_FEATURE(dc, VIS2);
4815 cpu_src1 = gen_load_gpr(dc, rs1);
4816 cpu_src2 = gen_load_gpr(dc, rs2);
4817 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4818 gen_store_gpr(dc, rd, cpu_dst);
4819 break;
4820 case 0x006: /* VIS I edge16lcc */
4821 CHECK_FPU_FEATURE(dc, VIS1);
4822 cpu_src1 = gen_load_gpr(dc, rs1);
4823 cpu_src2 = gen_load_gpr(dc, rs2);
4824 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4825 gen_store_gpr(dc, rd, cpu_dst);
4826 break;
4827 case 0x007: /* VIS II edge16ln */
4828 CHECK_FPU_FEATURE(dc, VIS2);
4829 cpu_src1 = gen_load_gpr(dc, rs1);
4830 cpu_src2 = gen_load_gpr(dc, rs2);
4831 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4832 gen_store_gpr(dc, rd, cpu_dst);
4833 break;
4834 case 0x008: /* VIS I edge32cc */
4835 CHECK_FPU_FEATURE(dc, VIS1);
4836 cpu_src1 = gen_load_gpr(dc, rs1);
4837 cpu_src2 = gen_load_gpr(dc, rs2);
4838 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4839 gen_store_gpr(dc, rd, cpu_dst);
4840 break;
4841 case 0x009: /* VIS II edge32n */
4842 CHECK_FPU_FEATURE(dc, VIS2);
4843 cpu_src1 = gen_load_gpr(dc, rs1);
4844 cpu_src2 = gen_load_gpr(dc, rs2);
4845 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4846 gen_store_gpr(dc, rd, cpu_dst);
4847 break;
4848 case 0x00a: /* VIS I edge32lcc */
4849 CHECK_FPU_FEATURE(dc, VIS1);
4850 cpu_src1 = gen_load_gpr(dc, rs1);
4851 cpu_src2 = gen_load_gpr(dc, rs2);
4852 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4853 gen_store_gpr(dc, rd, cpu_dst);
4854 break;
4855 case 0x00b: /* VIS II edge32ln */
4856 CHECK_FPU_FEATURE(dc, VIS2);
4857 cpu_src1 = gen_load_gpr(dc, rs1);
4858 cpu_src2 = gen_load_gpr(dc, rs2);
4859 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4860 gen_store_gpr(dc, rd, cpu_dst);
4861 break;
4862 case 0x010: /* VIS I array8 */
4863 CHECK_FPU_FEATURE(dc, VIS1);
4864 cpu_src1 = gen_load_gpr(dc, rs1);
4865 cpu_src2 = gen_load_gpr(dc, rs2);
4866 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4867 gen_store_gpr(dc, rd, cpu_dst);
4868 break;
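/* array16 and array32 reuse the array8 helper; the index is then
   scaled by the element size via the shifts below. */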
4869 case 0x012: /* VIS I array16 */
4870 CHECK_FPU_FEATURE(dc, VIS1);
4871 cpu_src1 = gen_load_gpr(dc, rs1);
4872 cpu_src2 = gen_load_gpr(dc, rs2);
4873 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4874 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4875 gen_store_gpr(dc, rd, cpu_dst);
4876 break;
4877 case 0x014: /* VIS I array32 */
4878 CHECK_FPU_FEATURE(dc, VIS1);
4879 cpu_src1 = gen_load_gpr(dc, rs1);
4880 cpu_src2 = gen_load_gpr(dc, rs2);
4881 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4882 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4883 gen_store_gpr(dc, rd, cpu_dst);
4884 break;
4885 case 0x018: /* VIS I alignaddr */
4886 CHECK_FPU_FEATURE(dc, VIS1);
4887 cpu_src1 = gen_load_gpr(dc, rs1);
4888 cpu_src2 = gen_load_gpr(dc, rs2);
4889 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4890 gen_store_gpr(dc, rd, cpu_dst);
4891 break;
4892 case 0x01a: /* VIS I alignaddrl */
4893 CHECK_FPU_FEATURE(dc, VIS1);
4894 cpu_src1 = gen_load_gpr(dc, rs1);
4895 cpu_src2 = gen_load_gpr(dc, rs2);
4896 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4897 gen_store_gpr(dc, rd, cpu_dst);
4898 break;
4899 case 0x019: /* VIS II bmask */
4900 CHECK_FPU_FEATURE(dc, VIS2);
4901 cpu_src1 = gen_load_gpr(dc, rs1);
4902 cpu_src2 = gen_load_gpr(dc, rs2);
4903 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4904 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4905 gen_store_gpr(dc, rd, cpu_dst);
4906 break;
4907 case 0x020: /* VIS I fcmple16 */
4908 CHECK_FPU_FEATURE(dc, VIS1);
4909 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4910 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4911 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4912 gen_store_gpr(dc, rd, cpu_dst);
4913 break;
4914 case 0x022: /* VIS I fcmpne16 */
4915 CHECK_FPU_FEATURE(dc, VIS1);
4916 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4917 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4918 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4919 gen_store_gpr(dc, rd, cpu_dst);
4920 break;
4921 case 0x024: /* VIS I fcmple32 */
4922 CHECK_FPU_FEATURE(dc, VIS1);
4923 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4924 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4925 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4926 gen_store_gpr(dc, rd, cpu_dst);
4927 break;
4928 case 0x026: /* VIS I fcmpne32 */
4929 CHECK_FPU_FEATURE(dc, VIS1);
4930 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4931 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4932 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4933 gen_store_gpr(dc, rd, cpu_dst);
4934 break;
4935 case 0x028: /* VIS I fcmpgt16 */
4936 CHECK_FPU_FEATURE(dc, VIS1);
4937 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4938 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4939 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4940 gen_store_gpr(dc, rd, cpu_dst);
4941 break;
4942 case 0x02a: /* VIS I fcmpeq16 */
4943 CHECK_FPU_FEATURE(dc, VIS1);
4944 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4945 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4946 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4947 gen_store_gpr(dc, rd, cpu_dst);
4948 break;
4949 case 0x02c: /* VIS I fcmpgt32 */
4950 CHECK_FPU_FEATURE(dc, VIS1);
4951 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4952 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4953 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4954 gen_store_gpr(dc, rd, cpu_dst);
4955 break;
4956 case 0x02e: /* VIS I fcmpeq32 */
4957 CHECK_FPU_FEATURE(dc, VIS1);
4958 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4959 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4960 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4961 gen_store_gpr(dc, rd, cpu_dst);
4962 break;
4963 case 0x031: /* VIS I fmul8x16 */
4964 CHECK_FPU_FEATURE(dc, VIS1);
4965 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4966 break;
4967 case 0x033: /* VIS I fmul8x16au */
4968 CHECK_FPU_FEATURE(dc, VIS1);
4969 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4970 break;
4971 case 0x035: /* VIS I fmul8x16al */
4972 CHECK_FPU_FEATURE(dc, VIS1);
4973 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4974 break;
4975 case 0x036: /* VIS I fmul8sux16 */
4976 CHECK_FPU_FEATURE(dc, VIS1);
4977 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4978 break;
4979 case 0x037: /* VIS I fmul8ulx16 */
4980 CHECK_FPU_FEATURE(dc, VIS1);
4981 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4982 break;
4983 case 0x038: /* VIS I fmuld8sux16 */
4984 CHECK_FPU_FEATURE(dc, VIS1);
4985 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4986 break;
4987 case 0x039: /* VIS I fmuld8ulx16 */
4988 CHECK_FPU_FEATURE(dc, VIS1);
4989 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4990 break;
4991 case 0x03a: /* VIS I fpack32 */
4992 CHECK_FPU_FEATURE(dc, VIS1);
4993 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4994 break;
4995 case 0x03b: /* VIS I fpack16 */
4996 CHECK_FPU_FEATURE(dc, VIS1);
4997 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4998 cpu_dst_32 = gen_dest_fpr_F(dc);
4999 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
5000 gen_store_fpr_F(dc, rd, cpu_dst_32);
5001 break;
5002 case 0x03d: /* VIS I fpackfix */
5003 CHECK_FPU_FEATURE(dc, VIS1);
5004 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5005 cpu_dst_32 = gen_dest_fpr_F(dc);
5006 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
5007 gen_store_fpr_F(dc, rd, cpu_dst_32);
5008 break;
5009 case 0x03e: /* VIS I pdist */
5010 CHECK_FPU_FEATURE(dc, VIS1);
5011 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
5012 break;
5013 case 0x048: /* VIS I faligndata */
5014 CHECK_FPU_FEATURE(dc, VIS1);
5015 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
5016 break;
5017 case 0x04b: /* VIS I fpmerge */
5018 CHECK_FPU_FEATURE(dc, VIS1);
5019 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
5020 break;
5021 case 0x04c: /* VIS II bshuffle */
5022 CHECK_FPU_FEATURE(dc, VIS2);
5023 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
5024 break;
5025 case 0x04d: /* VIS I fexpand */
5026 CHECK_FPU_FEATURE(dc, VIS1);
5027 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5028 break;
5029 case 0x050: /* VIS I fpadd16 */
5030 CHECK_FPU_FEATURE(dc, VIS1);
5031 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5032 break;
5033 case 0x051: /* VIS I fpadd16s */
5034 CHECK_FPU_FEATURE(dc, VIS1);
5035 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5036 break;
5037 case 0x052: /* VIS I fpadd32 */
5038 CHECK_FPU_FEATURE(dc, VIS1);
5039 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5040 break;
5041 case 0x053: /* VIS I fpadd32s */
5042 CHECK_FPU_FEATURE(dc, VIS1);
5043 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5044 break;
5045 case 0x054: /* VIS I fpsub16 */
5046 CHECK_FPU_FEATURE(dc, VIS1);
5047 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5048 break;
5049 case 0x055: /* VIS I fpsub16s */
5050 CHECK_FPU_FEATURE(dc, VIS1);
5051 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5052 break;
5053 case 0x056: /* VIS I fpsub32 */
5054 CHECK_FPU_FEATURE(dc, VIS1);
5055 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5056 break;
5057 case 0x057: /* VIS I fpsub32s */
5058 CHECK_FPU_FEATURE(dc, VIS1);
5059 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5060 break;
5061 case 0x060: /* VIS I fzero */
5062 CHECK_FPU_FEATURE(dc, VIS1);
5063 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5064 tcg_gen_movi_i64(cpu_dst_64, 0);
5065 gen_store_fpr_D(dc, rd, cpu_dst_64);
5066 break;
5067 case 0x061: /* VIS I fzeros */
5068 CHECK_FPU_FEATURE(dc, VIS1);
5069 cpu_dst_32 = gen_dest_fpr_F(dc);
5070 tcg_gen_movi_i32(cpu_dst_32, 0);
5071 gen_store_fpr_F(dc, rd, cpu_dst_32);
5072 break;
5073 case 0x062: /* VIS I fnor */
5074 CHECK_FPU_FEATURE(dc, VIS1);
5075 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5076 break;
5077 case 0x063: /* VIS I fnors */
5078 CHECK_FPU_FEATURE(dc, VIS1);
5079 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5080 break;
5081 case 0x064: /* VIS I fandnot2 */
5082 CHECK_FPU_FEATURE(dc, VIS1);
5083 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5084 break;
5085 case 0x065: /* VIS I fandnot2s */
5086 CHECK_FPU_FEATURE(dc, VIS1);
5087 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5088 break;
5089 case 0x066: /* VIS I fnot2 */
5090 CHECK_FPU_FEATURE(dc, VIS1);
5091 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5092 break;
5093 case 0x067: /* VIS I fnot2s */
5094 CHECK_FPU_FEATURE(dc, VIS1);
5095 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5096 break;
5097 case 0x068: /* VIS I fandnot1 */
5098 CHECK_FPU_FEATURE(dc, VIS1);
5099 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5100 break;
5101 case 0x069: /* VIS I fandnot1s */
5102 CHECK_FPU_FEATURE(dc, VIS1);
5103 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5104 break;
5105 case 0x06a: /* VIS I fnot1 */
5106 CHECK_FPU_FEATURE(dc, VIS1);
5107 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5108 break;
5109 case 0x06b: /* VIS I fnot1s */
5110 CHECK_FPU_FEATURE(dc, VIS1);
5111 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5112 break;
5113 case 0x06c: /* VIS I fxor */
5114 CHECK_FPU_FEATURE(dc, VIS1);
5115 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5116 break;
5117 case 0x06d: /* VIS I fxors */
5118 CHECK_FPU_FEATURE(dc, VIS1);
5119 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5120 break;
5121 case 0x06e: /* VIS I fnand */
5122 CHECK_FPU_FEATURE(dc, VIS1);
5123 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5124 break;
5125 case 0x06f: /* VIS I fnands */
5126 CHECK_FPU_FEATURE(dc, VIS1);
5127 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5128 break;
5129 case 0x070: /* VIS I fand */
5130 CHECK_FPU_FEATURE(dc, VIS1);
5131 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5132 break;
5133 case 0x071: /* VIS I fands */
5134 CHECK_FPU_FEATURE(dc, VIS1);
5135 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5136 break;
5137 case 0x072: /* VIS I fxnor */
5138 CHECK_FPU_FEATURE(dc, VIS1);
5139 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5140 break;
5141 case 0x073: /* VIS I fxnors */
5142 CHECK_FPU_FEATURE(dc, VIS1);
5143 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5144 break;
5145 case 0x074: /* VIS I fsrc1 */
5146 CHECK_FPU_FEATURE(dc, VIS1);
5147 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5148 gen_store_fpr_D(dc, rd, cpu_src1_64);
5149 break;
5150 case 0x075: /* VIS I fsrc1s */
5151 CHECK_FPU_FEATURE(dc, VIS1);
5152 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5153 gen_store_fpr_F(dc, rd, cpu_src1_32);
5154 break;
5155 case 0x076: /* VIS I fornot2 */
5156 CHECK_FPU_FEATURE(dc, VIS1);
5157 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5158 break;
5159 case 0x077: /* VIS I fornot2s */
5160 CHECK_FPU_FEATURE(dc, VIS1);
5161 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5162 break;
5163 case 0x078: /* VIS I fsrc2 */
5164 CHECK_FPU_FEATURE(dc, VIS1);
5165 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5166 gen_store_fpr_D(dc, rd, cpu_src1_64);
5167 break;
5168 case 0x079: /* VIS I fsrc2s */
5169 CHECK_FPU_FEATURE(dc, VIS1);
5170 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5171 gen_store_fpr_F(dc, rd, cpu_src1_32);
5172 break;
5173 case 0x07a: /* VIS I fornot1 */
5174 CHECK_FPU_FEATURE(dc, VIS1);
5175 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5176 break;
5177 case 0x07b: /* VIS I fornot1s */
5178 CHECK_FPU_FEATURE(dc, VIS1);
5179 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5180 break;
5181 case 0x07c: /* VIS I for */
5182 CHECK_FPU_FEATURE(dc, VIS1);
5183 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5184 break;
5185 case 0x07d: /* VIS I fors */
5186 CHECK_FPU_FEATURE(dc, VIS1);
5187 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5188 break;
5189 case 0x07e: /* VIS I fone */
5190 CHECK_FPU_FEATURE(dc, VIS1);
5191 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5192 tcg_gen_movi_i64(cpu_dst_64, -1);
5193 gen_store_fpr_D(dc, rd, cpu_dst_64);
5194 break;
5195 case 0x07f: /* VIS I fones */
5196 CHECK_FPU_FEATURE(dc, VIS1);
5197 cpu_dst_32 = gen_dest_fpr_F(dc);
5198 tcg_gen_movi_i32(cpu_dst_32, -1);
5199 gen_store_fpr_F(dc, rd, cpu_dst_32);
5200 break;
5201 case 0x080: /* VIS I shutdown */
5202 case 0x081: /* VIS II siam */
5203 /* XXX: not implemented, treat as illegal */
5204 goto illegal_insn;
5205 default:
5206 goto illegal_insn;
5207 }
5208 #else
5209 goto ncp_insn;
5210 #endif
5211 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5212 #ifdef TARGET_SPARC64
5213 goto illegal_insn;
5214 #else
5215 goto ncp_insn;
5216 #endif
5217 #ifdef TARGET_SPARC64
5218 } else if (xop == 0x39) { /* V9 return */
5219 save_state(dc);
5220 cpu_src1 = get_src1(dc, insn);
5221 cpu_tmp0 = tcg_temp_new();
5222 if (IS_IMM) { /* immediate */
5223 simm = GET_FIELDs(insn, 19, 31);
5224 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5225 } else { /* register */
5226 rs2 = GET_FIELD(insn, 27, 31);
5227 if (rs2) {
5228 cpu_src2 = gen_load_gpr(dc, rs2);
5229 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5230 } else {
5231 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5232 }
5233 }
5234 gen_check_align(dc, cpu_tmp0, 3);
5235 gen_helper_restore(tcg_env);
5236 gen_mov_pc_npc(dc);
5237 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5238 dc->npc = DYNAMIC_PC_LOOKUP;
5239 goto jmp_insn;
5240 #endif
5241 } else {
5242 cpu_src1 = get_src1(dc, insn);
5243 cpu_tmp0 = tcg_temp_new();
5244 if (IS_IMM) { /* immediate */
5245 simm = GET_FIELDs(insn, 19, 31);
5246 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5247 } else { /* register */
5248 rs2 = GET_FIELD(insn, 27, 31);
5249 if (rs2) {
5250 cpu_src2 = gen_load_gpr(dc, rs2);
5251 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5252 } else {
5253 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5254 }
5255 }
5256 switch (xop) {
5257 case 0x38: /* jmpl */
5258 {
5259 gen_check_align(dc, cpu_tmp0, 3);
5260 gen_store_gpr(dc, rd, tcg_constant_tl(dc->pc));
5261 gen_mov_pc_npc(dc);
5262 gen_address_mask(dc, cpu_tmp0);
5263 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5264 dc->npc = DYNAMIC_PC_LOOKUP;
5265 }
5266 goto jmp_insn;
5267 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5268 case 0x39: /* rett, V9 return */
5269 {
5270 if (!supervisor(dc))
5271 goto priv_insn;
5272 gen_check_align(dc, cpu_tmp0, 3);
5273 gen_mov_pc_npc(dc);
5274 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5275 dc->npc = DYNAMIC_PC;
5276 gen_helper_rett(tcg_env);
5277 }
5278 goto jmp_insn;
5279 #endif
5280 case 0x3b: /* flush */
5281 /* nop: TB invalidation on writes keeps translated code coherent */
5282 break;
5283 case 0x3c: /* save */
5284 gen_helper_save(tcg_env);
5285 gen_store_gpr(dc, rd, cpu_tmp0);
5286 break;
5287 case 0x3d: /* restore */
5288 gen_helper_restore(tcg_env);
5289 gen_store_gpr(dc, rd, cpu_tmp0);
5290 break;
5291 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5292 case 0x3e: /* V9 done/retry */
5293 {
5294 switch (rd) {
5295 case 0:
5296 if (!supervisor(dc))
5297 goto priv_insn;
5298 dc->npc = DYNAMIC_PC;
5299 dc->pc = DYNAMIC_PC;
5300 translator_io_start(&dc->base);
5301 gen_helper_done(tcg_env);
5302 goto jmp_insn;
5303 case 1:
5304 if (!supervisor(dc))
5305 goto priv_insn;
5306 dc->npc = DYNAMIC_PC;
5307 dc->pc = DYNAMIC_PC;
5308 translator_io_start(&dc->base);
5309 gen_helper_retry(tcg_env);
5310 goto jmp_insn;
5311 default:
5312 goto illegal_insn;
5313 }
5314 }
5315 break;
5316 #endif
5317 default:
5318 goto illegal_insn;
5319 }
5320 }
5321 break;
5322 }
5323 break;
5324 case 3: /* load/store instructions */
5325 {
5326 unsigned int xop = GET_FIELD(insn, 7, 12);
5327 /* ??? gen_address_mask prevents us from using a source
5328 register directly. Always generate a temporary. */
5329 TCGv cpu_addr = tcg_temp_new();
5330
5331 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5332 if (xop == 0x3c || xop == 0x3e) {
5333 /* V9 casa/casxa : no offset */
5334 } else if (IS_IMM) { /* immediate */
5335 simm = GET_FIELDs(insn, 19, 31);
5336 if (simm != 0) {
5337 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5338 }
5339 } else { /* register */
5340 rs2 = GET_FIELD(insn, 27, 31);
5341 if (rs2 != 0) {
5342 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5343 }
5344 }
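/* cpu_addr now holds the fully resolved effective address. */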
5345 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5346 (xop > 0x17 && xop <= 0x1d) ||
5347 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5348 TCGv cpu_val = gen_dest_gpr(dc, rd);
5349
5350 switch (xop) {
5351 case 0x0: /* ld, V9 lduw, load unsigned word */
5352 gen_address_mask(dc, cpu_addr);
5353 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5354 dc->mem_idx, MO_TEUL | MO_ALIGN);
5355 break;
5356 case 0x1: /* ldub, load unsigned byte */
5357 gen_address_mask(dc, cpu_addr);
5358 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5359 dc->mem_idx, MO_UB);
5360 break;
5361 case 0x2: /* lduh, load unsigned halfword */
5362 gen_address_mask(dc, cpu_addr);
5363 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5364 dc->mem_idx, MO_TEUW | MO_ALIGN);
5365 break;
5366 case 0x3: /* ldd, load double word */
5367 if (rd & 1)
5368 goto illegal_insn;
5369 else {
5370 TCGv_i64 t64;
5371
5372 gen_address_mask(dc, cpu_addr);
5373 t64 = tcg_temp_new_i64();
5374 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5375 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5376 tcg_gen_trunc_i64_tl(cpu_val, t64);
5377 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5378 gen_store_gpr(dc, rd + 1, cpu_val);
5379 tcg_gen_shri_i64(t64, t64, 32);
5380 tcg_gen_trunc_i64_tl(cpu_val, t64);
5381 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5382 }
5383 break;
5384 case 0x9: /* ldsb, load signed byte */
5385 gen_address_mask(dc, cpu_addr);
5386 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
5387 break;
5388 case 0xa: /* ldsh, load signed halfword */
5389 gen_address_mask(dc, cpu_addr);
5390 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5391 dc->mem_idx, MO_TESW | MO_ALIGN);
5392 break;
5393 case 0xd: /* ldstub */
5394 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5395 break;
5396 case 0x0f:
5397 /* swap: atomically exchange a register with memory */
5398 cpu_src1 = gen_load_gpr(dc, rd);
5399 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5400 dc->mem_idx, MO_TEUL);
5401 break;
5402 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5403 case 0x10: /* lda, V9 lduwa, load word alternate */
5404 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5405 break;
5406 case 0x11: /* lduba, load unsigned byte alternate */
5407 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5408 break;
5409 case 0x12: /* lduha, load unsigned halfword alternate */
5410 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5411 break;
5412 case 0x13: /* ldda, load double word alternate */
5413 if (rd & 1) {
5414 goto illegal_insn;
5415 }
5416 gen_ldda_asi(dc, cpu_addr, insn, rd);
5417 goto skip_move;
5418 case 0x19: /* ldsba, load signed byte alternate */
5419 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5420 break;
5421 case 0x1a: /* ldsha, load signed halfword alternate */
5422 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5423 break;
5424 case 0x1d: /* ldstuba -- XXX: should be atomic */
5425 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5426 break;
5427 case 0x1f: /* swapa: atomically exchange a register with
5428 alternate space memory */
5429 cpu_src1 = gen_load_gpr(dc, rd);
5430 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5431 break;
5432
5433 #ifndef TARGET_SPARC64
5434 case 0x30: /* ldc */
5435 case 0x31: /* ldcsr */
5436 case 0x33: /* lddc */
5437 goto ncp_insn;
5438 #endif
5439 #endif
5440 #ifdef TARGET_SPARC64
5441 case 0x08: /* V9 ldsw */
5442 gen_address_mask(dc, cpu_addr);
5443 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5444 dc->mem_idx, MO_TESL | MO_ALIGN);
5445 break;
5446 case 0x0b: /* V9 ldx */
5447 gen_address_mask(dc, cpu_addr);
5448 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
5449 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5450 break;
5451 case 0x18: /* V9 ldswa */
5452 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5453 break;
5454 case 0x1b: /* V9 ldxa */
5455 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5456 break;
5457 case 0x2d: /* V9 prefetch, no effect */
5458 goto skip_move;
5459 case 0x30: /* V9 ldfa */
5460 if (gen_trap_ifnofpu(dc)) {
5461 goto jmp_insn;
5462 }
5463 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5464 gen_update_fprs_dirty(dc, rd);
5465 goto skip_move;
5466 case 0x33: /* V9 lddfa */
5467 if (gen_trap_ifnofpu(dc)) {
5468 goto jmp_insn;
5469 }
5470 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5471 gen_update_fprs_dirty(dc, DFPREG(rd));
5472 goto skip_move;
5473 case 0x3d: /* V9 prefetcha, no effect */
5474 goto skip_move;
5475 case 0x32: /* V9 ldqfa */
5476 CHECK_FPU_FEATURE(dc, FLOAT128);
5477 if (gen_trap_ifnofpu(dc)) {
5478 goto jmp_insn;
5479 }
5480 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5481 gen_update_fprs_dirty(dc, QFPREG(rd));
5482 goto skip_move;
5483 #endif
5484 default:
5485 goto illegal_insn;
5486 }
5487 gen_store_gpr(dc, rd, cpu_val);
5488 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5489 skip_move: ;
5490 #endif
5491 } else if (xop >= 0x20 && xop < 0x24) {
5492 if (gen_trap_ifnofpu(dc)) {
5493 goto jmp_insn;
5494 }
5495 switch (xop) {
5496 case 0x20: /* ldf, load fpreg */
5497 gen_address_mask(dc, cpu_addr);
5498 cpu_dst_32 = gen_dest_fpr_F(dc);
5499 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5500 dc->mem_idx, MO_TEUL | MO_ALIGN);
5501 gen_store_fpr_F(dc, rd, cpu_dst_32);
5502 break;
5503 case 0x21: /* ldfsr, V9 ldxfsr */
5504 #ifdef TARGET_SPARC64
5505 gen_address_mask(dc, cpu_addr);
5506 if (rd == 1) {
5507 TCGv_i64 t64 = tcg_temp_new_i64();
5508 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5509 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5510 gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
5511 break;
5512 }
5513 #endif
5514 cpu_dst_32 = tcg_temp_new_i32();
5515 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5516 dc->mem_idx, MO_TEUL | MO_ALIGN);
5517 gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
5518 break;
5519 case 0x22: /* ldqf, load quad fpreg */
5520 CHECK_FPU_FEATURE(dc, FLOAT128);
5521 gen_address_mask(dc, cpu_addr);
5522 cpu_src1_64 = tcg_temp_new_i64();
5523 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5524 MO_TEUQ | MO_ALIGN_4);
5525 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5526 cpu_src2_64 = tcg_temp_new_i64();
5527 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5528 MO_TEUQ | MO_ALIGN_4);
5529 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5530 break;
5531 case 0x23: /* lddf, load double fpreg */
5532 gen_address_mask(dc, cpu_addr);
5533 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5534 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5535 MO_TEUQ | MO_ALIGN_4);
5536 gen_store_fpr_D(dc, rd, cpu_dst_64);
5537 break;
5538 default:
5539 goto illegal_insn;
5540 }
5541 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5542 xop == 0xe || xop == 0x1e) {
5543 TCGv cpu_val = gen_load_gpr(dc, rd);
5544
5545 switch (xop) {
5546 case 0x4: /* st, store word */
5547 gen_address_mask(dc, cpu_addr);
5548 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5549 dc->mem_idx, MO_TEUL | MO_ALIGN);
5550 break;
5551 case 0x5: /* stb, store byte */
5552 gen_address_mask(dc, cpu_addr);
5553 tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
5554 break;
5555 case 0x6: /* sth, store halfword */
5556 gen_address_mask(dc, cpu_addr);
5557 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5558 dc->mem_idx, MO_TEUW | MO_ALIGN);
5559 break;
5560 case 0x7: /* std, store double word */
5561 if (rd & 1)
5562 goto illegal_insn;
5563 else {
5564 TCGv_i64 t64;
5565 TCGv lo;
5566
5567 gen_address_mask(dc, cpu_addr);
5568 lo = gen_load_gpr(dc, rd + 1);
5569 t64 = tcg_temp_new_i64();
5570 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5571 tcg_gen_qemu_st_i64(t64, cpu_addr,
5572 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5573 }
5574 break;
5575 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5576 case 0x14: /* sta, V9 stwa, store word alternate */
5577 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5578 break;
5579 case 0x15: /* stba, store byte alternate */
5580 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5581 break;
5582 case 0x16: /* stha, store halfword alternate */
5583 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5584 break;
5585 case 0x17: /* stda, store double word alternate */
5586 if (rd & 1) {
5587 goto illegal_insn;
5588 }
5589 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5590 break;
5591 #endif
5592 #ifdef TARGET_SPARC64
5593 case 0x0e: /* V9 stx */
5594 gen_address_mask(dc, cpu_addr);
5595 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
5596 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5597 break;
5598 case 0x1e: /* V9 stxa */
5599 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
5600 break;
5601 #endif
5602 default:
5603 goto illegal_insn;
5604 }
5605 } else if (xop > 0x23 && xop < 0x28) {
5606 if (gen_trap_ifnofpu(dc)) {
5607 goto jmp_insn;
5608 }
5609 switch (xop) {
5610 case 0x24: /* stf, store fpreg */
5611 gen_address_mask(dc, cpu_addr);
5612 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5613 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5614 dc->mem_idx, MO_TEUL | MO_ALIGN);
5615 break;
5616 case 0x25: /* stfsr, V9 stxfsr */
5617 {
5618 #ifdef TARGET_SPARC64
5619 gen_address_mask(dc, cpu_addr);
5620 if (rd == 1) {
5621 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5622 dc->mem_idx, MO_TEUQ | MO_ALIGN);
5623 break;
5624 }
5625 #endif
5626 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
5627 dc->mem_idx, MO_TEUL | MO_ALIGN);
5628 }
5629 break;
5630 case 0x26:
5631 #ifdef TARGET_SPARC64
5632 /* V9 stqf, store quad fpreg */
5633 CHECK_FPU_FEATURE(dc, FLOAT128);
5634 gen_address_mask(dc, cpu_addr);
5635 /* ??? While stqf only requires 4-byte alignment, it is
5636 legal for the cpu to signal the unaligned exception.
5637 The OS trap handler is then required to fix it up.
5638 For qemu, this avoids having to probe the second page
5639 before performing the first write. */
5640 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5641 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5642 dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
5643 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5644 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5645 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5646 dc->mem_idx, MO_TEUQ);
5647 break;
5648 #else /* !TARGET_SPARC64 */
5649 /* stdfq, store floating point queue */
5650 #if defined(CONFIG_USER_ONLY)
5651 goto illegal_insn;
5652 #else
5653 if (!supervisor(dc))
5654 goto priv_insn;
5655 if (gen_trap_ifnofpu(dc)) {
5656 goto jmp_insn;
5657 }
5658 goto nfq_insn;
5659 #endif
5660 #endif
5661 case 0x27: /* stdf, store double fpreg */
5662 gen_address_mask(dc, cpu_addr);
5663 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5664 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5665 MO_TEUQ | MO_ALIGN_4);
5666 break;
5667 default:
5668 goto illegal_insn;
5669 }
5670 } else if (xop > 0x33 && xop < 0x3f) {
5671 switch (xop) {
5672 #ifdef TARGET_SPARC64
5673 case 0x34: /* V9 stfa */
5674 if (gen_trap_ifnofpu(dc)) {
5675 goto jmp_insn;
5676 }
5677 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5678 break;
5679 case 0x36: /* V9 stqfa */
5680 {
5681 CHECK_FPU_FEATURE(dc, FLOAT128);
5682 if (gen_trap_ifnofpu(dc)) {
5683 goto jmp_insn;
5684 }
5685 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5686 }
5687 break;
5688 case 0x37: /* V9 stdfa */
5689 if (gen_trap_ifnofpu(dc)) {
5690 goto jmp_insn;
5691 }
5692 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5693 break;
5694 case 0x3e: /* V9 casxa */
5695 rs2 = GET_FIELD(insn, 27, 31);
5696 cpu_src2 = gen_load_gpr(dc, rs2);
5697 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5698 break;
5699 #else
5700 case 0x34: /* stc */
5701 case 0x35: /* stcsr */
5702 case 0x36: /* stdcq */
5703 case 0x37: /* stdc */
5704 goto ncp_insn;
5705 #endif
5706 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5707 case 0x3c: /* V9 or LEON3 casa */
5708 #ifndef TARGET_SPARC64
5709 CHECK_IU_FEATURE(dc, CASA);
5710 #endif
5711 rs2 = GET_FIELD(insn, 27, 31);
5712 cpu_src2 = gen_load_gpr(dc, rs2);
5713 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5714 break;
5715 #endif
5716 default:
5717 goto illegal_insn;
5718 }
5719 } else {
5720 goto illegal_insn;
5721 }
5722 }
5723 break;
5724 }
5725 advance_pc(dc);
5726 jmp_insn:
5727 return;
5728 illegal_insn:
5729 gen_exception(dc, TT_ILL_INSN);
5730 return;
5731 #if !defined(CONFIG_USER_ONLY)
5732 priv_insn:
5733 gen_exception(dc, TT_PRIV_INSN);
5734 return;
5735 #endif
5736 nfpu_insn:
5737 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5738 return;
5739 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5740 nfq_insn:
5741 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5742 return;
5743 #endif
5744 #ifndef TARGET_SPARC64
5745 ncp_insn:
5746 gen_exception(dc, TT_NCP_INSN);
5747 return;
5748 #endif
5749 }
5750
5751 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5752 {
5753 DisasContext *dc = container_of(dcbase, DisasContext, base);
5754 CPUSPARCState *env = cpu_env(cs);
5755 int bound;
5756
5757 dc->pc = dc->base.pc_first;
5758 dc->npc = (target_ulong)dc->base.tb->cs_base;
5759 dc->cc_op = CC_OP_DYNAMIC;
5760 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5761 dc->def = &env->def;
5762 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5763 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5764 #ifndef CONFIG_USER_ONLY
5765 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5766 #endif
5767 #ifdef TARGET_SPARC64
5768 dc->fprs_dirty = 0;
5769 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5770 #ifndef CONFIG_USER_ONLY
5771 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5772 #endif
5773 #endif
5774 /*
5775 * if we reach a page boundary, we stop generation so that the
5776 * PC of a TT_TFAULT exception is always in the right page
5777 */
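/* i.e. the number of whole insn slots left before the page boundary */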
5778 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5779 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5780 }
5781
5782 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5783 {
5784 }
5785
5786 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5787 {
5788 DisasContext *dc = container_of(dcbase, DisasContext, base);
5789 target_ulong npc = dc->npc;
5790
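/* The low two bits of npc encode the dynamic-PC states defined above. */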
5791 if (npc & 3) {
5792 switch (npc) {
5793 case JUMP_PC:
5794 assert(dc->jump_pc[1] == dc->pc + 4);
5795 npc = dc->jump_pc[0] | JUMP_PC;
5796 break;
5797 case DYNAMIC_PC:
5798 case DYNAMIC_PC_LOOKUP:
5799 npc = DYNAMIC_PC;
5800 break;
5801 default:
5802 g_assert_not_reached();
5803 }
5804 }
5805 tcg_gen_insn_start(dc->pc, npc);
5806 }
5807
5808 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5809 {
5810 DisasContext *dc = container_of(dcbase, DisasContext, base);
5811 CPUSPARCState *env = cpu_env(cs);
5812 unsigned int insn;
5813
5814 insn = translator_ldl(env, &dc->base, dc->pc);
5815 dc->base.pc_next += 4;
5816
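/* Try the decodetree decoder first; insns not yet converted fall
   back to the legacy hand-written decoder. */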
5817 if (!decode(dc, insn)) {
5818 disas_sparc_legacy(dc, insn);
5819 }
5820
5821 if (dc->base.is_jmp == DISAS_NORETURN) {
5822 return;
5823 }
5824 if (dc->pc != dc->base.pc_next) {
5825 dc->base.is_jmp = DISAS_TOO_MANY;
5826 }
5827 }
5828
5829 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5830 {
5831 DisasContext *dc = container_of(dcbase, DisasContext, base);
5832 DisasDelayException *e, *e_next;
5833 bool may_lookup;
5834
5835 switch (dc->base.is_jmp) {
5836 case DISAS_NEXT:
5837 case DISAS_TOO_MANY:
5838 if (((dc->pc | dc->npc) & 3) == 0) {
5839 /* static PC and NPC: we can use direct chaining */
5840 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5841 break;
5842 }
5843
5844 may_lookup = true;
5845 if (dc->pc & 3) {
5846 switch (dc->pc) {
5847 case DYNAMIC_PC_LOOKUP:
5848 break;
5849 case DYNAMIC_PC:
5850 may_lookup = false;
5851 break;
5852 default:
5853 g_assert_not_reached();
5854 }
5855 } else {
5856 tcg_gen_movi_tl(cpu_pc, dc->pc);
5857 }
5858
5859 if (dc->npc & 3) {
5860 switch (dc->npc) {
5861 case JUMP_PC:
5862 gen_generic_branch(dc);
5863 break;
5864 case DYNAMIC_PC:
5865 may_lookup = false;
5866 break;
5867 case DYNAMIC_PC_LOOKUP:
5868 break;
5869 default:
5870 g_assert_not_reached();
5871 }
5872 } else {
5873 tcg_gen_movi_tl(cpu_npc, dc->npc);
5874 }
5875 if (may_lookup) {
5876 tcg_gen_lookup_and_goto_ptr();
5877 } else {
5878 tcg_gen_exit_tb(NULL, 0);
5879 }
5880 break;
5881
5882 case DISAS_NORETURN:
5883 break;
5884
5885 case DISAS_EXIT:
5886 /* Exit TB */
5887 save_state(dc);
5888 tcg_gen_exit_tb(NULL, 0);
5889 break;
5890
5891 default:
5892 g_assert_not_reached();
5893 }
5894
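/* Emit the out-of-line exception paths queued during translation. */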
5895 for (e = dc->delay_excp_list; e; e = e_next) {
5896 gen_set_label(e->lab);
5897
5898 tcg_gen_movi_tl(cpu_pc, e->pc);
5899 if (e->npc % 4 == 0) {
5900 tcg_gen_movi_tl(cpu_npc, e->npc);
5901 }
5902 gen_helper_raise_exception(tcg_env, e->excp);
5903
5904 e_next = e->next;
5905 g_free(e);
5906 }
5907 }
5908
5909 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5910 CPUState *cpu, FILE *logfile)
5911 {
5912 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5913 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5914 }
5915
5916 static const TranslatorOps sparc_tr_ops = {
5917 .init_disas_context = sparc_tr_init_disas_context,
5918 .tb_start = sparc_tr_tb_start,
5919 .insn_start = sparc_tr_insn_start,
5920 .translate_insn = sparc_tr_translate_insn,
5921 .tb_stop = sparc_tr_tb_stop,
5922 .disas_log = sparc_tr_disas_log,
5923 };
5924
5925 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5926 target_ulong pc, void *host_pc)
5927 {
5928 DisasContext dc = {};
5929
5930 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5931 }
5932
5933 void sparc_tcg_init(void)
5934 {
5935 static const char gregnames[32][4] = {
5936 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5937 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5938 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5939 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5940 };
5941 static const char fregnames[32][4] = {
5942 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5943 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5944 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5945 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5946 };
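/* Only even single-precision names: each TCGv_i64 backs a pair of
   32-bit float registers. */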
5947
5948 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5949 #ifdef TARGET_SPARC64
5950 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5951 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5952 #endif
5953 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5954 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5955 };
5956
5957 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5958 #ifdef TARGET_SPARC64
5959 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5960 #endif
5961 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5962 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5963 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5964 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5965 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5966 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5967 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5968 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5969 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5970 };
5971
5972 unsigned int i;
5973
5974 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5975 offsetof(CPUSPARCState, regwptr),
5976 "regwptr");
5977
5978 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5979 *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5980 }
5981
5982 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5983 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5984 }
5985
5986 cpu_regs[0] = NULL;
5987 for (i = 1; i < 8; ++i) {
5988 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5989 offsetof(CPUSPARCState, gregs[i]),
5990 gregnames[i]);
5991 }
5992
5993 for (i = 8; i < 32; ++i) {
5994 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5995 (i - 8) * sizeof(target_ulong),
5996 gregnames[i]);
5997 }
5998
5999 for (i = 0; i < TARGET_DPREGS; i++) {
6000 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
6001 offsetof(CPUSPARCState, fpr[i]),
6002 fregnames[i]);
6003 }
6004 }
6005
6006 void sparc_restore_state_to_opc(CPUState *cs,
6007 const TranslationBlock *tb,
6008 const uint64_t *data)
6009 {
6010 SPARCCPU *cpu = SPARC_CPU(cs);
6011 CPUSPARCState *env = &cpu->env;
6012 target_ulong pc = data[0];
6013 target_ulong npc = data[1];
6014
6015 env->pc = pc;
6016 if (npc == DYNAMIC_PC) {
6017 /* dynamic NPC: already stored */
6018 } else if (npc & JUMP_PC) {
6019 /* jump PC: use 'cond' and the jump targets of the translation */
6020 if (env->cond) {
6021 env->npc = npc & ~3;
6022 } else {
6023 env->npc = pc + 4;
6024 }
6025 } else {
6026 env->npc = npc;
6027 }
6028 }